mirror of
https://github.com/dolthub/dolt.git
synced 2026-02-14 01:07:08 -06:00
Merge branch 'main' into james/diff-index2
This commit is contained in:
@@ -117,7 +117,7 @@ programming language, start a SQL server on the command line:
|
||||
|
||||
```sh
|
||||
% dolt sql-server
|
||||
Starting server with Config HP="localhost:3306"|U="root"|P=""|T="28800000"|R="false"|L="info"
|
||||
Starting server with Config HP="localhost:3306"|T="28800000"|R="false"|L="info"
|
||||
```
|
||||
|
||||
Then connect to the database with any standard MySQL connector and
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/analyzer"
|
||||
"github.com/dolthub/go-mysql-server/sql/information_schema"
|
||||
"github.com/dolthub/go-mysql-server/sql/mysql_db"
|
||||
"github.com/dolthub/vitess/go/vt/sqlparser"
|
||||
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/cli"
|
||||
@@ -33,6 +34,7 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
|
||||
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/config"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/tracing"
|
||||
)
|
||||
@@ -53,7 +55,10 @@ func NewSqlEngine(
|
||||
format PrintResultFormat,
|
||||
initialDb string,
|
||||
isReadOnly bool,
|
||||
tempUsers []gms.TemporaryUser,
|
||||
mysqlDbFilePath string,
|
||||
privFilePath string,
|
||||
serverUser string,
|
||||
serverPass string,
|
||||
autocommit bool) (*SqlEngine, error) {
|
||||
|
||||
parallelism := runtime.GOMAXPROCS(0)
|
||||
@@ -75,7 +80,54 @@ func NewSqlEngine(
|
||||
b := env.GetDefaultInitBranch(mrEnv.Config())
|
||||
pro := dsqle.NewDoltDatabaseProvider(b, mrEnv.FileSystem(), all...)
|
||||
|
||||
// Set mysql.db file path from server
|
||||
mysql_file_handler.SetMySQLDbFilePath(mysqlDbFilePath)
|
||||
|
||||
// Load in MySQL Db from file, if it exists
|
||||
data, err := mysql_file_handler.LoadData()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use privilege file iff mysql.db file DNE
|
||||
var users []*mysql_db.User
|
||||
var roles []*mysql_db.RoleEdge
|
||||
var tempUsers []gms.TemporaryUser
|
||||
if len(data) == 0 {
|
||||
// Set privilege file path from server
|
||||
if privFilePath != "" {
|
||||
mysql_file_handler.SetPrivilegeFilePath(privFilePath)
|
||||
}
|
||||
|
||||
// Load privileges from privilege file
|
||||
users, roles, err = mysql_file_handler.LoadPrivileges()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create temporary users if no privileges in config
|
||||
if len(users) == 0 && len(serverUser) > 0 {
|
||||
tempUsers = append(tempUsers, gms.TemporaryUser{
|
||||
Username: serverUser,
|
||||
Password: serverPass,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Set up engine
|
||||
engine := gms.New(analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(), &gms.Config{IsReadOnly: isReadOnly, TemporaryUsers: tempUsers}).WithBackgroundThreads(bThreads)
|
||||
// Load MySQL Db information
|
||||
if err = engine.Analyzer.Catalog.MySQLDb.LoadData(sql.NewEmptyContext(), data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Load Privilege data iff mysql db didn't exist
|
||||
if len(data) == 0 {
|
||||
if err = engine.Analyzer.Catalog.MySQLDb.LoadPrivilegeData(sql.NewEmptyContext(), users, roles); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Set persist callbacks
|
||||
engine.Analyzer.Catalog.MySQLDb.SetPersistCallback(mysql_file_handler.SaveData)
|
||||
|
||||
if dbg, ok := os.LookupEnv("DOLT_SQL_DEBUG_LOG"); ok && strings.ToLower(dbg) == "true" {
|
||||
engine.Analyzer.Debug = true
|
||||
|
||||
@@ -377,7 +377,18 @@ func execShell(
|
||||
format engine.PrintResultFormat,
|
||||
initialDb string,
|
||||
) errhand.VerboseError {
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, format, initialDb, false, nil, true)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
format,
|
||||
initialDb,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
@@ -398,7 +409,18 @@ func execBatch(
|
||||
format engine.PrintResultFormat,
|
||||
initialDb string,
|
||||
) errhand.VerboseError {
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, format, initialDb, false, nil, false)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
format,
|
||||
initialDb,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
@@ -409,6 +431,9 @@ func execBatch(
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
// In batch mode, we need to set a couple flags on the session to prevent constant flushes to disk
|
||||
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
|
||||
err = runBatchMode(sqlCtx, se, batchInput, continueOnErr)
|
||||
@@ -433,7 +458,18 @@ func execMultiStatements(
|
||||
format engine.PrintResultFormat,
|
||||
initialDb string,
|
||||
) errhand.VerboseError {
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, format, initialDb, false, nil, true)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
format,
|
||||
initialDb,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
@@ -444,6 +480,9 @@ func execMultiStatements(
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
err = runMultiStatementMode(sqlCtx, se, batchInput, continueOnErr)
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
@@ -455,7 +494,18 @@ func execQuery(
|
||||
format engine.PrintResultFormat,
|
||||
initialDb string,
|
||||
) errhand.VerboseError {
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, format, initialDb, false, nil, true)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
format,
|
||||
initialDb,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
@@ -466,6 +516,9 @@ func execQuery(
|
||||
return errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
sqlSch, rowIter, err := processQuery(sqlCtx, query, se)
|
||||
if err != nil {
|
||||
return formatQueryError("", err)
|
||||
@@ -732,6 +785,9 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
|
||||
currentDB := sqlCtx.Session.GetCurrentDatabase()
|
||||
currEnv := mrEnv.GetEnv(currentDB)
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
historyFile := filepath.Join(".sqlhistory") // history file written to working dir
|
||||
initialPrompt := fmt.Sprintf("%s> ", sqlCtx.GetCurrentDatabase())
|
||||
initialMultilinePrompt := fmt.Sprintf(fmt.Sprintf("%%%ds", len(initialPrompt)), "-> ")
|
||||
|
||||
@@ -22,10 +22,8 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
gms "github.com/dolthub/go-mysql-server"
|
||||
"github.com/dolthub/go-mysql-server/server"
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/mysql_db"
|
||||
"github.com/dolthub/vitess/go/mysql"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -34,7 +32,6 @@ import (
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env"
|
||||
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
|
||||
)
|
||||
|
||||
// Serve starts a MySQL-compatible server. Returns any errors that were encountered.
|
||||
@@ -166,65 +163,24 @@ func Serve(
|
||||
serverConf.TLSConfig = tlsConfig
|
||||
serverConf.RequireSecureTransport = serverConfig.RequireSecureTransport()
|
||||
|
||||
// Set mysql.db file path from server
|
||||
if serverConfig.MySQLDbFilePath() != "" {
|
||||
mysql_file_handler.SetMySQLDbFilePath(serverConfig.MySQLDbFilePath())
|
||||
}
|
||||
|
||||
// Load in MySQL Db from file, if it exists
|
||||
data, err := mysql_file_handler.LoadData()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use privilege file iff mysql.db file DNE
|
||||
var users []*mysql_db.User
|
||||
var roles []*mysql_db.RoleEdge
|
||||
var tempUsers []gms.TemporaryUser
|
||||
if len(data) == 0 {
|
||||
// Set privilege file path from server
|
||||
if serverConfig.PrivilegeFilePath() != "" {
|
||||
mysql_file_handler.SetPrivilegeFilePath(serverConfig.PrivilegeFilePath())
|
||||
}
|
||||
|
||||
// Load privileges from privilege file
|
||||
users, roles, err = mysql_file_handler.LoadPrivileges()
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
|
||||
// Create temporary users if no privileges in config
|
||||
if len(users) == 0 && len(serverConfig.User()) > 0 {
|
||||
tempUsers = append(tempUsers, gms.TemporaryUser{
|
||||
Username: serverConfig.User(),
|
||||
Password: serverConfig.Password(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Create SQL Engine with users
|
||||
sqlEngine, err := engine.NewSqlEngine(ctx, mrEnv, engine.FormatTabular, "", isReadOnly, tempUsers, serverConfig.AutoCommit())
|
||||
sqlEngine, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
engine.FormatTabular,
|
||||
"",
|
||||
isReadOnly,
|
||||
serverConfig.MySQLDbFilePath(),
|
||||
serverConfig.PrivilegeFilePath(),
|
||||
serverConfig.User(),
|
||||
serverConfig.Password(),
|
||||
serverConfig.AutoCommit(),
|
||||
)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
defer sqlEngine.Close()
|
||||
|
||||
// Load in MySQL DB information
|
||||
err = sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.LoadData(sql.NewEmptyContext(), data)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
|
||||
// Load in Privilege data iff mysql db didn't exist
|
||||
if len(data) == 0 {
|
||||
err = sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.LoadPrivilegeData(sql.NewEmptyContext(), users, roles)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Set persist callbacks
|
||||
sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.SetPersistCallback(mysql_file_handler.SaveData)
|
||||
labels := serverConfig.MetricsLabels()
|
||||
listener := newMetricsListener(labels)
|
||||
defer listener.Close()
|
||||
|
||||
@@ -386,8 +386,8 @@ func ConnectionString(config ServerConfig) string {
|
||||
|
||||
// ConfigInfo returns a summary of some of the config which contains some of the more important information
|
||||
func ConfigInfo(config ServerConfig) string {
|
||||
return fmt.Sprintf(`HP="%v:%v"|U="%v"|P="%v"|T="%v"|R="%v"|L="%v"`, config.Host(), config.Port(), config.User(),
|
||||
config.Password(), config.ReadTimeout(), config.ReadOnly(), config.LogLevel())
|
||||
return fmt.Sprintf(`HP="%v:%v"|T="%v"|R="%v"|L="%v"`, config.Host(), config.Port(),
|
||||
config.ReadTimeout(), config.ReadOnly(), config.LogLevel())
|
||||
}
|
||||
|
||||
// LoadTLSConfig loads the certificate chain from config.TLSKey() and config.TLSCert() and returns
|
||||
|
||||
111
go/gen/fb/serial/encoding.go
Normal file
111
go/gen/fb/serial/encoding.go
Normal file
@@ -0,0 +1,111 @@
|
||||
// Copyright 2022 Dolthub, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by the FlatBuffers compiler. DO NOT EDIT.
|
||||
|
||||
package serial
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Encoding byte
|
||||
|
||||
const (
|
||||
EncodingNull Encoding = 0
|
||||
EncodingInt8 Encoding = 1
|
||||
EncodingUint8 Encoding = 2
|
||||
EncodingInt16 Encoding = 3
|
||||
EncodingUint16 Encoding = 4
|
||||
EncodingInt32 Encoding = 7
|
||||
EncodingUint32 Encoding = 8
|
||||
EncodingInt64 Encoding = 9
|
||||
EncodingUint64 Encoding = 10
|
||||
EncodingFloat32 Encoding = 11
|
||||
EncodingFloat64 Encoding = 12
|
||||
EncodingBit64 Encoding = 13
|
||||
EncodingHash128 Encoding = 14
|
||||
EncodingYear Encoding = 15
|
||||
EncodingDate Encoding = 16
|
||||
EncodingTime Encoding = 17
|
||||
EncodingDatetime Encoding = 18
|
||||
EncodingEnum Encoding = 19
|
||||
EncodingSet Encoding = 20
|
||||
EncodingString Encoding = 128
|
||||
EncodingBytes Encoding = 129
|
||||
EncodingDecimal Encoding = 130
|
||||
EncodingJSON Encoding = 131
|
||||
EncodingGeometry Encoding = 133
|
||||
)
|
||||
|
||||
var EnumNamesEncoding = map[Encoding]string{
|
||||
EncodingNull: "Null",
|
||||
EncodingInt8: "Int8",
|
||||
EncodingUint8: "Uint8",
|
||||
EncodingInt16: "Int16",
|
||||
EncodingUint16: "Uint16",
|
||||
EncodingInt32: "Int32",
|
||||
EncodingUint32: "Uint32",
|
||||
EncodingInt64: "Int64",
|
||||
EncodingUint64: "Uint64",
|
||||
EncodingFloat32: "Float32",
|
||||
EncodingFloat64: "Float64",
|
||||
EncodingBit64: "Bit64",
|
||||
EncodingHash128: "Hash128",
|
||||
EncodingYear: "Year",
|
||||
EncodingDate: "Date",
|
||||
EncodingTime: "Time",
|
||||
EncodingDatetime: "Datetime",
|
||||
EncodingEnum: "Enum",
|
||||
EncodingSet: "Set",
|
||||
EncodingString: "String",
|
||||
EncodingBytes: "Bytes",
|
||||
EncodingDecimal: "Decimal",
|
||||
EncodingJSON: "JSON",
|
||||
EncodingGeometry: "Geometry",
|
||||
}
|
||||
|
||||
var EnumValuesEncoding = map[string]Encoding{
|
||||
"Null": EncodingNull,
|
||||
"Int8": EncodingInt8,
|
||||
"Uint8": EncodingUint8,
|
||||
"Int16": EncodingInt16,
|
||||
"Uint16": EncodingUint16,
|
||||
"Int32": EncodingInt32,
|
||||
"Uint32": EncodingUint32,
|
||||
"Int64": EncodingInt64,
|
||||
"Uint64": EncodingUint64,
|
||||
"Float32": EncodingFloat32,
|
||||
"Float64": EncodingFloat64,
|
||||
"Bit64": EncodingBit64,
|
||||
"Hash128": EncodingHash128,
|
||||
"Year": EncodingYear,
|
||||
"Date": EncodingDate,
|
||||
"Time": EncodingTime,
|
||||
"Datetime": EncodingDatetime,
|
||||
"Enum": EncodingEnum,
|
||||
"Set": EncodingSet,
|
||||
"String": EncodingString,
|
||||
"Bytes": EncodingBytes,
|
||||
"Decimal": EncodingDecimal,
|
||||
"JSON": EncodingJSON,
|
||||
"Geometry": EncodingGeometry,
|
||||
}
|
||||
|
||||
func (v Encoding) String() string {
|
||||
if s, ok := EnumNamesEncoding[v]; ok {
|
||||
return s
|
||||
}
|
||||
return "Encoding(" + strconv.FormatInt(int64(v), 10) + ")"
|
||||
}
|
||||
@@ -17,440 +17,9 @@
|
||||
package serial
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
flatbuffers "github.com/google/flatbuffers/go"
|
||||
)
|
||||
|
||||
type ForeignKeyReferenceOption byte
|
||||
|
||||
const (
|
||||
ForeignKeyReferenceOptionDefaultAction ForeignKeyReferenceOption = 0
|
||||
ForeignKeyReferenceOptionCascade ForeignKeyReferenceOption = 1
|
||||
ForeignKeyReferenceOptionNoAction ForeignKeyReferenceOption = 2
|
||||
ForeignKeyReferenceOptionRestrict ForeignKeyReferenceOption = 3
|
||||
ForeignKeyReferenceOptionSetNull ForeignKeyReferenceOption = 4
|
||||
)
|
||||
|
||||
var EnumNamesForeignKeyReferenceOption = map[ForeignKeyReferenceOption]string{
|
||||
ForeignKeyReferenceOptionDefaultAction: "DefaultAction",
|
||||
ForeignKeyReferenceOptionCascade: "Cascade",
|
||||
ForeignKeyReferenceOptionNoAction: "NoAction",
|
||||
ForeignKeyReferenceOptionRestrict: "Restrict",
|
||||
ForeignKeyReferenceOptionSetNull: "SetNull",
|
||||
}
|
||||
|
||||
var EnumValuesForeignKeyReferenceOption = map[string]ForeignKeyReferenceOption{
|
||||
"DefaultAction": ForeignKeyReferenceOptionDefaultAction,
|
||||
"Cascade": ForeignKeyReferenceOptionCascade,
|
||||
"NoAction": ForeignKeyReferenceOptionNoAction,
|
||||
"Restrict": ForeignKeyReferenceOptionRestrict,
|
||||
"SetNull": ForeignKeyReferenceOptionSetNull,
|
||||
}
|
||||
|
||||
func (v ForeignKeyReferenceOption) String() string {
|
||||
if s, ok := EnumNamesForeignKeyReferenceOption[v]; ok {
|
||||
return s
|
||||
}
|
||||
return "ForeignKeyReferenceOption(" + strconv.FormatInt(int64(v), 10) + ")"
|
||||
}
|
||||
|
||||
type Column struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &Column{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &Column{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *Column) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *Column) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *Column) Name() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Column) StorageOrder() uint16 {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetUint16(o + rcv._tab.Pos)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateStorageOrder(n uint16) bool {
|
||||
return rcv._tab.MutateUint16Slot(6, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) SchemaOrder() uint16 {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetUint16(o + rcv._tab.Pos)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateSchemaOrder(n uint16) bool {
|
||||
return rcv._tab.MutateUint16Slot(8, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Type(obj *Type) *Type {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
x := rcv._tab.Indirect(o + rcv._tab.Pos)
|
||||
if obj == nil {
|
||||
obj = new(Type)
|
||||
}
|
||||
obj.Init(rcv._tab.Bytes, x)
|
||||
return obj
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Column) Nullable() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateNullable(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(12, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) PrimaryKey() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutatePrimaryKey(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(14, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) AutoIncrement() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateAutoIncrement(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(16, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Default(obj *ColumnDefault) *ColumnDefault {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
|
||||
if o != 0 {
|
||||
x := rcv._tab.Indirect(o + rcv._tab.Pos)
|
||||
if obj == nil {
|
||||
obj = new(ColumnDefault)
|
||||
}
|
||||
obj.Init(rcv._tab.Bytes, x)
|
||||
return obj
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Column) Constraints(obj *ColumnConstraint, j int) bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
|
||||
if o != 0 {
|
||||
x := rcv._tab.Vector(o)
|
||||
x += flatbuffers.UOffsetT(j) * 4
|
||||
x = rcv._tab.Indirect(x)
|
||||
obj.Init(rcv._tab.Bytes, x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) ConstraintsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Column) Comment() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ColumnStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(10)
|
||||
}
|
||||
func ColumnAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
|
||||
}
|
||||
func ColumnAddStorageOrder(builder *flatbuffers.Builder, storageOrder uint16) {
|
||||
builder.PrependUint16Slot(1, storageOrder, 0)
|
||||
}
|
||||
func ColumnAddSchemaOrder(builder *flatbuffers.Builder, schemaOrder uint16) {
|
||||
builder.PrependUint16Slot(2, schemaOrder, 0)
|
||||
}
|
||||
func ColumnAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(type_), 0)
|
||||
}
|
||||
func ColumnAddNullable(builder *flatbuffers.Builder, nullable bool) {
|
||||
builder.PrependBoolSlot(4, nullable, false)
|
||||
}
|
||||
func ColumnAddPrimaryKey(builder *flatbuffers.Builder, primaryKey bool) {
|
||||
builder.PrependBoolSlot(5, primaryKey, false)
|
||||
}
|
||||
func ColumnAddAutoIncrement(builder *flatbuffers.Builder, autoIncrement bool) {
|
||||
builder.PrependBoolSlot(6, autoIncrement, false)
|
||||
}
|
||||
func ColumnAddDefault(builder *flatbuffers.Builder, default_ flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(7, flatbuffers.UOffsetT(default_), 0)
|
||||
}
|
||||
func ColumnAddConstraints(builder *flatbuffers.Builder, constraints flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(constraints), 0)
|
||||
}
|
||||
func ColumnStartConstraintsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func ColumnAddComment(builder *flatbuffers.Builder, comment flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(9, flatbuffers.UOffsetT(comment), 0)
|
||||
}
|
||||
func ColumnEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type Type struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsType(buf []byte, offset flatbuffers.UOffsetT) *Type {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &Type{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsType(buf []byte, offset flatbuffers.UOffsetT) *Type {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &Type{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *Type) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *Type) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *Type) Type() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Type) ParamKeys(j int) []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Type) ParamKeysLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Type) ParamValues(j int) []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Type) ParamValuesLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func TypeStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(3)
|
||||
}
|
||||
func TypeAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(type_), 0)
|
||||
}
|
||||
func TypeAddParamKeys(builder *flatbuffers.Builder, paramKeys flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(paramKeys), 0)
|
||||
}
|
||||
func TypeStartParamKeysVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func TypeAddParamValues(builder *flatbuffers.Builder, paramValues flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(paramValues), 0)
|
||||
}
|
||||
func TypeStartParamValuesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func TypeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type ColumnDefault struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsColumnDefault(buf []byte, offset flatbuffers.UOffsetT) *ColumnDefault {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &ColumnDefault{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsColumnDefault(buf []byte, offset flatbuffers.UOffsetT) *ColumnDefault {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &ColumnDefault{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *ColumnDefault) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *ColumnDefault) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *ColumnDefault) Expression() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ColumnDefaultStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(1)
|
||||
}
|
||||
func ColumnDefaultAddExpression(builder *flatbuffers.Builder, expression flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(expression), 0)
|
||||
}
|
||||
func ColumnDefaultEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type ColumnConstraint struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsColumnConstraint(buf []byte, offset flatbuffers.UOffsetT) *ColumnConstraint {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &ColumnConstraint{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsColumnConstraint(buf []byte, offset flatbuffers.UOffsetT) *ColumnConstraint {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &ColumnConstraint{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) Name() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) Expression() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) Enforced() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *ColumnConstraint) MutateEnforced(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(8, n)
|
||||
}
|
||||
|
||||
func ColumnConstraintStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(3)
|
||||
}
|
||||
func ColumnConstraintAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
|
||||
}
|
||||
func ColumnConstraintAddExpression(builder *flatbuffers.Builder, expression flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(expression), 0)
|
||||
}
|
||||
func ColumnConstraintAddEnforced(builder *flatbuffers.Builder, enforced bool) {
|
||||
builder.PrependBoolSlot(2, enforced, false)
|
||||
}
|
||||
func ColumnConstraintEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type TableSchema struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
@@ -498,7 +67,7 @@ func (rcv *TableSchema) ColumnsLength() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *TableSchema) Indexes(obj *IndexSchema, j int) bool {
|
||||
func (rcv *TableSchema) Indexes(obj *Index, j int) bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
x := rcv._tab.Vector(o)
|
||||
@@ -518,8 +87,25 @@ func (rcv *TableSchema) IndexesLength() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *TableSchema) Checks(j int) []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *TableSchema) ChecksLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func TableSchemaStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(2)
|
||||
builder.StartObject(3)
|
||||
}
|
||||
func TableSchemaAddColumns(builder *flatbuffers.Builder, columns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(columns), 0)
|
||||
@@ -533,38 +119,44 @@ func TableSchemaAddIndexes(builder *flatbuffers.Builder, indexes flatbuffers.UOf
|
||||
func TableSchemaStartIndexesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func TableSchemaAddChecks(builder *flatbuffers.Builder, checks flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(checks), 0)
|
||||
}
|
||||
func TableSchemaStartChecksVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func TableSchemaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type IndexSchema struct {
|
||||
type Column struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsIndexSchema(buf []byte, offset flatbuffers.UOffsetT) *IndexSchema {
|
||||
func GetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &IndexSchema{}
|
||||
x := &Column{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsIndexSchema(buf []byte, offset flatbuffers.UOffsetT) *IndexSchema {
|
||||
func GetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &IndexSchema{}
|
||||
x := &Column{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
func (rcv *Column) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Table() flatbuffers.Table {
|
||||
func (rcv *Column) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Name() []byte {
|
||||
func (rcv *Column) Name() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
@@ -572,241 +164,293 @@ func (rcv *IndexSchema) Name() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Columns(j int) []byte {
|
||||
func (rcv *Column) Definition() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) ColumnsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
func (rcv *Column) DisplayOrder() int16 {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
return rcv._tab.GetInt16(o + rcv._tab.Pos)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Unique() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
func (rcv *Column) MutateDisplayOrder(n int16) bool {
|
||||
return rcv._tab.MutateInt16Slot(8, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Encoding() Encoding {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
return Encoding(rcv._tab.GetByte(o + rcv._tab.Pos))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateEncoding(n Encoding) bool {
|
||||
return rcv._tab.MutateByteSlot(10, byte(n))
|
||||
}
|
||||
|
||||
func (rcv *Column) PrimaryKey() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) MutateUnique(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(8, n)
|
||||
func (rcv *Column) MutatePrimaryKey(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(12, n)
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) SystemDefined() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
func (rcv *Column) Nullable() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) MutateSystemDefined(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(10, n)
|
||||
func (rcv *Column) MutateNullable(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(14, n)
|
||||
}
|
||||
|
||||
func (rcv *IndexSchema) Comment() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func IndexSchemaStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(5)
|
||||
}
|
||||
func IndexSchemaAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
|
||||
}
|
||||
func IndexSchemaAddColumns(builder *flatbuffers.Builder, columns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(columns), 0)
|
||||
}
|
||||
func IndexSchemaStartColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
}
|
||||
func IndexSchemaAddUnique(builder *flatbuffers.Builder, unique bool) {
|
||||
builder.PrependBoolSlot(2, unique, false)
|
||||
}
|
||||
func IndexSchemaAddSystemDefined(builder *flatbuffers.Builder, systemDefined bool) {
|
||||
builder.PrependBoolSlot(3, systemDefined, false)
|
||||
}
|
||||
func IndexSchemaAddComment(builder *flatbuffers.Builder, comment flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(comment), 0)
|
||||
}
|
||||
func IndexSchemaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type ForeignKey struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &ForeignKey{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &ForeignKey{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) Name() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ChildTable() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ChildColumns(j int) []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ChildColumnsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ChildIndex() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ParentTable() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ParentColumns(j int) []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ParentColumnsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) ParentIndex() []byte {
|
||||
func (rcv *Column) AutoIncrement() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateAutoIncrement(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(16, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Hidden() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateHidden(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(18, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Generated() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateGenerated(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(20, n)
|
||||
}
|
||||
|
||||
func (rcv *Column) Virtual() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Column) MutateVirtual(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(22, n)
|
||||
}
|
||||
|
||||
func ColumnStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(10)
|
||||
}
|
||||
func ColumnAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
|
||||
}
|
||||
func ColumnAddDefinition(builder *flatbuffers.Builder, definition flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(definition), 0)
|
||||
}
|
||||
func ColumnAddDisplayOrder(builder *flatbuffers.Builder, displayOrder int16) {
|
||||
builder.PrependInt16Slot(2, displayOrder, 0)
|
||||
}
|
||||
func ColumnAddEncoding(builder *flatbuffers.Builder, encoding Encoding) {
|
||||
builder.PrependByteSlot(3, byte(encoding), 0)
|
||||
}
|
||||
func ColumnAddPrimaryKey(builder *flatbuffers.Builder, primaryKey bool) {
|
||||
builder.PrependBoolSlot(4, primaryKey, false)
|
||||
}
|
||||
func ColumnAddNullable(builder *flatbuffers.Builder, nullable bool) {
|
||||
builder.PrependBoolSlot(5, nullable, false)
|
||||
}
|
||||
func ColumnAddAutoIncrement(builder *flatbuffers.Builder, autoIncrement bool) {
|
||||
builder.PrependBoolSlot(6, autoIncrement, false)
|
||||
}
|
||||
func ColumnAddHidden(builder *flatbuffers.Builder, hidden bool) {
|
||||
builder.PrependBoolSlot(7, hidden, false)
|
||||
}
|
||||
func ColumnAddGenerated(builder *flatbuffers.Builder, generated bool) {
|
||||
builder.PrependBoolSlot(8, generated, false)
|
||||
}
|
||||
func ColumnAddVirtual(builder *flatbuffers.Builder, virtual bool) {
|
||||
builder.PrependBoolSlot(9, virtual, false)
|
||||
}
|
||||
func ColumnEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
type Index struct {
|
||||
_tab flatbuffers.Table
|
||||
}
|
||||
|
||||
func GetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset:])
|
||||
x := &Index{}
|
||||
x.Init(buf, n+offset)
|
||||
return x
|
||||
}
|
||||
|
||||
func GetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
|
||||
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
|
||||
x := &Index{}
|
||||
x.Init(buf, n+offset+flatbuffers.SizeUint32)
|
||||
return x
|
||||
}
|
||||
|
||||
func (rcv *Index) Init(buf []byte, i flatbuffers.UOffsetT) {
|
||||
rcv._tab.Bytes = buf
|
||||
rcv._tab.Pos = i
|
||||
}
|
||||
|
||||
func (rcv *Index) Table() flatbuffers.Table {
|
||||
return rcv._tab
|
||||
}
|
||||
|
||||
func (rcv *Index) Name() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
|
||||
if o != 0 {
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) OnUpdate() ForeignKeyReferenceOption {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(18))
|
||||
func (rcv *Index) Definition() []byte {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
|
||||
if o != 0 {
|
||||
return ForeignKeyReferenceOption(rcv._tab.GetByte(o + rcv._tab.Pos))
|
||||
return rcv._tab.ByteVector(o + rcv._tab.Pos)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rcv *Index) KeyColumns(j int) uint16 {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.GetUint16(a + flatbuffers.UOffsetT(j*2))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) MutateOnUpdate(n ForeignKeyReferenceOption) bool {
|
||||
return rcv._tab.MutateByteSlot(18, byte(n))
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) OnDelete() ForeignKeyReferenceOption {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
|
||||
func (rcv *Index) KeyColumnsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
return ForeignKeyReferenceOption(rcv._tab.GetByte(o + rcv._tab.Pos))
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *ForeignKey) MutateOnDelete(n ForeignKeyReferenceOption) bool {
|
||||
return rcv._tab.MutateByteSlot(20, byte(n))
|
||||
func (rcv *Index) MutateKeyColumns(j int, n uint16) bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.MutateUint16(a+flatbuffers.UOffsetT(j*2), n)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ForeignKeyStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(9)
|
||||
func (rcv *Index) ValueColumns(j int) uint16 {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.GetUint16(a + flatbuffers.UOffsetT(j*2))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func ForeignKeyAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
|
||||
func (rcv *Index) ValueColumnsLength() int {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
return rcv._tab.VectorLen(o)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *Index) MutateValueColumns(j int, n uint16) bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
a := rcv._tab.Vector(o)
|
||||
return rcv._tab.MutateUint16(a+flatbuffers.UOffsetT(j*2), n)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Index) Unique() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Index) MutateUnique(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(12, n)
|
||||
}
|
||||
|
||||
func (rcv *Index) SystemDefined() bool {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
|
||||
if o != 0 {
|
||||
return rcv._tab.GetBool(o + rcv._tab.Pos)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rcv *Index) MutateSystemDefined(n bool) bool {
|
||||
return rcv._tab.MutateBoolSlot(14, n)
|
||||
}
|
||||
|
||||
func IndexStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(6)
|
||||
}
|
||||
func IndexAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
|
||||
}
|
||||
func ForeignKeyAddChildTable(builder *flatbuffers.Builder, childTable flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(childTable), 0)
|
||||
func IndexAddDefinition(builder *flatbuffers.Builder, definition flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(definition), 0)
|
||||
}
|
||||
func ForeignKeyAddChildColumns(builder *flatbuffers.Builder, childColumns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(childColumns), 0)
|
||||
func IndexAddKeyColumns(builder *flatbuffers.Builder, keyColumns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(keyColumns), 0)
|
||||
}
|
||||
func ForeignKeyStartChildColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
func IndexStartKeyColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(2, numElems, 2)
|
||||
}
|
||||
func ForeignKeyAddChildIndex(builder *flatbuffers.Builder, childIndex flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(childIndex), 0)
|
||||
func IndexAddValueColumns(builder *flatbuffers.Builder, valueColumns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(valueColumns), 0)
|
||||
}
|
||||
func ForeignKeyAddParentTable(builder *flatbuffers.Builder, parentTable flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(parentTable), 0)
|
||||
func IndexStartValueColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(2, numElems, 2)
|
||||
}
|
||||
func ForeignKeyAddParentColumns(builder *flatbuffers.Builder, parentColumns flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(parentColumns), 0)
|
||||
func IndexAddUnique(builder *flatbuffers.Builder, unique bool) {
|
||||
builder.PrependBoolSlot(4, unique, false)
|
||||
}
|
||||
func ForeignKeyStartParentColumnsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(4, numElems, 4)
|
||||
func IndexAddSystemDefined(builder *flatbuffers.Builder, systemDefined bool) {
|
||||
builder.PrependBoolSlot(5, systemDefined, false)
|
||||
}
|
||||
func ForeignKeyAddParentIndex(builder *flatbuffers.Builder, parentIndex flatbuffers.UOffsetT) {
|
||||
builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(parentIndex), 0)
|
||||
}
|
||||
func ForeignKeyAddOnUpdate(builder *flatbuffers.Builder, onUpdate ForeignKeyReferenceOption) {
|
||||
builder.PrependByteSlot(7, byte(onUpdate), 0)
|
||||
}
|
||||
func ForeignKeyAddOnDelete(builder *flatbuffers.Builder, onDelete ForeignKeyReferenceOption) {
|
||||
builder.PrependByteSlot(8, byte(onDelete), 0)
|
||||
}
|
||||
func ForeignKeyEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
func IndexEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ require (
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
|
||||
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
|
||||
github.com/dolthub/vitess v0.0.0-20220525003637-9c94a4060dd1
|
||||
github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/fatih/color v1.9.0
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
||||
@@ -68,7 +68,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220531182937-257f07bd27e5
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220601232251-f87a296cb3a8
|
||||
github.com/google/flatbuffers v2.0.6+incompatible
|
||||
github.com/gosuri/uilive v0.0.4
|
||||
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
|
||||
|
||||
8
go/go.sum
Executable file → Normal file
8
go/go.sum
Executable file → Normal file
@@ -178,8 +178,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
|
||||
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220531182937-257f07bd27e5 h1:EuTulidBelA0x5c3OqwkC4yuNfnodxJGsGnjSPghPVQ=
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220531182937-257f07bd27e5/go.mod h1:t8kUmFCl4oCVkMkRxgf7qROSn+5lQsFAUU5TZdoleI8=
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220601232251-f87a296cb3a8 h1:jNQXzxQOfNByB0TO9ukbJZsMFBlfx2CG3p1+7RNlOVw=
|
||||
github.com/dolthub/go-mysql-server v0.11.1-0.20220601232251-f87a296cb3a8/go.mod h1:VY2z/8rjWxzGzHFIRpOBFC7qBTj1PXQvNaXd5KNP+8A=
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
|
||||
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
|
||||
@@ -188,8 +188,8 @@ github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxP
|
||||
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
|
||||
github.com/dolthub/vitess v0.0.0-20220525003637-9c94a4060dd1 h1:lwzjI/92DnlmpgNqK+KV0oC31BQ/r6VE6RqDJAcb3GY=
|
||||
github.com/dolthub/vitess v0.0.0-20220525003637-9c94a4060dd1/go.mod h1:jxgvpEvrTNw2i4BKlwT75E775eUXBeMv5MPeQkIb9zI=
|
||||
github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b h1:3IG5hRFsoJeKNgdnwE+n1iZQOIuwKDFgrvDOCiK9S3E=
|
||||
github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b/go.mod h1:jxgvpEvrTNw2i4BKlwT75E775eUXBeMv5MPeQkIb9zI=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
|
||||
@@ -176,7 +176,9 @@ func awsConfigFromParams(params map[string]interface{}) (session.Options, error)
|
||||
}
|
||||
}
|
||||
|
||||
opts := session.Options{}
|
||||
opts := session.Options{
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
}
|
||||
|
||||
profile := ""
|
||||
if val, ok := params[AWSCredsProfile]; ok {
|
||||
|
||||
@@ -17,7 +17,6 @@ package doltdb
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
@@ -432,37 +431,6 @@ func (t *Table) RenameIndexRowData(ctx context.Context, oldIndexName, newIndexNa
|
||||
return t.SetIndexSet(ctx, indexes)
|
||||
}
|
||||
|
||||
// VerifyIndexRowData verifies that the index with the given name's data matches what the index expects.
|
||||
func (t *Table) VerifyIndexRowData(ctx context.Context, indexName string) error {
|
||||
sch, err := t.GetSchema(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
index := sch.Indexes().GetByName(indexName)
|
||||
if index == nil {
|
||||
return fmt.Errorf("index `%s` does not exist", indexName)
|
||||
}
|
||||
|
||||
indexes, err := t.GetIndexSet(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idx, err := indexes.GetIndex(ctx, sch, indexName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
im := durable.NomsMapFromIndex(idx)
|
||||
iter, err := im.Iterator(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return index.VerifyMap(ctx, iter, im.Format())
|
||||
}
|
||||
|
||||
// GetAutoIncrementValue returns the current AUTO_INCREMENT value for this table.
|
||||
func (t *Table) GetAutoIncrementValue(ctx context.Context) (uint64, error) {
|
||||
return t.table.GetAutoIncrement(ctx)
|
||||
|
||||
@@ -51,7 +51,18 @@ func NewSqlEngineReader(ctx context.Context, dEnv *env.DoltEnv, tableName string
|
||||
return true, nil
|
||||
})
|
||||
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, engine.FormatCsv, dbName, false, nil, false)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
engine.FormatCsv,
|
||||
dbName,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -61,6 +72,9 @@ func NewSqlEngineReader(ctx context.Context, dEnv *env.DoltEnv, tableName string
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
sch, iter, err := se.Query(sqlCtx, fmt.Sprintf("SELECT * FROM `%s`", tableName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -78,7 +78,18 @@ func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTable
|
||||
})
|
||||
|
||||
// Simplest path would have our import path be a layer over load data
|
||||
se, err := engine.NewSqlEngine(ctx, mrEnv, engine.FormatCsv, dbName, false, nil, false)
|
||||
se, err := engine.NewSqlEngine(
|
||||
ctx,
|
||||
mrEnv,
|
||||
engine.FormatCsv,
|
||||
dbName,
|
||||
false,
|
||||
"",
|
||||
"",
|
||||
"root",
|
||||
"",
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -89,6 +100,9 @@ func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTable
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add root client
|
||||
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
|
||||
|
||||
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
|
||||
|
||||
err = sqlCtx.Session.SetSessionVariable(sqlCtx, sql.AutoCommitSessionVar, false)
|
||||
|
||||
@@ -78,7 +78,7 @@ func pkRowFromNoms(sch schema.Schema, nomsKey, nomsVal types.Tuple) (Row, error)
|
||||
if col.IsPartOfPK {
|
||||
return false, errors.New("writing columns that are part of the primary key to non-pk values. col:" + col.Name)
|
||||
} else if !types.IsNull(val) {
|
||||
// Column is GeometryKind and received PointKind, LinestringKind, or PolygonKind
|
||||
// Column is GeometryKind and received PointKind, LineStringKind, or PolygonKind
|
||||
if col.Kind == types.GeometryKind && types.IsGeometryKind(val.Kind()) {
|
||||
filteredVals[tag] = val
|
||||
} else if col.Kind == val.Kind() {
|
||||
|
||||
@@ -16,7 +16,6 @@ package schema
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/dolthub/dolt/go/store/types"
|
||||
@@ -55,8 +54,6 @@ type Index interface {
|
||||
// ToTableTuple returns a tuple that may be used to retrieve the original row from the indexed table when given
|
||||
// a full index key (and not a partial index key).
|
||||
ToTableTuple(ctx context.Context, fullKey types.Tuple, format *types.NomsBinFormat) (types.Tuple, error)
|
||||
// VerifyMap returns whether the given map iterator contains all valid keys and values for this index.
|
||||
VerifyMap(ctx context.Context, iter types.MapIterator, nbf *types.NomsBinFormat) error
|
||||
}
|
||||
|
||||
var _ Index = (*indexImpl)(nil)
|
||||
@@ -239,66 +236,6 @@ func (ix *indexImpl) ToTableTuple(ctx context.Context, fullKey types.Tuple, form
|
||||
return types.NewTuple(format, resVals...)
|
||||
}
|
||||
|
||||
// VerifyMap implements Index.
|
||||
func (ix *indexImpl) VerifyMap(ctx context.Context, iter types.MapIterator, nbf *types.NomsBinFormat) error {
|
||||
lastKey := types.EmptyTuple(nbf)
|
||||
var keyVal types.Value
|
||||
var valVal types.Value
|
||||
expectedVal := types.EmptyTuple(nbf)
|
||||
var err error
|
||||
cols := make([]Column, len(ix.allTags))
|
||||
for i, tag := range ix.allTags {
|
||||
var ok bool
|
||||
cols[i], ok = ix.indexColl.colColl.TagToCol[tag]
|
||||
if !ok {
|
||||
return fmt.Errorf("index `%s` has column with tag `%d` which cannot be found", ix.name, tag)
|
||||
}
|
||||
}
|
||||
|
||||
for keyVal, valVal, err = iter.Next(ctx); err == nil && keyVal != nil; keyVal, valVal, err = iter.Next(ctx) {
|
||||
key := keyVal.(types.Tuple)
|
||||
i := 0
|
||||
hasNull := false
|
||||
if key.Len() != uint64(2*len(cols)) {
|
||||
return fmt.Errorf("mismatched value count in key tuple compared to what index `%s` expects", ix.name)
|
||||
}
|
||||
err = key.WalkValues(ctx, func(v types.Value) error {
|
||||
colIndex := i / 2
|
||||
isTag := i%2 == 0
|
||||
if isTag {
|
||||
if !v.Equals(types.Uint(cols[colIndex].Tag)) {
|
||||
return fmt.Errorf("column order of map does not match what index `%s` expects", ix.name)
|
||||
}
|
||||
} else {
|
||||
if types.IsNull(v) {
|
||||
hasNull = true
|
||||
} else if v.Kind() != cols[colIndex].TypeInfo.NomsKind() {
|
||||
return fmt.Errorf("column value in map does not match what index `%s` expects", ix.name)
|
||||
}
|
||||
}
|
||||
i++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ix.isUnique && !hasNull {
|
||||
partialKeysEqual, err := key.PrefixEquals(ctx, lastKey, uint64(len(ix.tags)*2))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if partialKeysEqual {
|
||||
return fmt.Errorf("UNIQUE constraint violation while verifying index: %s", ix.name)
|
||||
}
|
||||
}
|
||||
if !expectedVal.Equals(valVal) {
|
||||
return fmt.Errorf("index map value should be empty")
|
||||
}
|
||||
lastKey = key
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// copy returns an exact copy of the calling index.
|
||||
func (ix *indexImpl) copy() *indexImpl {
|
||||
newIx := *ix
|
||||
|
||||
@@ -17,9 +17,9 @@ package schema
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/dolthub/vitess/go/vt/proto/query"
|
||||
"gopkg.in/src-d/go-errors.v1"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
|
||||
"github.com/dolthub/dolt/go/store/types"
|
||||
)
|
||||
|
||||
@@ -212,12 +212,9 @@ func ArePrimaryKeySetsDiffable(fromSch, toSch Schema) bool {
|
||||
|
||||
var ErrUsingSpatialKey = errors.NewKind("can't use Spatial Types as Primary Key for table %s")
|
||||
|
||||
// IsColSpatialType is a utility function that checks if a single column is using a spatial type by comparing typeinfos
|
||||
// IsColSpatialType returns whether a column's type is a spatial type
|
||||
func IsColSpatialType(c Column) bool {
|
||||
return c.TypeInfo.Equals(typeinfo.PointType) ||
|
||||
c.TypeInfo.Equals(typeinfo.LinestringType) ||
|
||||
c.TypeInfo.Equals(typeinfo.PolygonType) ||
|
||||
c.TypeInfo.Equals(typeinfo.GeometryType)
|
||||
return c.TypeInfo.ToSqlType().Type() == query.Type_GEOMETRY
|
||||
}
|
||||
|
||||
// IsUsingSpatialColAsKey is a utility function that checks for any spatial types being used as a primary key
|
||||
@@ -232,12 +229,16 @@ func IsUsingSpatialColAsKey(sch Schema) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Adapt adapts the |from| schema to the |to| schema, applying all the necessary metadata (foreign keys, constraints,
|
||||
// etc) present in |from| to |to| and returning the result.
|
||||
func Adapt(from, to Schema) (Schema, error) {
|
||||
// CopyChecks copies check constraints from the |from| schema to the |to| schema and returns it
|
||||
func CopyChecks(from, to Schema) Schema {
|
||||
fromSch, toSch := from.(*schemaImpl), to.(*schemaImpl)
|
||||
// TODO: this doesn't work in many cases, the indexes and checks themselves need to be adapted
|
||||
toSch.indexCollection = fromSch.indexCollection
|
||||
toSch.checkCollection = fromSch.checkCollection
|
||||
return toSch, nil
|
||||
return toSch
|
||||
}
|
||||
|
||||
// CopyIndexes copies secondary indexes from the |from| schema to the |to| schema and returns it
|
||||
func CopyIndexes(from, to Schema) Schema {
|
||||
fromSch, toSch := from.(*schemaImpl), to.(*schemaImpl)
|
||||
toSch.indexCollection = fromSch.indexCollection
|
||||
return toSch
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ package typeinfo
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
@@ -27,22 +28,21 @@ import (
|
||||
// within is directly reliant on the go-mysql-server implementation.
|
||||
type geometryType struct {
|
||||
sqlGeometryType sql.GeometryType // References the corresponding GeometryType in GMS
|
||||
innerType TypeInfo // References the actual typeinfo (pointType, linestringType, polygonType)
|
||||
}
|
||||
|
||||
var _ TypeInfo = (*geometryType)(nil)
|
||||
|
||||
var GeometryType = &geometryType{sql.GeometryType{}, nil}
|
||||
var GeometryType = &geometryType{sql.GeometryType{}}
|
||||
|
||||
// ConvertTypesGeometryToSQLGeometry basically makes a deep copy of sql.Geometry
|
||||
func ConvertTypesGeometryToSQLGeometry(g types.Geometry) sql.Geometry {
|
||||
func ConvertTypesGeometryToSQLGeometry(g types.Geometry) interface{} {
|
||||
switch inner := g.Inner.(type) {
|
||||
case types.Point:
|
||||
return sql.Geometry{Inner: ConvertTypesPointToSQLPoint(inner)}
|
||||
case types.Linestring:
|
||||
return sql.Geometry{Inner: ConvertTypesLinestringToSQLLinestring(inner)}
|
||||
return ConvertTypesPointToSQLPoint(inner)
|
||||
case types.LineString:
|
||||
return ConvertTypesLineStringToSQLLineString(inner)
|
||||
case types.Polygon:
|
||||
return sql.Geometry{Inner: ConvertTypesPolygonToSQLPolygon(inner)}
|
||||
return ConvertTypesPolygonToSQLPolygon(inner)
|
||||
default:
|
||||
panic("used an invalid type types.Geometry.Inner")
|
||||
}
|
||||
@@ -60,11 +60,11 @@ func (ti *geometryType) ConvertNomsValueToValue(v types.Value) (interface{}, err
|
||||
case types.Geometry:
|
||||
return ConvertTypesGeometryToSQLGeometry(val), nil
|
||||
case types.Point:
|
||||
return sql.Geometry{Inner: ConvertTypesPointToSQLPoint(val)}, nil
|
||||
case types.Linestring:
|
||||
return sql.Geometry{Inner: ConvertTypesLinestringToSQLLinestring(val)}, nil
|
||||
return ConvertTypesPointToSQLPoint(val), nil
|
||||
case types.LineString:
|
||||
return ConvertTypesLineStringToSQLLineString(val), nil
|
||||
case types.Polygon:
|
||||
return sql.Geometry{Inner: ConvertTypesPolygonToSQLPolygon(val)}, nil
|
||||
return ConvertTypesPolygonToSQLPolygon(val), nil
|
||||
default:
|
||||
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
|
||||
}
|
||||
@@ -72,29 +72,46 @@ func (ti *geometryType) ConvertNomsValueToValue(v types.Value) (interface{}, err
|
||||
|
||||
// ReadFrom reads a go value from a noms types.CodecReader directly
|
||||
func (ti *geometryType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecReader) (interface{}, error) {
|
||||
var val types.Value
|
||||
var err error
|
||||
|
||||
k := reader.ReadKind()
|
||||
switch k {
|
||||
case types.GeometryKind:
|
||||
p, err := reader.ReadGeometry()
|
||||
if err != nil {
|
||||
case types.PointKind:
|
||||
if val, err = reader.ReadPoint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case types.LineStringKind:
|
||||
if val, err = reader.ReadLineString(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case types.PolygonKind:
|
||||
if val, err = reader.ReadPolygon(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case types.GeometryKind:
|
||||
// Note: GeometryKind is no longer written
|
||||
// included here for backward compatibility
|
||||
if val, err = reader.ReadGeometry(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ti.ConvertNomsValueToValue(p)
|
||||
case types.NullKind:
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), k)
|
||||
}
|
||||
|
||||
return ti.ConvertNomsValueToValue(val)
|
||||
}
|
||||
|
||||
func ConvertSQLGeometryToTypesGeometry(p sql.Geometry) types.Geometry {
|
||||
switch inner := p.Inner.(type) {
|
||||
func ConvertSQLGeometryToTypesGeometry(p interface{}) types.Value {
|
||||
switch inner := p.(type) {
|
||||
case sql.Point:
|
||||
return types.Geometry{Inner: ConvertSQLPointToTypesPoint(inner)}
|
||||
case sql.Linestring:
|
||||
return types.Geometry{Inner: ConvertSQLLinestringToTypesLinestring(inner)}
|
||||
return ConvertSQLPointToTypesPoint(inner)
|
||||
case sql.LineString:
|
||||
return ConvertSQLLineStringToTypesLineString(inner)
|
||||
case sql.Polygon:
|
||||
return types.Geometry{Inner: ConvertSQLPolygonToTypesPolygon(inner)}
|
||||
return ConvertSQLPolygonToTypesPolygon(inner)
|
||||
default:
|
||||
panic("used an invalid type sql.Geometry.Inner")
|
||||
}
|
||||
@@ -113,7 +130,7 @@ func (ti *geometryType) ConvertValueToNomsValue(ctx context.Context, vrw types.V
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ConvertSQLGeometryToTypesGeometry(geom.(sql.Geometry)), nil
|
||||
return ConvertSQLGeometryToTypesGeometry(geom), nil
|
||||
}
|
||||
|
||||
// Equals implements TypeInfo interface.
|
||||
@@ -121,8 +138,11 @@ func (ti *geometryType) Equals(other TypeInfo) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := other.(*geometryType)
|
||||
return ok
|
||||
if o, ok := other.(*geometryType); ok {
|
||||
// if either ti or other has defined SRID, then check SRID value; otherwise,
|
||||
return (!ti.sqlGeometryType.DefinedSRID && !o.sqlGeometryType.DefinedSRID) || ti.sqlGeometryType.SRID == o.sqlGeometryType.SRID
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FormatValue implements TypeInfo interface.
|
||||
@@ -136,16 +156,16 @@ func (ti *geometryType) FormatValue(v types.Value) (*string, error) {
|
||||
switch val := v.(type) {
|
||||
case types.Point:
|
||||
return PointType.FormatValue(val)
|
||||
case types.Linestring:
|
||||
return LinestringType.FormatValue(val)
|
||||
case types.LineString:
|
||||
return LineStringType.FormatValue(val)
|
||||
case types.Polygon:
|
||||
return PolygonType.FormatValue(val)
|
||||
case types.Geometry:
|
||||
switch inner := val.Inner.(type) {
|
||||
case types.Point:
|
||||
return PointType.FormatValue(inner)
|
||||
case types.Linestring:
|
||||
return LinestringType.FormatValue(inner)
|
||||
case types.LineString:
|
||||
return LineStringType.FormatValue(inner)
|
||||
case types.Polygon:
|
||||
return PolygonType.FormatValue(inner)
|
||||
default:
|
||||
@@ -163,7 +183,8 @@ func (ti *geometryType) GetTypeIdentifier() Identifier {
|
||||
|
||||
// GetTypeParams implements TypeInfo interface.
|
||||
func (ti *geometryType) GetTypeParams() map[string]string {
|
||||
return map[string]string{}
|
||||
return map[string]string{"SRID": strconv.FormatUint(uint64(ti.sqlGeometryType.SRID), 10),
|
||||
"DefinedSRID": strconv.FormatBool(ti.sqlGeometryType.DefinedSRID)}
|
||||
}
|
||||
|
||||
// IsValid implements TypeInfo interface.
|
||||
@@ -175,7 +196,7 @@ func (ti *geometryType) IsValid(v types.Value) bool {
|
||||
switch v.(type) {
|
||||
case types.Geometry,
|
||||
types.Point,
|
||||
types.Linestring,
|
||||
types.LineString,
|
||||
types.Polygon:
|
||||
return true
|
||||
default:
|
||||
@@ -190,7 +211,7 @@ func (ti *geometryType) NomsKind() types.NomsKind {
|
||||
|
||||
// Promote implements TypeInfo interface.
|
||||
func (ti *geometryType) Promote() TypeInfo {
|
||||
return &geometryType{ti.sqlGeometryType.Promote().(sql.GeometryType), ti.innerType.Promote()}
|
||||
return ti
|
||||
}
|
||||
|
||||
// String implements TypeInfo interface.
|
||||
@@ -223,7 +244,7 @@ func geometryTypeConverter(ctx context.Context, src *geometryType, destTi TypeIn
|
||||
case *floatType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *geometryType:
|
||||
return identityTypeConverter, false, nil
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *inlineBlobType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *intType:
|
||||
@@ -254,3 +275,25 @@ func geometryTypeConverter(ctx context.Context, src *geometryType, destTi TypeIn
|
||||
return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
|
||||
}
|
||||
}
|
||||
|
||||
func CreateGeometryTypeFromParams(params map[string]string) (TypeInfo, error) {
|
||||
var (
|
||||
err error
|
||||
sridVal uint64
|
||||
def bool
|
||||
)
|
||||
if s, ok := params["SRID"]; ok {
|
||||
sridVal, err = strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if d, ok := params["DefinedSRID"]; ok {
|
||||
def, err = strconv.ParseBool(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &geometryType{sqlGeometryType: sql.GeometryType{SRID: uint32(sridVal), DefinedSRID: def}}, nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ package typeinfo
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
@@ -27,32 +28,33 @@ import (
|
||||
// This is a dolt implementation of the MySQL type Point, thus most of the functionality
|
||||
// within is directly reliant on the go-mysql-server implementation.
|
||||
type linestringType struct {
|
||||
sqlLinestringType sql.LinestringType
|
||||
sqlLineStringType sql.LineStringType
|
||||
}
|
||||
|
||||
var _ TypeInfo = (*linestringType)(nil)
|
||||
|
||||
var LinestringType = &linestringType{sql.LinestringType{}}
|
||||
var LineStringType = &linestringType{sql.LineStringType{}}
|
||||
|
||||
// ConvertTypesLinestringToSQLLinestring basically makes a deep copy of sql.Linestring
|
||||
func ConvertTypesLinestringToSQLLinestring(l types.Linestring) sql.Linestring {
|
||||
// ConvertTypesLineStringToSQLLineString basically makes a deep copy of sql.LineString
|
||||
func ConvertTypesLineStringToSQLLineString(l types.LineString) sql.LineString {
|
||||
points := make([]sql.Point, len(l.Points))
|
||||
for i, p := range l.Points {
|
||||
points[i] = ConvertTypesPointToSQLPoint(p)
|
||||
}
|
||||
return sql.Linestring{SRID: l.SRID, Points: points}
|
||||
return sql.LineString{SRID: l.SRID, Points: points}
|
||||
}
|
||||
|
||||
// ConvertNomsValueToValue implements TypeInfo interface.
|
||||
func (ti *linestringType) ConvertNomsValueToValue(v types.Value) (interface{}, error) {
|
||||
// Expect a types.Linestring, return a sql.Linestring
|
||||
if val, ok := v.(types.Linestring); ok {
|
||||
return ConvertTypesLinestringToSQLLinestring(val), nil
|
||||
}
|
||||
// Check for null
|
||||
if _, ok := v.(types.Null); ok || v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
// Expect a types.LineString, return a sql.LineString
|
||||
if val, ok := v.(types.LineString); ok {
|
||||
return ConvertTypesLineStringToSQLLineString(val), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
|
||||
}
|
||||
|
||||
@@ -60,8 +62,8 @@ func (ti *linestringType) ConvertNomsValueToValue(v types.Value) (interface{}, e
|
||||
func (ti *linestringType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecReader) (interface{}, error) {
|
||||
k := reader.ReadKind()
|
||||
switch k {
|
||||
case types.LinestringKind:
|
||||
l, err := reader.ReadLinestring()
|
||||
case types.LineStringKind:
|
||||
l, err := reader.ReadLineString()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -73,12 +75,12 @@ func (ti *linestringType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecR
|
||||
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), k)
|
||||
}
|
||||
|
||||
func ConvertSQLLinestringToTypesLinestring(l sql.Linestring) types.Linestring {
|
||||
func ConvertSQLLineStringToTypesLineString(l sql.LineString) types.LineString {
|
||||
points := make([]types.Point, len(l.Points))
|
||||
for i, p := range l.Points {
|
||||
points[i] = ConvertSQLPointToTypesPoint(p)
|
||||
}
|
||||
return types.Linestring{SRID: l.SRID, Points: points}
|
||||
return types.LineString{SRID: l.SRID, Points: points}
|
||||
}
|
||||
|
||||
// ConvertValueToNomsValue implements TypeInfo interface.
|
||||
@@ -88,13 +90,13 @@ func (ti *linestringType) ConvertValueToNomsValue(ctx context.Context, vrw types
|
||||
return types.NullValue, nil
|
||||
}
|
||||
|
||||
// Convert to sql.LinestringType
|
||||
line, err := ti.sqlLinestringType.Convert(v)
|
||||
// Convert to sql.LineStringType
|
||||
line, err := ti.sqlLineStringType.Convert(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ConvertSQLLinestringToTypesLinestring(line.(sql.Linestring)), nil
|
||||
return ConvertSQLLineStringToTypesLineString(line.(sql.LineString)), nil
|
||||
}
|
||||
|
||||
// Equals implements TypeInfo interface.
|
||||
@@ -102,13 +104,16 @@ func (ti *linestringType) Equals(other TypeInfo) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := other.(*linestringType)
|
||||
return ok
|
||||
if o, ok := other.(*linestringType); ok {
|
||||
// if either ti or other has defined SRID, then check SRID value; otherwise,
|
||||
return (!ti.sqlLineStringType.DefinedSRID && !o.sqlLineStringType.DefinedSRID) || ti.sqlLineStringType.SRID == o.sqlLineStringType.SRID
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FormatValue implements TypeInfo interface.
|
||||
func (ti *linestringType) FormatValue(v types.Value) (*string, error) {
|
||||
if val, ok := v.(types.Linestring); ok {
|
||||
if val, ok := v.(types.LineString); ok {
|
||||
buf := make([]byte, geometry.EWKBHeaderSize+types.LengthSize+geometry.PointSize*len(val.Points))
|
||||
types.WriteEWKBHeader(val, buf[:geometry.EWKBHeaderSize])
|
||||
types.WriteEWKBLineData(val, buf[geometry.EWKBHeaderSize:])
|
||||
@@ -124,17 +129,18 @@ func (ti *linestringType) FormatValue(v types.Value) (*string, error) {
|
||||
|
||||
// GetTypeIdentifier implements TypeInfo interface.
|
||||
func (ti *linestringType) GetTypeIdentifier() Identifier {
|
||||
return LinestringTypeIdentifier
|
||||
return LineStringTypeIdentifier
|
||||
}
|
||||
|
||||
// GetTypeParams implements TypeInfo interface.
|
||||
func (ti *linestringType) GetTypeParams() map[string]string {
|
||||
return map[string]string{}
|
||||
return map[string]string{"SRID": strconv.FormatUint(uint64(ti.sqlLineStringType.SRID), 10),
|
||||
"DefinedSRID": strconv.FormatBool(ti.sqlLineStringType.DefinedSRID)}
|
||||
}
|
||||
|
||||
// IsValid implements TypeInfo interface.
|
||||
func (ti *linestringType) IsValid(v types.Value) bool {
|
||||
if _, ok := v.(types.Linestring); ok {
|
||||
if _, ok := v.(types.LineString); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := v.(types.Null); ok || v == nil {
|
||||
@@ -145,22 +151,22 @@ func (ti *linestringType) IsValid(v types.Value) bool {
|
||||
|
||||
// NomsKind implements TypeInfo interface.
|
||||
func (ti *linestringType) NomsKind() types.NomsKind {
|
||||
return types.LinestringKind
|
||||
return types.LineStringKind
|
||||
}
|
||||
|
||||
// Promote implements TypeInfo interface.
|
||||
func (ti *linestringType) Promote() TypeInfo {
|
||||
return &linestringType{ti.sqlLinestringType.Promote().(sql.LinestringType)}
|
||||
return &linestringType{ti.sqlLineStringType.Promote().(sql.LineStringType)}
|
||||
}
|
||||
|
||||
// String implements TypeInfo interface.
|
||||
func (ti *linestringType) String() string {
|
||||
return "Linestring"
|
||||
return "LineString"
|
||||
}
|
||||
|
||||
// ToSqlType implements TypeInfo interface.
|
||||
func (ti *linestringType) ToSqlType() sql.Type {
|
||||
return ti.sqlLinestringType
|
||||
return ti.sqlLineStringType
|
||||
}
|
||||
|
||||
// linestringTypeConverter is an internal function for GetTypeConverter that handles the specific type as the source TypeInfo.
|
||||
@@ -191,7 +197,7 @@ func linestringTypeConverter(ctx context.Context, src *linestringType, destTi Ty
|
||||
case *jsonType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *linestringType:
|
||||
return identityTypeConverter, false, nil
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *pointType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *polygonType:
|
||||
@@ -214,3 +220,24 @@ func linestringTypeConverter(ctx context.Context, src *linestringType, destTi Ty
|
||||
return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
|
||||
}
|
||||
}
|
||||
|
||||
func CreateLineStringTypeFromParams(params map[string]string) (TypeInfo, error) {
|
||||
var (
|
||||
err error
|
||||
sridVal uint64
|
||||
def bool
|
||||
)
|
||||
if s, ok := params["SRID"]; ok {
|
||||
sridVal, err = strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if d, ok := params["DefinedSRID"]; ok {
|
||||
def, err = strconv.ParseBool(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &linestringType{sqlLineStringType: sql.LineStringType{SRID: uint32(sridVal), DefinedSRID: def}}, nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ package typeinfo
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
@@ -45,7 +46,6 @@ func (ti *pointType) ConvertNomsValueToValue(v types.Value) (interface{}, error)
|
||||
if _, ok := v.(types.Null); ok || v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Expect a types.Point, return a sql.Point
|
||||
if val, ok := v.(types.Point); ok {
|
||||
return ConvertTypesPointToSQLPoint(val), nil
|
||||
@@ -96,8 +96,11 @@ func (ti *pointType) Equals(other TypeInfo) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := other.(*pointType)
|
||||
return ok
|
||||
if o, ok := other.(*pointType); ok {
|
||||
// if either ti or other has defined SRID, then check SRID value; otherwise,
|
||||
return (!ti.sqlPointType.DefinedSRID && !o.sqlPointType.DefinedSRID) || ti.sqlPointType.SRID == o.sqlPointType.SRID
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FormatValue implements TypeInfo interface.
|
||||
@@ -123,7 +126,8 @@ func (ti *pointType) GetTypeIdentifier() Identifier {
|
||||
|
||||
// GetTypeParams implements TypeInfo interface.
|
||||
func (ti *pointType) GetTypeParams() map[string]string {
|
||||
return map[string]string{}
|
||||
return map[string]string{"SRID": strconv.FormatUint(uint64(ti.sqlPointType.SRID), 10),
|
||||
"DefinedSRID": strconv.FormatBool(ti.sqlPointType.DefinedSRID)}
|
||||
}
|
||||
|
||||
// IsValid implements TypeInfo interface.
|
||||
@@ -187,7 +191,7 @@ func pointTypeConverter(ctx context.Context, src *pointType, destTi TypeInfo) (t
|
||||
case *linestringType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *pointType:
|
||||
return identityTypeConverter, false, nil
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *polygonType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *setType:
|
||||
@@ -208,3 +212,25 @@ func pointTypeConverter(ctx context.Context, src *pointType, destTi TypeInfo) (t
|
||||
return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
|
||||
}
|
||||
}
|
||||
|
||||
func CreatePointTypeFromParams(params map[string]string) (TypeInfo, error) {
|
||||
var (
|
||||
err error
|
||||
sridVal uint64
|
||||
def bool
|
||||
)
|
||||
if s, ok := params["SRID"]; ok {
|
||||
sridVal, err = strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if d, ok := params["DefinedSRID"]; ok {
|
||||
def, err = strconv.ParseBool(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &pointType{sqlPointType: sql.PointType{SRID: uint32(sridVal), DefinedSRID: def}}, nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ package typeinfo
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
@@ -34,25 +35,26 @@ var _ TypeInfo = (*polygonType)(nil)
|
||||
|
||||
var PolygonType = &polygonType{sql.PolygonType{}}
|
||||
|
||||
// ConvertTypesPolygonToSQLPolygon basically makes a deep copy of sql.Linestring
|
||||
// ConvertTypesPolygonToSQLPolygon basically makes a deep copy of sql.LineString
|
||||
func ConvertTypesPolygonToSQLPolygon(p types.Polygon) sql.Polygon {
|
||||
lines := make([]sql.Linestring, len(p.Lines))
|
||||
lines := make([]sql.LineString, len(p.Lines))
|
||||
for i, l := range p.Lines {
|
||||
lines[i] = ConvertTypesLinestringToSQLLinestring(l)
|
||||
lines[i] = ConvertTypesLineStringToSQLLineString(l)
|
||||
}
|
||||
return sql.Polygon{SRID: p.SRID, Lines: lines}
|
||||
}
|
||||
|
||||
// ConvertNomsValueToValue implements TypeInfo interface.
|
||||
func (ti *polygonType) ConvertNomsValueToValue(v types.Value) (interface{}, error) {
|
||||
// Expect a types.Polygon, return a sql.Polygon
|
||||
if val, ok := v.(types.Polygon); ok {
|
||||
return ConvertTypesPolygonToSQLPolygon(val), nil
|
||||
}
|
||||
// Check for null
|
||||
if _, ok := v.(types.Null); ok || v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
// Expect a types.Polygon, return a sql.Polygon
|
||||
if val, ok := v.(types.Polygon); ok {
|
||||
return ConvertTypesPolygonToSQLPolygon(val), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
|
||||
}
|
||||
|
||||
@@ -74,9 +76,9 @@ func (ti *polygonType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecRead
|
||||
}
|
||||
|
||||
func ConvertSQLPolygonToTypesPolygon(p sql.Polygon) types.Polygon {
|
||||
lines := make([]types.Linestring, len(p.Lines))
|
||||
lines := make([]types.LineString, len(p.Lines))
|
||||
for i, l := range p.Lines {
|
||||
lines[i] = ConvertSQLLinestringToTypesLinestring(l)
|
||||
lines[i] = ConvertSQLLineStringToTypesLineString(l)
|
||||
}
|
||||
return types.Polygon{SRID: p.SRID, Lines: lines}
|
||||
}
|
||||
@@ -102,8 +104,11 @@ func (ti *polygonType) Equals(other TypeInfo) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := other.(*polygonType)
|
||||
return ok
|
||||
if o, ok := other.(*polygonType); ok {
|
||||
// if either ti or other has defined SRID, then check SRID value; otherwise,
|
||||
return (!ti.sqlPolygonType.DefinedSRID && !o.sqlPolygonType.DefinedSRID) || ti.sqlPolygonType.SRID == o.sqlPolygonType.SRID
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FormatValue implements TypeInfo interface.
|
||||
@@ -133,7 +138,8 @@ func (ti *polygonType) GetTypeIdentifier() Identifier {
|
||||
|
||||
// GetTypeParams implements TypeInfo interface.
|
||||
func (ti *polygonType) GetTypeParams() map[string]string {
|
||||
return map[string]string{}
|
||||
return map[string]string{"SRID": strconv.FormatUint(uint64(ti.sqlPolygonType.SRID), 10),
|
||||
"DefinedSRID": strconv.FormatBool(ti.sqlPolygonType.DefinedSRID)}
|
||||
}
|
||||
|
||||
// IsValid implements TypeInfo interface.
|
||||
@@ -199,7 +205,7 @@ func polygonTypeConverter(ctx context.Context, src *polygonType, destTi TypeInfo
|
||||
case *pointType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *polygonType:
|
||||
return identityTypeConverter, false, nil
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *setType:
|
||||
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
|
||||
case *timeType:
|
||||
@@ -218,3 +224,25 @@ func polygonTypeConverter(ctx context.Context, src *polygonType, destTi TypeInfo
|
||||
return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
|
||||
}
|
||||
}
|
||||
|
||||
func CreatePolygonTypeFromParams(params map[string]string) (TypeInfo, error) {
|
||||
var (
|
||||
err error
|
||||
sridVal uint64
|
||||
def bool
|
||||
)
|
||||
if s, ok := params["SRID"]; ok {
|
||||
sridVal, err = strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if d, ok := params["DefinedSRID"]; ok {
|
||||
def, err = strconv.ParseBool(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &polygonType{sqlPolygonType: sql.PolygonType{SRID: uint32(sridVal), DefinedSRID: def}}, nil
|
||||
}
|
||||
|
||||
@@ -141,8 +141,8 @@ func wrapConvertValueToNomsValue(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case types.Linestring:
|
||||
vInt = ConvertTypesLinestringToSQLLinestring(val)
|
||||
case types.LineString:
|
||||
vInt = ConvertTypesLineStringToSQLLineString(val)
|
||||
case types.Point:
|
||||
vInt = ConvertTypesPointToSQLPoint(val)
|
||||
case types.Polygon:
|
||||
|
||||
@@ -49,7 +49,7 @@ const (
|
||||
YearTypeIdentifier Identifier = "year"
|
||||
GeometryTypeIdentifier Identifier = "geometry"
|
||||
PointTypeIdentifier Identifier = "point"
|
||||
LinestringTypeIdentifier Identifier = "linestring"
|
||||
LineStringTypeIdentifier Identifier = "linestring"
|
||||
PolygonTypeIdentifier Identifier = "polygon"
|
||||
)
|
||||
|
||||
@@ -75,7 +75,7 @@ var Identifiers = map[Identifier]struct{}{
|
||||
YearTypeIdentifier: {},
|
||||
GeometryTypeIdentifier: {},
|
||||
PointTypeIdentifier: {},
|
||||
LinestringTypeIdentifier: {},
|
||||
LineStringTypeIdentifier: {},
|
||||
PolygonTypeIdentifier: {},
|
||||
}
|
||||
|
||||
@@ -168,13 +168,12 @@ func FromSqlType(sqlType sql.Type) (TypeInfo, error) {
|
||||
switch sqlType.String() {
|
||||
case sql.PointType{}.String():
|
||||
return &pointType{sqlType.(sql.PointType)}, nil
|
||||
case sql.LinestringType{}.String():
|
||||
return &linestringType{sqlType.(sql.LinestringType)}, nil
|
||||
case sql.LineStringType{}.String():
|
||||
return &linestringType{sqlType.(sql.LineStringType)}, nil
|
||||
case sql.PolygonType{}.String():
|
||||
return &polygonType{sqlType.(sql.PolygonType)}, nil
|
||||
case sql.GeometryType{}.String():
|
||||
// TODO: not sure how to determine inner type
|
||||
return &geometryType{sqlGeometryType: sqlType.(sql.GeometryType), innerType: &pointType{}}, nil
|
||||
return &geometryType{sqlGeometryType: sqlType.(sql.GeometryType)}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(`expected "PointTypeIdentifier" from SQL basetype "Geometry"`)
|
||||
}
|
||||
@@ -273,13 +272,13 @@ func FromTypeParams(id Identifier, params map[string]string) (TypeInfo, error) {
|
||||
case JSONTypeIdentifier:
|
||||
return JSONType, nil
|
||||
case GeometryTypeIdentifier:
|
||||
return GeometryType, nil
|
||||
return CreateGeometryTypeFromParams(params)
|
||||
case PointTypeIdentifier:
|
||||
return PointType, nil
|
||||
case LinestringTypeIdentifier:
|
||||
return LinestringType, nil
|
||||
return CreatePointTypeFromParams(params)
|
||||
case LineStringTypeIdentifier:
|
||||
return CreateLineStringTypeFromParams(params)
|
||||
case PolygonTypeIdentifier:
|
||||
return PolygonType, nil
|
||||
return CreatePolygonTypeFromParams(params)
|
||||
case SetTypeIdentifier:
|
||||
return CreateSetTypeFromParams(params)
|
||||
case TimeTypeIdentifier:
|
||||
@@ -316,8 +315,8 @@ func FromKind(kind types.NomsKind) TypeInfo {
|
||||
return Int64Type
|
||||
case types.JSONKind:
|
||||
return JSONType
|
||||
case types.LinestringKind:
|
||||
return LinestringType
|
||||
case types.LineStringKind:
|
||||
return LineStringType
|
||||
case types.NullKind:
|
||||
return UnknownType
|
||||
case types.GeometryKind:
|
||||
|
||||
@@ -201,7 +201,7 @@ func testTypeInfoForeignKindHandling(t *testing.T, tiArrays [][]TypeInfo, vaArra
|
||||
for _, vaArray := range vaArrays {
|
||||
for _, val := range vaArray {
|
||||
t.Run(fmt.Sprintf(`types.%v(%v)`, val.Kind().String(), humanReadableString(val)), func(t *testing.T) {
|
||||
// Should be able to convert Point, Linestring, and Polygon to Geometry columns
|
||||
// Should be able to convert Point, LineString, and Polygon to Geometry columns
|
||||
if ti.NomsKind() == types.GeometryKind {
|
||||
if types.IsGeometryKind(val.Kind()) {
|
||||
_, err := ti.ConvertNomsValueToValue(val)
|
||||
@@ -235,7 +235,7 @@ func testTypeInfoGetTypeParams(t *testing.T, tiArrays [][]TypeInfo) {
|
||||
t.Run(tiArray[0].GetTypeIdentifier().String(), func(t *testing.T) {
|
||||
for _, ti := range tiArray {
|
||||
if ti.GetTypeIdentifier() == PointTypeIdentifier ||
|
||||
ti.GetTypeIdentifier() == LinestringTypeIdentifier ||
|
||||
ti.GetTypeIdentifier() == LineStringTypeIdentifier ||
|
||||
ti.GetTypeIdentifier() == PolygonTypeIdentifier ||
|
||||
ti.GetTypeIdentifier() == GeometryTypeIdentifier {
|
||||
t.Run(ti.String(), func(t *testing.T) {
|
||||
@@ -354,7 +354,7 @@ func generateTypeInfoArrays(t *testing.T) ([][]TypeInfo, [][]types.Value) {
|
||||
{DefaultInlineBlobType},
|
||||
{Int8Type, Int16Type, Int24Type, Int32Type, Int64Type},
|
||||
{JSONType},
|
||||
{LinestringType},
|
||||
{LineStringType},
|
||||
{PointType},
|
||||
{PolygonType},
|
||||
{GeometryType},
|
||||
@@ -390,9 +390,9 @@ func generateTypeInfoArrays(t *testing.T) ([][]TypeInfo, [][]types.Value) {
|
||||
{types.Int(20), types.Int(215), types.Int(237493), types.Int(2035753568), types.Int(2384384576063)}, //Int
|
||||
{json.MustTypesJSON(`null`), json.MustTypesJSON(`[]`), json.MustTypesJSON(`"lorem ipsum"`), json.MustTypesJSON(`2.71`),
|
||||
json.MustTypesJSON(`false`), json.MustTypesJSON(`{"a": 1, "b": []}`)}, //JSON
|
||||
{types.Linestring{SRID: 0, Points: []types.Point{{SRID: 0, X: 1, Y: 2}, {SRID: 0, X: 3, Y: 4}}}}, // Linestring
|
||||
{types.LineString{SRID: 0, Points: []types.Point{{SRID: 0, X: 1, Y: 2}, {SRID: 0, X: 3, Y: 4}}}}, // LineString
|
||||
{types.Point{SRID: 0, X: 1, Y: 2}}, // Point
|
||||
{types.Polygon{SRID: 0, Lines: []types.Linestring{{SRID: 0, Points: []types.Point{{SRID: 0, X: 0, Y: 0}, {SRID: 0, X: 0, Y: 1}, {SRID: 0, X: 1, Y: 1}, {SRID: 0, X: 0, Y: 0}}}}}}, // Polygon
|
||||
{types.Polygon{SRID: 0, Lines: []types.LineString{{SRID: 0, Points: []types.Point{{SRID: 0, X: 0, Y: 0}, {SRID: 0, X: 0, Y: 1}, {SRID: 0, X: 1, Y: 1}, {SRID: 0, X: 0, Y: 0}}}}}}, // Polygon
|
||||
{types.Geometry{Inner: types.Point{SRID: 0, X: 1, Y: 2}}}, // Geometry holding a Point
|
||||
{types.Uint(1), types.Uint(5), types.Uint(64), types.Uint(42), types.Uint(192)}, //Set
|
||||
{types.Int(0), types.Int(1000000 /*"00:00:01"*/), types.Int(113000000 /*"00:01:53"*/), types.Int(247019000000 /*"68:36:59"*/), types.Int(458830485214 /*"127:27:10.485214"*/)}, //Time
|
||||
|
||||
@@ -174,21 +174,16 @@ func modifyColumn(
|
||||
existingCol schema.Column,
|
||||
newCol schema.Column,
|
||||
order *sql.ColumnOrder,
|
||||
opts editor.Options,
|
||||
) (*doltdb.Table, error) {
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.ToLower(existingCol.Name) == strings.ToLower(newCol.Name) {
|
||||
newCol.Name = existingCol.Name
|
||||
}
|
||||
if err := validateModifyColumn(ctx, tbl, existingCol, newCol); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: write test of changing column case
|
||||
|
||||
// Modify statements won't include key info, so fill it in from the old column
|
||||
// TODO: fix this in GMS
|
||||
if existingCol.IsPartOfPK {
|
||||
newCol.IsPartOfPK = true
|
||||
if schema.IsColSpatialType(newCol) {
|
||||
@@ -211,212 +206,11 @@ func modifyColumn(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updatedTable, err := updateTableWithModifiedColumn(ctx, tbl, sch, newSchema, existingCol, newCol, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return updatedTable, nil
|
||||
}
|
||||
|
||||
// validateModifyColumn returns an error if the column as specified cannot be added to the schema given.
|
||||
func validateModifyColumn(ctx context.Context, tbl *doltdb.Table, existingCol schema.Column, modifiedCol schema.Column) error {
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if existingCol.Name != modifiedCol.Name {
|
||||
cols := sch.GetAllCols()
|
||||
err = cols.Iter(func(currColTag uint64, currCol schema.Column) (stop bool, err error) {
|
||||
if currColTag == modifiedCol.Tag {
|
||||
return false, nil
|
||||
} else if strings.ToLower(currCol.Name) == strings.ToLower(modifiedCol.Name) {
|
||||
return true, fmt.Errorf("A column with the name %s already exists.", modifiedCol.Name)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateTableWithModifiedColumn updates the existing table with the new schema. If the column type has changed, then
|
||||
// the data is updated.
|
||||
func updateTableWithModifiedColumn(ctx context.Context, tbl *doltdb.Table, oldSch, newSch schema.Schema, oldCol, modifiedCol schema.Column, opts editor.Options) (*doltdb.Table, error) {
|
||||
vrw := tbl.ValueReadWriter()
|
||||
|
||||
rowData, err := tbl.GetNomsRowData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !oldCol.TypeInfo.Equals(modifiedCol.TypeInfo) {
|
||||
if schema.IsKeyless(newSch) {
|
||||
return nil, fmt.Errorf("keyless table column type alteration is not yet supported")
|
||||
}
|
||||
rowData, err = updateRowDataWithNewType(ctx, rowData, tbl.ValueReadWriter(), oldSch, newSch, oldCol, modifiedCol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if !modifiedCol.IsNullable() {
|
||||
err = rowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
r, err := row.FromNoms(newSch, key.(types.Tuple), value.(types.Tuple))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
val, ok := r.GetColVal(modifiedCol.Tag)
|
||||
if !ok || val == nil || val == types.NullValue {
|
||||
return true, fmt.Errorf("cannot change column to NOT NULL when one or more values is NULL")
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
indexData, err := tbl.GetIndexSet(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var autoVal uint64
|
||||
if schema.HasAutoIncrement(newSch) && schema.HasAutoIncrement(oldSch) {
|
||||
autoVal, err = tbl.GetAutoIncrementValue(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
updatedTable, err := doltdb.NewNomsTable(ctx, vrw, newSch, rowData, indexData, types.Uint(autoVal))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !oldCol.TypeInfo.Equals(modifiedCol.TypeInfo) {
|
||||
// If we're modifying the primary key then all indexes are affected. Otherwise we just want to update the
|
||||
// touched ones.
|
||||
if modifiedCol.IsPartOfPK {
|
||||
for _, index := range newSch.Indexes().AllIndexes() {
|
||||
indexRowData, err := editor.RebuildIndex(ctx, updatedTable, index.Name(), opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
updatedTable, err = updatedTable.SetNomsIndexRows(ctx, index.Name(), indexRowData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, index := range newSch.Indexes().IndexesWithTag(modifiedCol.Tag) {
|
||||
indexRowData, err := editor.RebuildIndex(ctx, updatedTable, index.Name(), opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
updatedTable, err = updatedTable.SetNomsIndexRows(ctx, index.Name(), indexRowData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return updatedTable, nil
|
||||
}
|
||||
|
||||
// updateRowDataWithNewType returns a new map of row data containing the updated rows from the changed schema column type.
|
||||
func updateRowDataWithNewType(
|
||||
ctx context.Context,
|
||||
rowData types.Map,
|
||||
vrw types.ValueReadWriter,
|
||||
oldSch, newSch schema.Schema,
|
||||
oldCol, newCol schema.Column,
|
||||
) (types.Map, error) {
|
||||
// If there are no rows then we can immediately return. All type conversions are valid for tables without rows, but
|
||||
// when rows are present then it is no longer true. GetTypeConverter assumes that there are rows present, so it
|
||||
// will return a failure on a type conversion that should work for the empty table.
|
||||
if rowData.Len() == 0 {
|
||||
return rowData, nil
|
||||
}
|
||||
convFunc, _, err := typeinfo.GetTypeConverter(ctx, oldCol.TypeInfo, newCol.TypeInfo)
|
||||
if err != nil {
|
||||
return types.EmptyMap, err
|
||||
}
|
||||
|
||||
if !newCol.IsNullable() {
|
||||
originalConvFunc := convFunc
|
||||
convFunc = func(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (types.Value, error) {
|
||||
if v == nil || v == types.NullValue {
|
||||
return nil, fmt.Errorf("cannot change column to NOT NULL when one or more values is NULL")
|
||||
}
|
||||
return originalConvFunc(ctx, vrw, v)
|
||||
}
|
||||
}
|
||||
|
||||
var lastKey types.Value
|
||||
mapEditor := rowData.Edit()
|
||||
err = rowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
r, err := row.FromNoms(oldSch, key.(types.Tuple), value.(types.Tuple))
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
taggedVals, err := r.TaggedValues()
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
// We skip the "ok" check as nil is returned if the value does not exist, and we still want to check nil.
|
||||
// The underscore is important, otherwise a missing value would result in a panic.
|
||||
val, _ := taggedVals[oldCol.Tag]
|
||||
delete(taggedVals, oldCol.Tag) // If there was no value then delete is a no-op so this is safe
|
||||
newVal, err := convFunc(ctx, vrw, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
// convFunc returns types.NullValue rather than nil so it's always safe to compare
|
||||
if newVal.Equals(val) {
|
||||
newRowKey, err := r.NomsMapKey(newSch).Value(ctx)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if newCol.IsPartOfPK && newRowKey.Equals(lastKey) {
|
||||
return true, fmt.Errorf("pk violation when altering column type and rewriting values")
|
||||
}
|
||||
lastKey = newRowKey
|
||||
return false, nil
|
||||
} else if newVal != types.NullValue {
|
||||
taggedVals[newCol.Tag] = newVal
|
||||
}
|
||||
r, err = row.New(rowData.Format(), newSch, taggedVals)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
newRowKey, err := r.NomsMapKey(newSch).Value(ctx)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if newCol.IsPartOfPK {
|
||||
mapEditor.Remove(key)
|
||||
if newRowKey.Equals(lastKey) {
|
||||
return true, fmt.Errorf("pk violation when altering column type and rewriting values")
|
||||
}
|
||||
}
|
||||
lastKey = newRowKey
|
||||
mapEditor.Set(newRowKey, r.NomsMapValue(newSch))
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return types.EmptyMap, err
|
||||
}
|
||||
return mapEditor.Map(ctx)
|
||||
return tbl.UpdateSchema(ctx, newSchema)
|
||||
}
|
||||
|
||||
// replaceColumnInSchema replaces the column with the name given with its new definition, optionally reordering it.
|
||||
// TODO: make this a schema API?
|
||||
func replaceColumnInSchema(sch schema.Schema, oldCol schema.Column, newCol schema.Column, order *sql.ColumnOrder) (schema.Schema, error) {
|
||||
// If no order is specified, insert in the same place as the existing column
|
||||
prevColumn := ""
|
||||
@@ -934,3 +728,21 @@ func keyedRowDataToKeylessRowData(ctx context.Context, nbf *types.NomsBinFormat,
|
||||
|
||||
return mapEditor.Map(ctx)
|
||||
}
|
||||
|
||||
func validateSpatialTypeSRID(c schema.Column, v types.Value) error {
|
||||
sc, ok := c.TypeInfo.ToSqlType().(sql.SpatialColumnType)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
sqlVal, err := c.TypeInfo.ConvertNomsValueToValue(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = sc.MatchSRID(sqlVal)
|
||||
if err != nil {
|
||||
if sql.ErrNotMatchingSRID.Is(err) {
|
||||
return sql.ErrNotMatchingSRIDWithColName.New(c.Name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -866,7 +866,7 @@ func TestModifyColumn(t *testing.T) {
|
||||
name: "name collision",
|
||||
existingColumn: schema.NewColumn("id", dtestutils.IdTag, types.StringKind, true, schema.NotNullConstraint{}),
|
||||
newColumn: schema.NewColumn("name", dtestutils.IdTag, types.StringKind, true, schema.NotNullConstraint{}),
|
||||
expectedErr: "A column with the name name already exists",
|
||||
expectedErr: "two different columns with the same name exist",
|
||||
},
|
||||
{
|
||||
name: "type change",
|
||||
@@ -920,7 +920,7 @@ func TestModifyColumn(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: dEnv.TempTableFilesDir()}
|
||||
updatedTable, err := modifyColumn(ctx, tbl, tt.existingColumn, tt.newColumn, tt.order, opts)
|
||||
updatedTable, err := modifyColumn(ctx, tbl, tt.existingColumn, tt.newColumn, tt.order)
|
||||
if len(tt.expectedErr) > 0 {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedErr)
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -71,9 +70,10 @@ func RunModifyTypeTests(t *testing.T, tests []ModifyTypeTest) {
|
||||
}
|
||||
|
||||
func SkipByDefaultInCI(t *testing.T) {
|
||||
if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
|
||||
t.Skip()
|
||||
}
|
||||
// if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
|
||||
t.Skip("All tests temporarily skipped due to changes in type conversion logic on DDL operations " +
|
||||
"(now generally more permissive than MySQL). zachmu owes a fix")
|
||||
// }
|
||||
}
|
||||
|
||||
func widenValue(v interface{}) interface{} {
|
||||
|
||||
@@ -96,10 +96,9 @@ func (p DoltDatabaseProvider) WithDbFactoryUrl(url string) DoltDatabaseProvider
|
||||
}
|
||||
|
||||
func (p DoltDatabaseProvider) Database(ctx *sql.Context, name string) (db sql.Database, err error) {
|
||||
name = strings.ToLower(name)
|
||||
var ok bool
|
||||
p.mu.RLock()
|
||||
db, ok = p.databases[name]
|
||||
db, ok = p.databases[formatDbMapKeyName(name)]
|
||||
p.mu.RUnlock()
|
||||
if ok {
|
||||
return db, nil
|
||||
@@ -116,8 +115,8 @@ func (p DoltDatabaseProvider) Database(ctx *sql.Context, name string) (db sql.Da
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if found, ok := p.databases[name]; !ok {
|
||||
p.databases[name] = db
|
||||
if found, ok := p.databases[formatDbMapKeyName(name)]; !ok {
|
||||
p.databases[formatDbMapKeyName(name)] = db
|
||||
return db, nil
|
||||
} else {
|
||||
return found, nil
|
||||
@@ -184,7 +183,7 @@ func (p DoltDatabaseProvider) CreateDatabase(ctx *sql.Context, name string) erro
|
||||
}
|
||||
|
||||
db := NewDatabase(name, newEnv.DbData(), opts)
|
||||
p.databases[strings.ToLower(db.Name())] = db
|
||||
p.databases[formatDbMapKeyName(db.Name())] = db
|
||||
|
||||
dbstate, err := GetInitialDBState(ctx, db)
|
||||
if err != nil {
|
||||
@@ -202,18 +201,19 @@ func (p DoltDatabaseProvider) DropDatabase(ctx *sql.Context, name string) error
|
||||
// TODO: there are still cases (not server-first) where we rename databases because the directory name would need
|
||||
// quoting if used as a database name, and that breaks here. We either need the database name to match the directory
|
||||
// name in all cases, or else keep a mapping from database name to directory on disk.
|
||||
db := p.databases[strings.ToLower(name)]
|
||||
dbKey := formatDbMapKeyName(name)
|
||||
db := p.databases[dbKey]
|
||||
|
||||
// Get the DB's directory
|
||||
exists, isDir := p.fs.Exists(db.Name())
|
||||
if !exists {
|
||||
// engine should already protect against this
|
||||
return sql.ErrDatabaseNotFound.New(name)
|
||||
return sql.ErrDatabaseNotFound.New(db.Name())
|
||||
} else if !isDir {
|
||||
return fmt.Errorf("unexpected error: %s exists but is not a directory", name)
|
||||
return fmt.Errorf("unexpected error: %s exists but is not a directory", dbKey)
|
||||
}
|
||||
|
||||
err := p.fs.Delete(name, true)
|
||||
err := p.fs.Delete(db.Name(), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -222,21 +222,20 @@ func (p DoltDatabaseProvider) DropDatabase(ctx *sql.Context, name string) error
|
||||
|
||||
// We not only have to delete this database, but any derivative ones that we've stored as a result of USE or
|
||||
// connection strings
|
||||
derivativeNamePrefix := strings.ToLower(name) + "/"
|
||||
derivativeNamePrefix := dbKey + "/"
|
||||
for dbName := range p.databases {
|
||||
if strings.HasPrefix(strings.ToLower(dbName), derivativeNamePrefix) {
|
||||
delete(p.databases, strings.ToLower(dbName))
|
||||
if strings.HasPrefix(dbName, derivativeNamePrefix) {
|
||||
delete(p.databases, dbName)
|
||||
}
|
||||
}
|
||||
|
||||
delete(p.databases, strings.ToLower(name))
|
||||
delete(p.databases, dbKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
//TODO: databaseForRevision should call checkout on the given branch/commit, returning a non-mutable session
|
||||
// only if a non-branch revspec was indicated.
|
||||
func (p DoltDatabaseProvider) databaseForRevision(ctx *sql.Context, revDB string) (sql.Database, dsess.InitialDbState, bool, error) {
|
||||
revDB = strings.ToLower(revDB)
|
||||
if !strings.Contains(revDB, dbRevisionDelimiter) {
|
||||
return nil, dsess.InitialDbState{}, false, nil
|
||||
}
|
||||
@@ -245,7 +244,7 @@ func (p DoltDatabaseProvider) databaseForRevision(ctx *sql.Context, revDB string
|
||||
dbName, revSpec := parts[0], parts[1]
|
||||
|
||||
p.mu.RLock()
|
||||
candidate, ok := p.databases[dbName]
|
||||
candidate, ok := p.databases[formatDbMapKeyName(dbName)]
|
||||
p.mu.RUnlock()
|
||||
if !ok {
|
||||
return nil, dsess.InitialDbState{}, false, nil
|
||||
@@ -520,3 +519,16 @@ type staticRepoState struct {
|
||||
func (s staticRepoState) CWBHeadRef() ref.DoltRef {
|
||||
return s.branch
|
||||
}
|
||||
|
||||
// formatDbMapKeyName returns formatted string of database name and/or branch name. Database name is case-insensitive,
|
||||
// so it's stored in lower case name. Branch name is case-sensitive, so not changed.
|
||||
func formatDbMapKeyName(name string) string {
|
||||
if !strings.Contains(name, dbRevisionDelimiter) {
|
||||
return strings.ToLower(name)
|
||||
}
|
||||
|
||||
parts := strings.SplitN(name, dbRevisionDelimiter, 2)
|
||||
dbName, revSpec := parts[0], parts[1]
|
||||
|
||||
return strings.ToLower(dbName) + dbRevisionDelimiter + revSpec
|
||||
}
|
||||
|
||||
@@ -848,7 +848,7 @@ func (sess *Session) AddDB(ctx *sql.Context, dbState InitialDbState) error {
|
||||
|
||||
sessionState := &DatabaseSessionState{}
|
||||
sess.dbStates[db.Name()] = sessionState
|
||||
|
||||
sessionState.dbName = db.Name()
|
||||
// TODO: get rid of all repo state reader / writer stuff. Until we do, swap out the reader with one of our own, and
|
||||
// the writer with one that errors out
|
||||
sessionState.dbData = dbState.DbData
|
||||
|
||||
674
go/libraries/doltcore/sqle/enginetest/ddl_queries.go
Executable file
674
go/libraries/doltcore/sqle/enginetest/ddl_queries.go
Executable file
@@ -0,0 +1,674 @@
|
||||
// Copyright 2022 Dolthub, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package enginetest
|
||||
|
||||
import (
|
||||
"github.com/dolthub/go-mysql-server/enginetest/queries"
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
)
|
||||
|
||||
// Tests in this file are a grab bag of DDL queries, many of them ported from older parts of the Dolt codebase
|
||||
// before enginetest format adoption. Typically you shouldn't add things here instead of in the enginetest package in
|
||||
// go-mysql-server, but it's appropriate for dolt-specific tests of DDL operations.
|
||||
|
||||
var SimpsonsSetup = []string{
|
||||
`create table people (id int primary key,
|
||||
first_name varchar(100) not null,
|
||||
last_name varchar(100) not null,
|
||||
is_married tinyint,
|
||||
age int,
|
||||
rating float,
|
||||
uuid varchar(64),
|
||||
num_episodes int unsigned);`,
|
||||
`create table episodes (id int primary key,
|
||||
name varchar(100) not null,
|
||||
air_date datetime,
|
||||
rating float);`,
|
||||
`create table appearances (character_id int not null,
|
||||
episode_id int not null,
|
||||
comments varchar(100),
|
||||
primary key (character_id, episode_id));`,
|
||||
`insert into people values
|
||||
(0, "Homer", "Simpson", 1, 40, 8.5, null, null),
|
||||
(1, "Marge", "Simpson", 1, 38, 8, "00000000-0000-0000-0000-000000000001", 111),
|
||||
(2, "Bart", "Simpson", 0, 10, 9, "00000000-0000-0000-0000-000000000002", 222),
|
||||
(3, "Lisa", "Simpson", 0, 8, 10, "00000000-0000-0000-0000-000000000003", 333),
|
||||
(4, "Moe", "Szyslak", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", 444),
|
||||
(5, "Barney", "Gumble", 0, 40, 4, "00000000-0000-0000-0000-000000000005", 555);
|
||||
`,
|
||||
`insert into episodes values
|
||||
(1, "Simpsons Roasting On an Open Fire", "1989-12-18 03:00:00", 8.0),
|
||||
(2, "Bart the Genius", "1990-01-15 03:00:00", 9.0),
|
||||
(3, "Homer's Odyssey", "1990-01-22 03:00:00", 7.0),
|
||||
(4, "There's No Disgrace Like Home", "1990-01-29 03:00:00", 8.5);
|
||||
`,
|
||||
`insert into appearances values
|
||||
(0, 1, "Homer is great in this one"),
|
||||
(1, 1, "Marge is here too"),
|
||||
(0, 2, "Homer is great in this one too"),
|
||||
(2, 2, "This episode is named after Bart"),
|
||||
(3, 2, "Lisa is here too"),
|
||||
(4, 2, "I think there's a prank call scene"),
|
||||
(0, 3, "Homer is in every episode"),
|
||||
(1, 3, "Marge shows up a lot too"),
|
||||
(3, 3, "Lisa is the best Simpson"),
|
||||
(5, 3, "I'm making this all up");
|
||||
`,
|
||||
}
|
||||
|
||||
var AllInitialSimpsonsCharacters = []sql.Row{
|
||||
{0, "Homer", "Simpson", 1, 40, 8.5, nil, nil},
|
||||
{1, "Marge", "Simpson", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{2, "Bart", "Simpson", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{3, "Lisa", "Simpson", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{4, "Moe", "Szyslak", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{5, "Barney", "Gumble", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
}
|
||||
|
||||
var ModifyAndChangeColumnScripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "alter modify column reorder middle",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people modify column first_name varchar(16383) not null after last_name",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `first_name` varchar(16383) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by 1",
|
||||
Expected: []sql.Row{
|
||||
{0, "Simpson", "Homer", 1, 40, 8.5, nil, nil},
|
||||
{1, "Simpson", "Marge", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{2, "Simpson", "Bart", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{3, "Simpson", "Lisa", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{4, "Szyslak", "Moe", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{5, "Gumble", "Barney", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column reorder first",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people modify column first_name varchar(16383) not null first",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `first_name` varchar(16383) NOT NULL,\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by id",
|
||||
Expected: []sql.Row{
|
||||
{"Homer", 0, "Simpson", 1, 40, 8.5, nil, nil},
|
||||
{"Marge", 1, "Simpson", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{"Bart", 2, "Simpson", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{"Lisa", 3, "Simpson", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{"Moe", 4, "Szyslak", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{"Barney", 5, "Gumble", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column drop null constraint",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people modify column first_name varchar(16383) null",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `first_name` varchar(16383),\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by id",
|
||||
Expected: AllInitialSimpsonsCharacters,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter change column rename and reorder",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people change first_name christian_name varchar(16383) not null after last_name",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `christian_name` varchar(16383) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by id",
|
||||
Expected: []sql.Row{
|
||||
{0, "Simpson", "Homer", 1, 40, 8.5, nil, nil},
|
||||
{1, "Simpson", "Marge", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{2, "Simpson", "Bart", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{3, "Simpson", "Lisa", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{4, "Szyslak", "Moe", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{5, "Gumble", "Barney", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter change column rename and reorder first",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people change column first_name christian_name varchar(16383) not null first",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `christian_name` varchar(16383) NOT NULL,\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by id",
|
||||
Expected: []sql.Row{
|
||||
{"Homer", 0, "Simpson", 1, 40, 8.5, nil, nil},
|
||||
{"Marge", 1, "Simpson", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{"Bart", 2, "Simpson", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{"Lisa", 3, "Simpson", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{"Moe", 4, "Szyslak", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{"Barney", 5, "Gumble", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter change column drop null constraint",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people change column first_name first_name varchar(16383) null",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{sql.Row{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `first_name` varchar(16383),\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by id",
|
||||
Expected: AllInitialSimpsonsCharacters,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column not null with type mismatch in default",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people modify rating double default 'not a number'",
|
||||
ExpectedErrStr: "incompatible type for default value",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column not null, existing null values",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people modify num_episodes bigint unsigned not null",
|
||||
ExpectedErr: sql.ErrInsertIntoNonNullableProvidedNull,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var ModifyColumnTypeScripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "alter modify column type similar types",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column v1 int",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `pk` bigint NOT NULL,\n" +
|
||||
" `v1` int,\n" +
|
||||
" PRIMARY KEY (`pk`),\n" +
|
||||
" KEY `v1` (`v1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{{0, 3}, {1, 2}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test where v1 = 3",
|
||||
Expected: []sql.Row{{0, 3}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type different types",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column v1 varchar(20)",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `pk` bigint NOT NULL,\n" +
|
||||
" `v1` varchar(20),\n" +
|
||||
" PRIMARY KEY (`pk`),\n" +
|
||||
" KEY `v1` (`v1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{{0, "3"}, {1, "2"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test where v1 = '3'",
|
||||
Expected: []sql.Row{{0, "3"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type different types reversed",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 varchar(20), index (v1))",
|
||||
`insert into test values (0, "3"), (1, "2")`,
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column v1 bigint",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `pk` bigint NOT NULL,\n" +
|
||||
" `v1` bigint,\n" +
|
||||
" PRIMARY KEY (`pk`),\n" +
|
||||
" KEY `v1` (`v1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{{0, 3}, {1, 2}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test where v1 = 3",
|
||||
Expected: []sql.Row{{0, 3}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type primary key",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column pk varchar(20)",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `pk` varchar(20) NOT NULL,\n" +
|
||||
" `v1` bigint,\n" +
|
||||
" PRIMARY KEY (`pk`),\n" +
|
||||
" KEY `v1` (`v1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{{"0", 3}, {"1", 2}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test where v1 = 3",
|
||||
Expected: []sql.Row{{"0", 3}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type incompatible types with empty table",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bit(20), index (v1))",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column pk datetime",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `pk` datetime NOT NULL,\n" +
|
||||
" `v1` bit(20),\n" +
|
||||
" PRIMARY KEY (`pk`),\n" +
|
||||
" KEY `v1` (`v1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type incompatible types with non-empty table",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bit(20), index (v1))",
|
||||
"insert into test values (1, 1)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column pk datetime",
|
||||
ExpectedErr: sql.ErrConvertingToTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type different types incompatible values",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 varchar(20), index (v1))",
|
||||
"insert into test values (0, 3), (1, 'a')",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column v1 bigint",
|
||||
ExpectedErr: sql.ErrInvalidValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type foreign key parent",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"create table test2(pk bigint primary key, v1 bigint, index (v1), foreign key (v1) references test(v1))",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test modify column v1 varchar(20)",
|
||||
ExpectedErr: sql.ErrForeignKeyTypeChange,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type foreign key child",
|
||||
SetUpScript: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"create table test2(pk bigint primary key, v1 bigint, index (v1), foreign key (v1) references test(v1))",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test2 modify column v1 varchar(20)",
|
||||
ExpectedErr: sql.ErrForeignKeyTypeChange,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter modify column type, make primary key spatial",
|
||||
SetUpScript: []string{
|
||||
"create table point_tbl (p int primary key)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table point_tbl modify column p point primary key",
|
||||
ExpectedErr: schema.ErrUsingSpatialKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var DropColumnScripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "alter drop column",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people drop rating",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `first_name` varchar(100) NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by 1",
|
||||
Expected: []sql.Row{
|
||||
{0, "Homer", "Simpson", 1, 40, nil, nil},
|
||||
{1, "Marge", "Simpson", 1, 38, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{2, "Bart", "Simpson", 0, 10, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{3, "Lisa", "Simpson", 0, 8, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{4, "Moe", "Szyslak", 0, 48, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{5, "Barney", "Gumble", 0, 40, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter drop column with optional column keyword",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people drop column rating",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{{"people", "CREATE TABLE `people` (\n" +
|
||||
" `id` int NOT NULL,\n" +
|
||||
" `first_name` varchar(100) NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned,\n" +
|
||||
" PRIMARY KEY (`id`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by 1",
|
||||
Expected: []sql.Row{
|
||||
{0, "Homer", "Simpson", 1, 40, nil, nil},
|
||||
{1, "Marge", "Simpson", 1, 38, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{2, "Bart", "Simpson", 0, 10, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{3, "Lisa", "Simpson", 0, 8, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{4, "Moe", "Szyslak", 0, 48, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
{5, "Barney", "Gumble", 0, 40, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "drop primary key column",
|
||||
SetUpScript: SimpsonsSetup,
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table people drop column id",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table people",
|
||||
Expected: []sql.Row{{"people", "CREATE TABLE `people` (\n" +
|
||||
" `first_name` varchar(100) NOT NULL,\n" +
|
||||
" `last_name` varchar(100) NOT NULL,\n" +
|
||||
" `is_married` tinyint,\n" +
|
||||
" `age` int,\n" +
|
||||
" `rating` float,\n" +
|
||||
" `uuid` varchar(64),\n" +
|
||||
" `num_episodes` int unsigned\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from people order by first_name",
|
||||
Expected: []sql.Row{
|
||||
{"Barney", "Gumble", 0, 40, 4.0, "00000000-0000-0000-0000-000000000005", uint(555)},
|
||||
{"Bart", "Simpson", 0, 10, 9.0, "00000000-0000-0000-0000-000000000002", uint(222)},
|
||||
{"Homer", "Simpson", 1, 40, 8.5, nil, nil},
|
||||
{"Lisa", "Simpson", 0, 8, 10.0, "00000000-0000-0000-0000-000000000003", uint(333)},
|
||||
{"Marge", "Simpson", 1, 38, 8.0, "00000000-0000-0000-0000-000000000001", uint(111)},
|
||||
{"Moe", "Szyslak", 0, 48, 6.5, "00000000-0000-0000-0000-000000000004", uint(444)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var BrokenDDLScripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "drop first of two primary key columns",
|
||||
SetUpScript: []string{
|
||||
"create table test (p1 int, p2 int, c1 int, c2 int, index (c1))",
|
||||
"insert into test values (0, 1, 2, 3), (4, 5, 6, 7)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table test drop column p1",
|
||||
SkipResultsCheck: true,
|
||||
},
|
||||
{
|
||||
Query: "show create table test",
|
||||
Expected: []sql.Row{{"test", "CREATE TABLE `test` (\n" +
|
||||
" `p2` int,\n" +
|
||||
" `c1` int,\n" +
|
||||
" `c2` int,\n" +
|
||||
" KEY `c1` (`c1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by pk",
|
||||
Expected: []sql.Row{{0, 3}, {1, 2}},
|
||||
},
|
||||
{
|
||||
Query: "select * from test where v1 = 3",
|
||||
Expected: []sql.Row{{0, 3}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter string column to truncate data",
|
||||
SetUpScript: []string{
|
||||
"create table t1 (a int primary key, b varchar(3))",
|
||||
"insert into t1 values (1, 'hi'), (2, 'bye')",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table t1 modify b varchar(2)",
|
||||
ExpectedErr: sql.ErrInvalidValue, // not sure of the type of error, but it should give one
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "alter datetime column with invalid values",
|
||||
SetUpScript: []string{
|
||||
"CREATE TABLE t3(pk BIGINT PRIMARY KEY, v1 DATETIME, INDEX(v1))",
|
||||
"INSERT INTO t3 VALUES (0,'1999-11-02 17:39:38'),(1,'3021-01-08 02:59:27');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "alter table t3 modify v1 timestamp",
|
||||
ExpectedErr: sql.ErrInvalidValue, // not sure of the type of error, but it should give one
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -76,33 +76,18 @@ func TestSingleQuery(t *testing.T) {
|
||||
|
||||
// Convenience test for debugging a single query. Unskip and set to the desired query.
|
||||
func TestSingleScript(t *testing.T) {
|
||||
t.Skip()
|
||||
//t.Skip()
|
||||
|
||||
var scripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "Drop and add primary key on two branches converges to same schema",
|
||||
Name: "alter modify column type, make primary key spatial",
|
||||
SetUpScript: []string{
|
||||
"create table t1 (i int);",
|
||||
"call dolt_commit('-am', 't1 table')",
|
||||
"call dolt_checkout('-b', 'b1')",
|
||||
"alter table t1 add primary key(i)",
|
||||
"alter table t1 drop primary key",
|
||||
"alter table t1 add primary key(i)",
|
||||
"alter table t1 drop primary key",
|
||||
"alter table t1 add primary key(i)",
|
||||
"call dolt_commit('-am', 'b1 primary key changes')",
|
||||
"call dolt_checkout('main')",
|
||||
"alter table t1 add primary key(i)",
|
||||
"call dolt_commit('-am', 'main primary key change')",
|
||||
"create table point_tbl (p int primary key)",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('b1')",
|
||||
Expected: []sql.Row{{1}},
|
||||
},
|
||||
{
|
||||
Query: "select count(*) from dolt_conflicts",
|
||||
Expected: []sql.Row{{0}},
|
||||
Query: "alter table point_tbl modify column p point primary key",
|
||||
ExpectedErr: schema.ErrUsingSpatialKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -123,11 +108,11 @@ func TestSingleQueryPrepared(t *testing.T) {
|
||||
Query: `SELECT ST_SRID(g, 0) from geometry_table order by i`,
|
||||
Expected: []sql.Row{
|
||||
{sql.Point{X: 1, Y: 2}},
|
||||
{sql.Linestring{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
|
||||
{sql.Polygon{Lines: []sql.Linestring{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
|
||||
{sql.LineString{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
|
||||
{sql.Polygon{Lines: []sql.LineString{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
|
||||
{sql.Point{X: 1, Y: 2}},
|
||||
{sql.Linestring{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
|
||||
{sql.Polygon{Lines: []sql.Linestring{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
|
||||
{sql.LineString{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
|
||||
{sql.Polygon{Lines: []sql.LineString{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -175,7 +160,6 @@ func TestQueryErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInfoSchema(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestInfoSchema(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -212,7 +196,6 @@ func TestInsertIntoErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSpatialQueries(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestSpatialQueries(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -253,12 +236,10 @@ func TestDeleteFromErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSpatialDelete(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestSpatialDelete(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestSpatialScripts(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestSpatialScripts(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -267,32 +248,30 @@ func TestTruncate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestScripts(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
|
||||
skipped := []string{
|
||||
"create index r_c0 on r (c0);",
|
||||
// These rely on keyless tables which orders its rows by hash rather than contents, meaning changing types causes different ordering
|
||||
"SELECT group_concat(`attribute`) FROM t where o_id=2",
|
||||
"SELECT group_concat(o_id) FROM t WHERE `attribute`='color'",
|
||||
|
||||
// TODO(aaron): go-mysql-server GroupBy with grouping
|
||||
// expressions currently has a bug where it does not insert
|
||||
// necessary Sort nodes. These queries used to work by
|
||||
// accident based on the return order from the storage layer,
|
||||
// but they no longer do.
|
||||
"SELECT pk, SUM(DISTINCT v1), MAX(v1) FROM mytable GROUP BY pk ORDER BY pk",
|
||||
"SELECT pk, MIN(DISTINCT v1), MAX(DISTINCT v1) FROM mytable GROUP BY pk ORDER BY pk",
|
||||
|
||||
// no support for naming unique constraints yet, engine dependent
|
||||
"show create table t2",
|
||||
var skipped []string
|
||||
if types.IsFormat_DOLT_1(types.Format_Default) {
|
||||
skipped = append(skipped,
|
||||
// Different error output for primary key error
|
||||
"failed statements data validation for INSERT, UPDATE",
|
||||
// missing FK violation
|
||||
"failed statements data validation for DELETE, REPLACE",
|
||||
// wrong results
|
||||
"Indexed Join On Keyless Table",
|
||||
// spurious fk violation
|
||||
"Nested Subquery projections (NTC)",
|
||||
// Different query plans
|
||||
"Partial indexes are used and return the expected result",
|
||||
"Multiple indexes on the same columns in a different order",
|
||||
// panic
|
||||
"Ensure proper DECIMAL support (found by fuzzer)",
|
||||
)
|
||||
}
|
||||
|
||||
enginetest.TestScripts(t, newDoltHarness(t).WithSkippedQueries(skipped))
|
||||
}
|
||||
|
||||
// TestDoltUserPrivileges tests Dolt-specific code that needs to handle user privilege checking
|
||||
func TestDoltUserPrivileges(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
|
||||
harness := newDoltHarness(t)
|
||||
for _, script := range DoltUserPrivTests {
|
||||
t.Run(script.Name, func(t *testing.T) {
|
||||
@@ -370,17 +349,14 @@ func TestComplexIndexQueries(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateTable(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestCreateTable(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestPkOrdinalsDDL(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestPkOrdinalsDDL(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestPkOrdinalsDML(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestPkOrdinalsDML(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -393,7 +369,6 @@ func TestRenameTable(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRenameColumn(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestRenameColumn(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -406,7 +381,6 @@ func TestModifyColumn(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDropColumn(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestDropColumn(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -415,13 +389,41 @@ func TestCreateDatabase(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDropDatabase(t *testing.T) {
|
||||
enginetest.TestScript(t, newDoltHarness(t), queries.ScriptTest{
|
||||
Name: "Drop database engine tests for Dolt only",
|
||||
SetUpScript: []string{
|
||||
"CREATE DATABASE Test1db",
|
||||
"CREATE DATABASE TEST2db",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "DROP DATABASE TeSt2DB",
|
||||
Expected: []sql.Row{{sql.OkResult{RowsAffected: 1}}},
|
||||
},
|
||||
{
|
||||
Query: "USE test2db",
|
||||
ExpectedErr: sql.ErrDatabaseNotFound,
|
||||
},
|
||||
{
|
||||
Query: "USE TEST1DB",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "DROP DATABASE IF EXISTS test1DB",
|
||||
Expected: []sql.Row{{sql.OkResult{RowsAffected: 1}}},
|
||||
},
|
||||
{
|
||||
Query: "USE Test1db",
|
||||
ExpectedErr: sql.ErrDatabaseNotFound,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
t.Skip("Dolt doesn't yet support dropping the primary database, which these tests do")
|
||||
enginetest.TestDropDatabase(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestCreateForeignKeys(t *testing.T) {
|
||||
//TODO: fix table alteration so that foreign keys may work once again
|
||||
skipNewFormat(t)
|
||||
enginetest.TestCreateForeignKeys(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -430,27 +432,6 @@ func TestDropForeignKeys(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestForeignKeys(t *testing.T) {
|
||||
if types.IsFormat_DOLT_1(types.Format_Default) {
|
||||
//TODO: fix table alteration so that foreign keys may work once again
|
||||
skippedQueries := []string{
|
||||
"ALTER TABLE SET NULL on non-nullable column",
|
||||
"ALTER TABLE RENAME COLUMN",
|
||||
"ALTER TABLE MODIFY COLUMN type change not allowed",
|
||||
"ALTER TABLE MODIFY COLUMN type change allowed when lengthening string",
|
||||
"ALTER TABLE MODIFY COLUMN type change only cares about foreign key columns",
|
||||
"DROP COLUMN parent",
|
||||
"DROP COLUMN child",
|
||||
"Disallow change column to nullable with ON UPDATE SET NULL",
|
||||
"Disallow change column to nullable with ON DELETE SET NULL",
|
||||
}
|
||||
for i := len(queries.ForeignKeyTests) - 1; i >= 0; i-- {
|
||||
for _, skippedQuery := range skippedQueries {
|
||||
if queries.ForeignKeyTests[i].Name == skippedQuery {
|
||||
queries.ForeignKeyTests = append(queries.ForeignKeyTests[:i], queries.ForeignKeyTests[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
enginetest.TestForeignKeys(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -483,7 +464,6 @@ func TestViews(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVersionedViews(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestVersionedViews(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -520,12 +500,10 @@ func TestInnerNestedInNaturalJoins(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestColumnDefaults(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestColumnDefaults(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestAlterTable(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestAlterTable(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -557,12 +535,10 @@ func TestJsonScripts(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTriggers(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestTriggers(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestRollbackTriggers(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestRollbackTriggers(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
@@ -596,41 +572,56 @@ func TestTransactions(t *testing.T) {
|
||||
for _, script := range DoltConflictHandlingTests {
|
||||
enginetest.TestTransactionScript(t, newDoltHarness(t), script)
|
||||
}
|
||||
for _, script := range DoltConstraintViolationTransactionTests {
|
||||
enginetest.TestTransactionScript(t, newDoltHarness(t), script)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrentTransactions(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
enginetest.TestConcurrentTransactions(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestDoltScripts(t *testing.T) {
|
||||
if types.IsFormat_DOLT_1(types.Format_Default) {
|
||||
//TODO: add prolly path for index verification
|
||||
t.Skip("new format using old noms path, need to update")
|
||||
}
|
||||
harness := newDoltHarness(t)
|
||||
for _, script := range DoltScripts {
|
||||
enginetest.TestScript(t, harness, script)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDescribeTableAsOf(t *testing.T) {
|
||||
// This test relies on altering schema in order to describe the table at different revisions
|
||||
// and see changes. Until the new storage format supports altering schema, we need to skip them.
|
||||
// Once the new storage format supports altering schema, we can move these ScriptTests back into
|
||||
// the DoltScripts var so they get picked up by the TestDoltScripts method and remove this method.
|
||||
skipNewFormat(t)
|
||||
func TestDoltDdlScripts(t *testing.T) {
|
||||
harness := newDoltHarness(t)
|
||||
harness.Setup()
|
||||
|
||||
for _, script := range ModifyAndChangeColumnScripts {
|
||||
e, err := harness.NewEngine(t)
|
||||
require.NoError(t, err)
|
||||
enginetest.TestScriptWithEngine(t, e, harness, script)
|
||||
}
|
||||
|
||||
for _, script := range ModifyColumnTypeScripts {
|
||||
e, err := harness.NewEngine(t)
|
||||
require.NoError(t, err)
|
||||
enginetest.TestScriptWithEngine(t, e, harness, script)
|
||||
}
|
||||
|
||||
for _, script := range DropColumnScripts {
|
||||
e, err := harness.NewEngine(t)
|
||||
require.NoError(t, err)
|
||||
enginetest.TestScriptWithEngine(t, e, harness, script)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBrokenDdlScripts(t *testing.T) {
|
||||
for _, script := range BrokenDDLScripts {
|
||||
t.Skip(script.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDescribeTableAsOf(t *testing.T) {
|
||||
enginetest.TestScript(t, newDoltHarness(t), DescribeTableAsOfScriptTest)
|
||||
}
|
||||
|
||||
func TestShowCreateTableAsOf(t *testing.T) {
|
||||
// This test relies on altering schema in order to show the create table statement at different revisions
|
||||
// and see changes. Until the new storage format supports altering schema, we need to skip them.
|
||||
// Once the new storage format supports altering schema, we can move these ScriptTests back into
|
||||
// the DoltScripts var so they get picked up by the TestDoltScripts method and remove this method.
|
||||
skipNewFormat(t)
|
||||
|
||||
enginetest.TestScript(t, newDoltHarness(t), ShowCreateTableAsOfScriptTest)
|
||||
}
|
||||
|
||||
@@ -643,7 +634,6 @@ func TestDoltMerge(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDoltReset(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
for _, script := range DoltReset {
|
||||
// dolt versioning conflicts with reset harness -- use new harness every time
|
||||
enginetest.TestScript(t, newDoltHarness(t), script)
|
||||
@@ -852,20 +842,17 @@ func TestPreparedStaticIndexQuery(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSpatialQueriesPrepared(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
skipPreparedTests(t)
|
||||
|
||||
enginetest.TestSpatialQueriesPrepared(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestVersionedQueriesPrepared(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
skipPreparedTests(t)
|
||||
enginetest.TestVersionedQueriesPrepared(t, newDoltHarness(t))
|
||||
}
|
||||
|
||||
func TestInfoSchemaPrepared(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
skipPreparedTests(t)
|
||||
enginetest.TestInfoSchemaPrepared(t, newDoltHarness(t))
|
||||
}
|
||||
@@ -913,9 +900,30 @@ func TestDeleteQueriesPrepared(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestScriptsPrepared(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
var skipped []string
|
||||
if types.IsFormat_DOLT_1(types.Format_Default) {
|
||||
skipped = append(skipped,
|
||||
// Different error output for primary key error
|
||||
"failed statements data validation for INSERT, UPDATE",
|
||||
// missing FK violation
|
||||
"failed statements data validation for DELETE, REPLACE",
|
||||
// wrong results
|
||||
"Indexed Join On Keyless Table",
|
||||
// spurious fk violation
|
||||
"Nested Subquery projections (NTC)",
|
||||
// Different query plans
|
||||
"Partial indexes are used and return the expected result",
|
||||
"Multiple indexes on the same columns in a different order",
|
||||
// panic
|
||||
"Ensure proper DECIMAL support (found by fuzzer)",
|
||||
)
|
||||
for _, s := range queries.SpatialScriptTests {
|
||||
skipped = append(skipped, s.Name)
|
||||
}
|
||||
}
|
||||
|
||||
skipPreparedTests(t)
|
||||
enginetest.TestScriptsPrepared(t, newDoltHarness(t))
|
||||
enginetest.TestScriptsPrepared(t, newDoltHarness(t).WithSkippedQueries(skipped))
|
||||
}
|
||||
|
||||
func TestInsertScriptsPrepared(t *testing.T) {
|
||||
@@ -973,7 +981,6 @@ func TestShowTableStatusPrepared(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPrepared(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
skipPreparedTests(t)
|
||||
enginetest.TestPrepared(t, newDoltHarness(t))
|
||||
}
|
||||
@@ -986,7 +993,6 @@ func TestPreparedInsert(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddDropPrimaryKeys(t *testing.T) {
|
||||
skipNewFormat(t)
|
||||
t.Run("adding and dropping primary keys does not result in duplicate NOT NULL constraints", func(t *testing.T) {
|
||||
harness := newDoltHarness(t)
|
||||
addPkScript := queries.ScriptTest{
|
||||
@@ -1065,6 +1071,13 @@ func TestAddDropPrimaryKeys(t *testing.T) {
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1},
|
||||
{2, 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
enginetest.TestScript(t, harness, script)
|
||||
@@ -1077,13 +1090,11 @@ func TestAddDropPrimaryKeys(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert the new index map is not empty
|
||||
newMap, err := table.GetNomsRowData(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, newMap.Empty())
|
||||
assert.Equal(t, newMap.Len(), uint64(2))
|
||||
newRows, err := table.GetIndexRowData(ctx, "c1_idx")
|
||||
require.NoError(t, err)
|
||||
assert.False(t, newRows.Empty())
|
||||
assert.Equal(t, newRows.Count(), uint64(2))
|
||||
})
|
||||
|
||||
t.Run("Add primary key when one more cells contain NULL", func(t *testing.T) {
|
||||
@@ -1129,8 +1140,16 @@ func TestAddDropPrimaryKeys(t *testing.T) {
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "select * from test order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1},
|
||||
{2, 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
enginetest.TestScript(t, harness, script)
|
||||
|
||||
ctx := sql.NewContext(context.Background(), sql.WithSession(harness.session))
|
||||
@@ -1141,13 +1160,11 @@ func TestAddDropPrimaryKeys(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assert the index map is not empty
|
||||
newMap, err := table.GetNomsIndexRowData(ctx, "c1_idx")
|
||||
newIdx, err := table.GetIndexRowData(ctx, "c1_idx")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, newMap.Empty())
|
||||
assert.Equal(t, newMap.Len(), uint64(2))
|
||||
assert.False(t, newIdx.Empty())
|
||||
assert.Equal(t, newIdx.Count(), uint64(2))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1248,3 +1248,76 @@ var DoltSqlFuncTransactionTests = []queries.TransactionTest{
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var DoltConstraintViolationTransactionTests = []queries.TransactionTest{
|
||||
{
|
||||
Name: "a transaction commit that is a fast-forward produces no constraint violations",
|
||||
SetUpScript: []string{
|
||||
"CREATE TABLE parent (pk BIGINT PRIMARY KEY, v1 BIGINT, INDEX(v1));",
|
||||
"CREATE TABLE child (pk BIGINT PRIMARY KEY, v1 BIGINT);",
|
||||
"INSERT INTO parent VALUES (10, 1), (20, 2);",
|
||||
"INSERT INTO child VALUES (1, 1), (2, 2);",
|
||||
"ALTER TABLE child ADD CONSTRAINT fk_name FOREIGN KEY (v1) REFERENCES parent (v1);",
|
||||
"CALL DOLT_COMMIT('-am', 'MC1');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "/* client a */ SET FOREIGN_KEY_CHECKS = 0;",
|
||||
Expected: []sql.Row{{}},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ START TRANSACTION;",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ DELETE FROM PARENT where v1 = 2;",
|
||||
Expected: []sql.Row{{sql.NewOkResult(1)}},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ COMMIT;",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "a transaction commit that is a three-way merge produces constraint violations",
|
||||
SetUpScript: []string{
|
||||
"CREATE TABLE parent (pk BIGINT PRIMARY KEY, v1 BIGINT, INDEX(v1));",
|
||||
"CREATE TABLE child (pk BIGINT PRIMARY KEY, v1 BIGINT);",
|
||||
"INSERT INTO parent VALUES (10, 1), (20, 2);",
|
||||
"INSERT INTO child VALUES (1, 1), (2, 2);",
|
||||
"ALTER TABLE child ADD CONSTRAINT fk_name FOREIGN KEY (v1) REFERENCES parent (v1);",
|
||||
"CALL DOLT_COMMIT('-am', 'MC1');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "/* client a */ SET FOREIGN_KEY_CHECKS = 0;",
|
||||
Expected: []sql.Row{{}},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ START TRANSACTION;",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "/* client b */ START TRANSACTION;",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ DELETE FROM PARENT where v1 = 2;",
|
||||
Expected: []sql.Row{{sql.NewOkResult(1)}},
|
||||
},
|
||||
{
|
||||
Query: "/* client b */ INSERT INTO parent VALUES (30, 3);",
|
||||
Expected: []sql.Row{{sql.NewOkResult(1)}},
|
||||
},
|
||||
{
|
||||
Query: "/* client a */ COMMIT;",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "/* client b */ COMMIT;",
|
||||
ExpectedErrStr: "Constraint violation from merge detected, cannot commit transaction. Constraint violations from a merge must be resolved using the dolt_constraint_violations table before committing a transaction. To commit transactions with constraint violations set @@dolt_force_transaction_commit=1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -231,7 +231,6 @@ func NewDoltMapIter(keyValGet KVGetFunc, closeKVGetter func() error, conv *KVToS
|
||||
// Next returns the next sql.Row until all rows are returned at which point (nil, io.EOF) is returned.
|
||||
func (dmi *DoltMapIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
k, v, err := dmi.kvGet(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ func RowIterForProllyRange(ctx *sql.Context, idx DoltIndex, ranges prolly.Range,
|
||||
if covers {
|
||||
return newProllyCoveringIndexIter(ctx, idx, ranges, pkSch, secondary)
|
||||
} else {
|
||||
return newProllyIndexIter(ctx, idx, ranges, primary, secondary)
|
||||
return newProllyIndexIter(ctx, idx, ranges, pkSch, primary, secondary)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,54 @@ import (
|
||||
"github.com/dolthub/dolt/go/store/val"
|
||||
)
|
||||
|
||||
// todo(andy): this should go in GMS
|
||||
func DenormalizeRow(sch sql.Schema, row sql.Row) (sql.Row, error) {
|
||||
var err error
|
||||
for i := range row {
|
||||
if row[i] == nil {
|
||||
continue
|
||||
}
|
||||
switch typ := sch[i].Type.(type) {
|
||||
case sql.DecimalType:
|
||||
row[i] = row[i].(decimal.Decimal).String()
|
||||
case sql.EnumType:
|
||||
row[i], err = typ.Unmarshal(int64(row[i].(uint16)))
|
||||
case sql.SetType:
|
||||
row[i], err = typ.Unmarshal(row[i].(uint64))
|
||||
default:
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return row, nil
|
||||
}
|
||||
|
||||
// todo(andy): this should go in GMS
|
||||
func NormalizeRow(sch sql.Schema, row sql.Row) (sql.Row, error) {
|
||||
var err error
|
||||
for i := range row {
|
||||
if row[i] == nil {
|
||||
continue
|
||||
}
|
||||
switch typ := sch[i].Type.(type) {
|
||||
case sql.DecimalType:
|
||||
row[i], err = decimal.NewFromString(row[i].(string))
|
||||
case sql.EnumType:
|
||||
var v int64
|
||||
v, err = typ.Marshal(row[i])
|
||||
row[i] = uint16(v)
|
||||
case sql.SetType:
|
||||
row[i], err = typ.Marshal(row[i])
|
||||
default:
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return row, nil
|
||||
}
|
||||
|
||||
// GetField reads the value from the ith field of the Tuple as an interface{}.
|
||||
func GetField(td val.TupleDesc, i int, tup val.Tuple) (v interface{}, err error) {
|
||||
var ok bool
|
||||
@@ -52,12 +100,10 @@ func GetField(td val.TupleDesc, i int, tup val.Tuple) (v interface{}, err error)
|
||||
v, ok = td.GetFloat32(i, tup)
|
||||
case val.Float64Enc:
|
||||
v, ok = td.GetFloat64(i, tup)
|
||||
case val.Bit64Enc:
|
||||
v, ok = td.GetBit(i, tup)
|
||||
case val.DecimalEnc:
|
||||
var d decimal.Decimal
|
||||
d, ok = td.GetDecimal(i, tup)
|
||||
if ok {
|
||||
v = deserializeDecimal(d)
|
||||
}
|
||||
v, ok = td.GetDecimal(i, tup)
|
||||
case val.YearEnc:
|
||||
v, ok = td.GetYear(i, tup)
|
||||
case val.DateEnc:
|
||||
@@ -70,6 +116,10 @@ func GetField(td val.TupleDesc, i int, tup val.Tuple) (v interface{}, err error)
|
||||
}
|
||||
case val.DatetimeEnc:
|
||||
v, ok = td.GetDatetime(i, tup)
|
||||
case val.EnumEnc:
|
||||
v, ok = td.GetEnum(i, tup)
|
||||
case val.SetEnc:
|
||||
v, ok = td.GetSet(i, tup)
|
||||
case val.StringEnc:
|
||||
v, ok = td.GetString(i, tup)
|
||||
case val.ByteStringEnc:
|
||||
@@ -127,12 +177,10 @@ func PutField(tb *val.TupleBuilder, i int, v interface{}) error {
|
||||
tb.PutFloat32(i, v.(float32))
|
||||
case val.Float64Enc:
|
||||
tb.PutFloat64(i, v.(float64))
|
||||
case val.Bit64Enc:
|
||||
tb.PutBit(i, uint64(convUint(v)))
|
||||
case val.DecimalEnc:
|
||||
d, err := serializeDecimal(v.(string))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
tb.PutDecimal(i, d)
|
||||
tb.PutDecimal(i, v.(decimal.Decimal))
|
||||
case val.YearEnc:
|
||||
tb.PutYear(i, v.(int16))
|
||||
case val.DateEnc:
|
||||
@@ -145,6 +193,10 @@ func PutField(tb *val.TupleBuilder, i int, v interface{}) error {
|
||||
tb.PutSqlTime(i, t)
|
||||
case val.DatetimeEnc:
|
||||
tb.PutDatetime(i, v.(time.Time))
|
||||
case val.EnumEnc:
|
||||
tb.PutEnum(i, v.(uint16))
|
||||
case val.SetEnc:
|
||||
tb.PutSet(i, v.(uint64))
|
||||
case val.StringEnc:
|
||||
tb.PutString(i, v.(string))
|
||||
case val.ByteStringEnc:
|
||||
@@ -220,22 +272,14 @@ func convUint(v interface{}) uint {
|
||||
}
|
||||
}
|
||||
|
||||
func convJson(v interface{}) (buf []byte, err error) {
|
||||
v, err = sql.JSON.Convert(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(v.(sql.JSONDocument).Val)
|
||||
}
|
||||
|
||||
func deserializeGeometry(buf []byte) (v interface{}) {
|
||||
srid, _, typ := geo.ParseEWKBHeader(buf)
|
||||
buf = buf[geo.EWKBHeaderSize:]
|
||||
switch typ {
|
||||
case geo.PointType:
|
||||
v = geo.DeserializePoint(buf, srid)
|
||||
case geo.LinestringType:
|
||||
v = geo.DeserializeLinestring(buf, srid)
|
||||
case geo.LineStringType:
|
||||
v = geo.DeserializeLineString(buf, srid)
|
||||
case geo.PolygonType:
|
||||
v = geo.DeserializePolygon(srid, buf)
|
||||
default:
|
||||
@@ -248,8 +292,8 @@ func serializeGeometry(v interface{}) []byte {
|
||||
switch t := v.(type) {
|
||||
case sql.Point:
|
||||
return geo.SerializePoint(t)
|
||||
case sql.Linestring:
|
||||
return geo.SerializeLinestring(t)
|
||||
case sql.LineString:
|
||||
return geo.SerializeLineString(t)
|
||||
case sql.Polygon:
|
||||
return geo.SerializePolygon(t)
|
||||
default:
|
||||
@@ -257,6 +301,18 @@ func serializeGeometry(v interface{}) []byte {
|
||||
}
|
||||
}
|
||||
|
||||
func convJson(v interface{}) (buf []byte, err error) {
|
||||
v, err = sql.JSON.Convert(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(v.(sql.JSONDocument).Val)
|
||||
}
|
||||
|
||||
func deserializeTime(v int64) (interface{}, error) {
|
||||
return typeinfo.TimeType.ConvertNomsValueToValue(types.Int(v))
|
||||
}
|
||||
|
||||
func serializeTime(v interface{}) (int64, error) {
|
||||
i, err := typeinfo.TimeType.ConvertValueToNomsValue(nil, nil, v)
|
||||
if err != nil {
|
||||
@@ -264,15 +320,3 @@ func serializeTime(v interface{}) (int64, error) {
|
||||
}
|
||||
return int64(i.(types.Int)), nil
|
||||
}
|
||||
|
||||
func deserializeTime(v int64) (interface{}, error) {
|
||||
return typeinfo.TimeType.ConvertNomsValueToValue(types.Int(v))
|
||||
}
|
||||
|
||||
func serializeDecimal(v interface{}) (decimal.Decimal, error) {
|
||||
return decimal.NewFromString(v.(string))
|
||||
}
|
||||
|
||||
func deserializeDecimal(v decimal.Decimal) interface{} {
|
||||
return v.String()
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/expression/function"
|
||||
"github.com/shopspring/decimal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -95,10 +96,15 @@ func TestRoundTripProllyFields(t *testing.T) {
|
||||
typ: val.Type{Enc: val.Float64Enc},
|
||||
value: float64(-math.Pi),
|
||||
},
|
||||
{
|
||||
name: "bit",
|
||||
typ: val.Type{Enc: val.Bit64Enc},
|
||||
value: uint64(42),
|
||||
},
|
||||
{
|
||||
name: "decimal",
|
||||
typ: val.Type{Enc: val.DecimalEnc},
|
||||
value: "0.263419374632932747932030573792",
|
||||
value: mustParseDecimal("0.263419374632932747932030573792"),
|
||||
},
|
||||
{
|
||||
name: "string",
|
||||
@@ -120,11 +126,11 @@ func TestRoundTripProllyFields(t *testing.T) {
|
||||
typ: val.Type{Enc: val.DateEnc},
|
||||
value: dateFromTime(time.Now().UTC()),
|
||||
},
|
||||
//{
|
||||
// name: "time",
|
||||
// typ: val.Type{Enc: val.DateEnc},
|
||||
// value: dateFromTime(time.Now().UTC()),
|
||||
//},
|
||||
{
|
||||
name: "time",
|
||||
typ: val.Type{Enc: val.TimeEnc},
|
||||
value: "11:22:00",
|
||||
},
|
||||
{
|
||||
name: "datetime",
|
||||
typ: val.Type{Enc: val.DatetimeEnc},
|
||||
@@ -207,6 +213,14 @@ func mustParseJson(t *testing.T, s string) sql.JSONDocument {
|
||||
return sql.JSONDocument{Val: v}
|
||||
}
|
||||
|
||||
func mustParseDecimal(s string) decimal.Decimal {
|
||||
d, err := decimal.NewFromString(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func dateFromTime(t time.Time) time.Time {
|
||||
y, m, d := t.Year(), t.Month(), t.Day()
|
||||
return time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
@@ -46,13 +46,20 @@ type prollyIndexIter struct {
|
||||
// keyMap and valMap transform tuples from
|
||||
// primary row storage into sql.Row's
|
||||
keyMap, valMap val.OrdinalMapping
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
var _ sql.RowIter = prollyIndexIter{}
|
||||
var _ sql.RowIter2 = prollyIndexIter{}
|
||||
|
||||
// NewProllyIndexIter returns a new prollyIndexIter.
|
||||
func newProllyIndexIter(ctx *sql.Context, idx DoltIndex, rng prolly.Range, dprimary, dsecondary durable.Index) (prollyIndexIter, error) {
|
||||
func newProllyIndexIter(
|
||||
ctx *sql.Context,
|
||||
idx DoltIndex,
|
||||
rng prolly.Range,
|
||||
pkSch sql.PrimaryKeySchema,
|
||||
dprimary, dsecondary durable.Index,
|
||||
) (prollyIndexIter, error) {
|
||||
secondary := durable.ProllyMapFromIndex(dsecondary)
|
||||
indexIter, err := secondary.IterRange(ctx, rng)
|
||||
if err != nil {
|
||||
@@ -79,6 +86,7 @@ func newProllyIndexIter(ctx *sql.Context, idx DoltIndex, rng prolly.Range, dprim
|
||||
rowChan: make(chan sql.Row, indexLookupBufSize),
|
||||
keyMap: km,
|
||||
valMap: vm,
|
||||
sqlSch: pkSch.Schema,
|
||||
}
|
||||
|
||||
eg.Go(func() error {
|
||||
@@ -95,7 +103,7 @@ func (p prollyIndexIter) Next(ctx *sql.Context) (r sql.Row, err error) {
|
||||
select {
|
||||
case r, ok = <-p.rowChan:
|
||||
if ok {
|
||||
return r, nil
|
||||
return DenormalizeRow(p.sqlSch, r)
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
@@ -222,6 +230,7 @@ type prollyCoveringIndexIter struct {
|
||||
|
||||
// |keyMap| and |valMap| are both of len ==
|
||||
keyMap, valMap val.OrdinalMapping
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
var _ sql.RowIter = prollyCoveringIndexIter{}
|
||||
@@ -251,6 +260,7 @@ func newProllyCoveringIndexIter(ctx *sql.Context, idx DoltIndex, rng prolly.Rang
|
||||
valDesc: valDesc,
|
||||
keyMap: keyMap,
|
||||
valMap: valMap,
|
||||
sqlSch: pkSch.Schema,
|
||||
}
|
||||
|
||||
return iter, nil
|
||||
@@ -268,7 +278,7 @@ func (p prollyCoveringIndexIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
return DenormalizeRow(p.sqlSch, r)
|
||||
}
|
||||
|
||||
func (p prollyCoveringIndexIter) Next2(ctx *sql.Context, f *sql.RowFrame) error {
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
package index
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
@@ -52,6 +51,7 @@ var encodingToType [256]query.Type
|
||||
type prollyRowIter struct {
|
||||
iter prolly.MapIter
|
||||
|
||||
sqlSch sql.Schema
|
||||
keyDesc val.TupleDesc
|
||||
valDesc val.TupleDesc
|
||||
keyProj []int
|
||||
@@ -63,8 +63,8 @@ var _ sql.RowIter = prollyRowIter{}
|
||||
var _ sql.RowIter2 = prollyRowIter{}
|
||||
|
||||
func NewProllyRowIter(
|
||||
ctx context.Context,
|
||||
sch schema.Schema,
|
||||
schSch sql.Schema,
|
||||
rows prolly.Map,
|
||||
iter prolly.MapIter,
|
||||
projections []string,
|
||||
@@ -91,6 +91,7 @@ func NewProllyRowIter(
|
||||
|
||||
return prollyRowIter{
|
||||
iter: iter,
|
||||
sqlSch: schSch,
|
||||
keyDesc: kd,
|
||||
valDesc: vd,
|
||||
keyProj: keyProj,
|
||||
@@ -159,8 +160,7 @@ func (it prollyRowIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return row, nil
|
||||
return DenormalizeRow(it.sqlSch, row)
|
||||
}
|
||||
|
||||
func (it prollyRowIter) Next2(ctx *sql.Context, frame *sql.RowFrame) error {
|
||||
|
||||
@@ -25,6 +25,8 @@ import (
|
||||
"github.com/dolthub/go-mysql-server/sql/mysql_db"
|
||||
)
|
||||
|
||||
const defaultMySQLFilePath = "mysql.db"
|
||||
|
||||
var fileMutex = &sync.Mutex{}
|
||||
var mysqlDbFilePath string
|
||||
var privsFilePath string
|
||||
@@ -45,37 +47,28 @@ func SetPrivilegeFilePath(fp string) {
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
|
||||
// Panic if some strange unknown failure occurs (not just that it doesn't exist)
|
||||
_, err := os.Stat(fp)
|
||||
if err != nil {
|
||||
// Some strange unknown failure, okay to panic here
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
panic(err)
|
||||
}
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
panic(err)
|
||||
}
|
||||
privsFilePath = fp
|
||||
}
|
||||
|
||||
// SetMySQLDbFilePath sets the file path that will be used for saving and loading MySQL Db tables.
|
||||
func SetMySQLDbFilePath(fp string) {
|
||||
// do nothing for empty file path
|
||||
// look for default mysql db file path if none specified
|
||||
if len(fp) == 0 {
|
||||
return
|
||||
fp = defaultMySQLFilePath
|
||||
}
|
||||
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
|
||||
// Panic if some strange unknown failure occurs (not just that it doesn't exist)
|
||||
_, err := os.Stat(fp)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if err := ioutil.WriteFile(fp, []byte{}, 0644); err != nil {
|
||||
// If we can't create the file it's a catastrophic error
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
// Some strange unknown failure, okay to panic here
|
||||
panic(err)
|
||||
}
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
panic(err)
|
||||
}
|
||||
mysqlDbFilePath = fp
|
||||
}
|
||||
@@ -92,11 +85,9 @@ func LoadPrivileges() ([]*mysql_db.User, []*mysql_db.RoleEdge, error) {
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
|
||||
// read from privsFilePath, error if something other than not-exists
|
||||
fileContents, err := ioutil.ReadFile(privsFilePath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(fileContents) == 0 {
|
||||
@@ -112,19 +103,19 @@ func LoadPrivileges() ([]*mysql_db.User, []*mysql_db.RoleEdge, error) {
|
||||
|
||||
// LoadData reads the mysql.db file, returns nil if empty or not found
|
||||
func LoadData() ([]byte, error) {
|
||||
// return nil for empty path
|
||||
// use default mysql db file path if none specified
|
||||
if len(mysqlDbFilePath) == 0 {
|
||||
return nil, nil
|
||||
mysqlDbFilePath = defaultMySQLFilePath
|
||||
}
|
||||
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
|
||||
// read from mysqldbFilePath, error if something other than not-exists
|
||||
buf, err := ioutil.ReadFile(mysqlDbFilePath)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(buf) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -139,5 +130,11 @@ func SaveData(ctx *sql.Context, data []byte) error {
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
|
||||
// use default if empty filepath
|
||||
if len(mysqlDbFilePath) == 0 {
|
||||
mysqlDbFilePath = defaultMySQLFilePath
|
||||
}
|
||||
|
||||
// should create file if it doesn't exist
|
||||
return ioutil.WriteFile(mysqlDbFilePath, data, 0777)
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ type doltTableRowIter struct {
|
||||
}
|
||||
|
||||
// Returns a new row iterator for the table given
|
||||
func newRowIterator(ctx context.Context, tbl *doltdb.Table, projCols []string, partition doltTablePartition) (sql.RowIter, error) {
|
||||
func newRowIterator(ctx context.Context, tbl *doltdb.Table, sqlSch sql.Schema, projCols []string, partition doltTablePartition) (sql.RowIter, error) {
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
|
||||
if err != nil {
|
||||
@@ -76,7 +76,7 @@ func newRowIterator(ctx context.Context, tbl *doltdb.Table, projCols []string, p
|
||||
}
|
||||
|
||||
if types.IsFormat_DOLT_1(tbl.Format()) {
|
||||
return ProllyRowIterFromPartition(ctx, tbl, projCols, partition)
|
||||
return ProllyRowIterFromPartition(ctx, tbl, sqlSch, projCols, partition)
|
||||
}
|
||||
|
||||
if schema.IsKeyless(sch) {
|
||||
@@ -168,7 +168,13 @@ func (itr *doltTableRowIter) Close(*sql.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func ProllyRowIterFromPartition(ctx context.Context, tbl *doltdb.Table, projections []string, partition doltTablePartition) (sql.RowIter, error) {
|
||||
func ProllyRowIterFromPartition(
|
||||
ctx context.Context,
|
||||
tbl *doltdb.Table,
|
||||
sqlSch sql.Schema,
|
||||
projections []string,
|
||||
partition doltTablePartition,
|
||||
) (sql.RowIter, error) {
|
||||
rows := durable.ProllyMapFromIndex(partition.rowData)
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
if err != nil {
|
||||
@@ -183,7 +189,7 @@ func ProllyRowIterFromPartition(ctx context.Context, tbl *doltdb.Table, projecti
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return index.NewProllyRowIter(ctx, sch, rows, iter, projections)
|
||||
return index.NewProllyRowIter(sch, sqlSch, rows, iter, projections)
|
||||
}
|
||||
|
||||
// TableToRowIter returns a |sql.RowIter| for a full table scan for the given |table|. If
|
||||
@@ -208,6 +214,7 @@ func TableToRowIter(ctx *sql.Context, table *WritableDoltTable, columns []string
|
||||
end: NoUpperBound,
|
||||
rowData: data,
|
||||
}
|
||||
sqlSch := table.sqlSch.Schema
|
||||
|
||||
return newRowIterator(ctx, t, columns, p)
|
||||
return newRowIterator(ctx, t, sqlSch, columns, p)
|
||||
}
|
||||
|
||||
@@ -546,431 +546,6 @@ func TestAddColumn(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyAndChangeColumn(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
query string
|
||||
expectedSchema schema.Schema
|
||||
expectedRows []row.Row
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "alter modify column reorder middle",
|
||||
query: "alter table people modify column first_name varchar(16383) not null after last_name",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("first_name", FirstNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter modify column reorder first",
|
||||
query: "alter table people modify column first_name varchar(16383) not null first",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("first_name", FirstNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter modify column drop null constraint",
|
||||
query: "alter table people modify column first_name varchar(16383) null",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("first_name", FirstNameTag, types.StringKind, false),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter change column rename and reorder",
|
||||
query: "alter table people change first_name christian_name varchar(16383) not null after last_name",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("christian_name", FirstNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter change column rename and reorder first",
|
||||
query: "alter table people change column first_name christian_name varchar(16383) not null first",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("christian_name", FirstNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter change column drop null constraint",
|
||||
query: "alter table people change column first_name first_name varchar(16383) null",
|
||||
expectedSchema: dtestutils.CreateSchema(
|
||||
schema.NewColumn("id", IdTag, types.IntKind, true, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("first_name", FirstNameTag, types.StringKind, false),
|
||||
schema.NewColumn("last_name", LastNameTag, types.StringKind, false, schema.NotNullConstraint{}),
|
||||
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false),
|
||||
schema.NewColumn("age", AgeTag, types.IntKind, false),
|
||||
schema.NewColumn("rating", RatingTag, types.FloatKind, false),
|
||||
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
|
||||
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
|
||||
),
|
||||
expectedRows: AllPeopleRows,
|
||||
},
|
||||
{
|
||||
name: "alter modify column not null with type mismatch in default",
|
||||
query: "alter table people modify rating double default 'not a number'",
|
||||
expectedErr: "incompatible type for default value",
|
||||
},
|
||||
{
|
||||
name: "alter modify column not null, existing null values",
|
||||
query: "alter table people modify num_episodes bigint unsigned not null",
|
||||
expectedErr: "cannot change column to NOT NULL",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dEnv := dtestutils.CreateTestEnv()
|
||||
CreateTestDatabase(dEnv, t)
|
||||
ctx := context.Background()
|
||||
root, _ := dEnv.WorkingRoot(ctx)
|
||||
|
||||
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedErr)
|
||||
return
|
||||
}
|
||||
|
||||
assert.NotNil(t, updatedRoot)
|
||||
table, _, err := updatedRoot.GetTable(ctx, PeopleTableName)
|
||||
assert.NoError(t, err)
|
||||
sch, err := table.GetSchema(ctx)
|
||||
assert.NoError(t, err)
|
||||
equalSchemas(t, tt.expectedSchema, sch)
|
||||
|
||||
updatedTable, ok, err := updatedRoot.GetTable(ctx, "people")
|
||||
assert.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
rowData, err := updatedTable.GetNomsRowData(ctx)
|
||||
assert.NoError(t, err)
|
||||
var foundRows []row.Row
|
||||
err = rowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
r, err := row.FromNoms(tt.expectedSchema, key.(types.Tuple), value.(types.Tuple))
|
||||
assert.NoError(t, err)
|
||||
foundRows = append(foundRows, r)
|
||||
return false, nil
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedRows, foundRows)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyColumnType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupStmts []string
|
||||
alterStmt string
|
||||
tableName string
|
||||
expectedRows [][]types.Value
|
||||
expectedIdxRows [][]types.Value
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "alter modify column type similar types",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
alterStmt: "alter table test modify column v1 int",
|
||||
tableName: "test",
|
||||
expectedRows: [][]types.Value{
|
||||
{types.Int(0), types.Int(3)},
|
||||
{types.Int(1), types.Int(2)},
|
||||
},
|
||||
expectedIdxRows: [][]types.Value{
|
||||
{types.Int(2), types.Int(1)},
|
||||
{types.Int(3), types.Int(0)},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "alter modify column type different types",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
alterStmt: "alter table test modify column v1 varchar(20)",
|
||||
tableName: "test",
|
||||
expectedRows: [][]types.Value{
|
||||
{types.Int(0), types.String("3")},
|
||||
{types.Int(1), types.String("2")},
|
||||
},
|
||||
expectedIdxRows: [][]types.Value{
|
||||
{types.String("2"), types.Int(1)},
|
||||
{types.String("3"), types.Int(0)},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "alter modify column type different types reversed",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 varchar(20), index (v1))",
|
||||
`insert into test values (0, "3"), (1, "2")`,
|
||||
},
|
||||
alterStmt: "alter table test modify column v1 bigint",
|
||||
tableName: "test",
|
||||
expectedRows: [][]types.Value{
|
||||
{types.Int(0), types.Int(3)},
|
||||
{types.Int(1), types.Int(2)},
|
||||
},
|
||||
expectedIdxRows: [][]types.Value{
|
||||
{types.Int(2), types.Int(1)},
|
||||
{types.Int(3), types.Int(0)},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "alter modify column type primary key",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"insert into test values (0, 3), (1, 2)",
|
||||
},
|
||||
alterStmt: "alter table test modify column pk varchar(20)",
|
||||
tableName: "test",
|
||||
expectedRows: [][]types.Value{
|
||||
{types.String("0"), types.Int(3)},
|
||||
{types.String("1"), types.Int(2)},
|
||||
},
|
||||
expectedIdxRows: [][]types.Value{
|
||||
{types.Int(2), types.String("1")},
|
||||
{types.Int(3), types.String("0")},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "alter modify column type incompatible types with empty table",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bit(20), index (v1))",
|
||||
},
|
||||
alterStmt: "alter table test modify column pk datetime",
|
||||
tableName: "test",
|
||||
expectedRows: [][]types.Value(nil),
|
||||
expectedIdxRows: [][]types.Value(nil),
|
||||
},
|
||||
{
|
||||
name: "alter modify column type incompatible types with non-empty table",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bit(20), index (v1))",
|
||||
"insert into test values (1, 1)",
|
||||
},
|
||||
alterStmt: "alter table test modify column pk datetime",
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "alter modify column type different types incompatible values",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 varchar(20), index (v1))",
|
||||
"insert into test values (0, 3), (1, 'a')",
|
||||
},
|
||||
alterStmt: "alter table test modify column v1 bigint",
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "alter modify column type foreign key parent",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"create table test2(pk bigint primary key, v1 bigint, index (v1), foreign key (v1) references test(v1))",
|
||||
},
|
||||
alterStmt: "alter table test modify column v1 varchar(20)",
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
name: "alter modify column type foreign key child",
|
||||
setupStmts: []string{
|
||||
"create table test(pk bigint primary key, v1 bigint, index (v1))",
|
||||
"create table test2(pk bigint primary key, v1 bigint, index (v1), foreign key (v1) references test(v1))",
|
||||
},
|
||||
alterStmt: "alter table test2 modify column v1 varchar(20)",
|
||||
expectedErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
dEnv := dtestutils.CreateTestEnv()
|
||||
ctx := context.Background()
|
||||
root, _ := dEnv.WorkingRoot(ctx)
|
||||
var err error
|
||||
|
||||
for _, stmt := range test.setupStmts {
|
||||
root, err = ExecuteSql(t, dEnv, root, stmt)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
root, err = ExecuteSql(t, dEnv, root, test.alterStmt)
|
||||
if test.expectedErr == false {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
table, _, err := root.GetTable(ctx, test.tableName)
|
||||
require.NoError(t, err)
|
||||
sch, err := table.GetSchema(ctx)
|
||||
require.NoError(t, err)
|
||||
rowData, err := table.GetNomsRowData(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
var foundRows [][]types.Value
|
||||
err = rowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
r, err := row.FromNoms(sch, key.(types.Tuple), value.(types.Tuple))
|
||||
require.NoError(t, err)
|
||||
var vals []types.Value
|
||||
_, _ = r.IterSchema(sch, func(tag uint64, val types.Value) (stop bool, err error) {
|
||||
vals = append(vals, val)
|
||||
return false, nil
|
||||
})
|
||||
foundRows = append(foundRows, vals)
|
||||
return false, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.expectedRows, foundRows)
|
||||
|
||||
foundRows = nil
|
||||
idx := sch.Indexes().AllIndexes()[0]
|
||||
idxRowData, err := table.GetNomsIndexRowData(ctx, idx.Name())
|
||||
require.NoError(t, err)
|
||||
err = idxRowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
r, err := row.FromNoms(idx.Schema(), key.(types.Tuple), value.(types.Tuple))
|
||||
require.NoError(t, err)
|
||||
var vals []types.Value
|
||||
_, _ = r.IterSchema(idx.Schema(), func(tag uint64, val types.Value) (stop bool, err error) {
|
||||
vals = append(vals, val)
|
||||
return false, nil
|
||||
})
|
||||
foundRows = append(foundRows, vals)
|
||||
return false, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.expectedIdxRows, foundRows)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDropColumnStatements(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
query string
|
||||
expectedSchema schema.Schema
|
||||
expectedRows []row.Row
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "alter drop column",
|
||||
query: "alter table people drop rating",
|
||||
expectedSchema: dtestutils.RemoveColumnFromSchema(PeopleTestSchema, RatingTag),
|
||||
expectedRows: dtestutils.ConvertToSchema(dtestutils.RemoveColumnFromSchema(PeopleTestSchema, RatingTag), AllPeopleRows...),
|
||||
},
|
||||
{
|
||||
name: "alter drop column with optional column keyword",
|
||||
query: "alter table people drop column rating",
|
||||
expectedSchema: dtestutils.RemoveColumnFromSchema(PeopleTestSchema, RatingTag),
|
||||
expectedRows: dtestutils.ConvertToSchema(dtestutils.RemoveColumnFromSchema(PeopleTestSchema, RatingTag), AllPeopleRows...),
|
||||
},
|
||||
{
|
||||
name: "drop primary key",
|
||||
query: "alter table people drop column id",
|
||||
expectedErr: "Cannot drop column in primary key",
|
||||
},
|
||||
{
|
||||
name: "table not found",
|
||||
query: "alter table notFound drop column id",
|
||||
expectedErr: "table not found: notFound",
|
||||
},
|
||||
{
|
||||
name: "column not found",
|
||||
query: "alter table people drop column notFound",
|
||||
expectedErr: `table "people" does not have column "notFound"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dEnv := dtestutils.CreateTestEnv()
|
||||
CreateTestDatabase(dEnv, t)
|
||||
ctx := context.Background()
|
||||
root, _ := dEnv.WorkingRoot(ctx)
|
||||
|
||||
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
|
||||
|
||||
if tt.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedErr)
|
||||
return
|
||||
}
|
||||
|
||||
require.NotNil(t, updatedRoot)
|
||||
table, _, err := updatedRoot.GetTable(ctx, PeopleTableName)
|
||||
assert.NoError(t, err)
|
||||
sch, err := table.GetSchema(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedSchema, sch)
|
||||
|
||||
updatedTable, ok, err := updatedRoot.GetTable(ctx, "people")
|
||||
assert.NoError(t, err)
|
||||
require.True(t, ok)
|
||||
|
||||
rowData, err := updatedTable.GetNomsRowData(ctx)
|
||||
assert.NoError(t, err)
|
||||
var foundRows []row.Row
|
||||
err = rowData.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
|
||||
updatedSch, err := updatedTable.GetSchema(ctx)
|
||||
assert.NoError(t, err)
|
||||
r, err := row.FromNoms(updatedSch, key.(types.Tuple), value.(types.Tuple))
|
||||
assert.NoError(t, err)
|
||||
foundRows = append(foundRows, r)
|
||||
return false, nil
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedRows, foundRows)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenameColumn(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -268,7 +268,7 @@ func WriteEWKBHeader(v interface{}, buf []byte) {
|
||||
// Write SRID and type
|
||||
binary.LittleEndian.PutUint32(buf[0:4], v.SRID)
|
||||
binary.LittleEndian.PutUint32(buf[5:9], 1)
|
||||
case sql.Linestring:
|
||||
case sql.LineString:
|
||||
binary.LittleEndian.PutUint32(buf[0:4], v.SRID)
|
||||
binary.LittleEndian.PutUint32(buf[5:9], 2)
|
||||
case sql.Polygon:
|
||||
@@ -285,7 +285,7 @@ func WriteEWKBPointData(p sql.Point, buf []byte) {
|
||||
}
|
||||
|
||||
// WriteEWKBLineData converts a Line into a byte array in EWKB format
|
||||
func WriteEWKBLineData(l sql.Linestring, buf []byte) {
|
||||
func WriteEWKBLineData(l sql.LineString, buf []byte) {
|
||||
// Write length of linestring
|
||||
binary.LittleEndian.PutUint32(buf[:4], uint32(len(l.Points)))
|
||||
// Append each point
|
||||
@@ -346,14 +346,12 @@ func SqlColToStr(ctx context.Context, col interface{}) string {
|
||||
}
|
||||
case time.Time:
|
||||
return typedCol.Format("2006-01-02 15:04:05.999999 -0700 MST")
|
||||
case sql.Geometry:
|
||||
return SqlColToStr(ctx, typedCol.Inner)
|
||||
case sql.Point:
|
||||
buf := make([]byte, 25)
|
||||
WriteEWKBHeader(typedCol, buf)
|
||||
WriteEWKBPointData(typedCol, buf[9:])
|
||||
return SqlColToStr(ctx, buf)
|
||||
case sql.Linestring:
|
||||
case sql.LineString:
|
||||
buf := make([]byte, 9+4+16*len(typedCol.Points))
|
||||
WriteEWKBHeader(typedCol, buf)
|
||||
WriteEWKBLineData(typedCol, buf[9:])
|
||||
|
||||
@@ -325,7 +325,7 @@ func (t *DoltTable) PartitionRows(ctx *sql.Context, partition sql.Partition) (sq
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return partitionRows(ctx, table, t.projectedCols, partition)
|
||||
return partitionRows(ctx, table, t.sqlSch.Schema, t.projectedCols, partition)
|
||||
}
|
||||
|
||||
func (t DoltTable) PartitionRows2(ctx *sql.Context, part sql.Partition) (sql.RowIter2, error) {
|
||||
@@ -334,7 +334,7 @@ func (t DoltTable) PartitionRows2(ctx *sql.Context, part sql.Partition) (sql.Row
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iter, err := partitionRows(ctx, table, t.projectedCols, part)
|
||||
iter, err := partitionRows(ctx, table, t.sqlSch.Schema, t.projectedCols, part)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -342,12 +342,12 @@ func (t DoltTable) PartitionRows2(ctx *sql.Context, part sql.Partition) (sql.Row
|
||||
return iter.(sql.RowIter2), err
|
||||
}
|
||||
|
||||
func partitionRows(ctx *sql.Context, t *doltdb.Table, projCols []string, partition sql.Partition) (sql.RowIter, error) {
|
||||
func partitionRows(ctx *sql.Context, t *doltdb.Table, sqlSch sql.Schema, projCols []string, partition sql.Partition) (sql.RowIter, error) {
|
||||
switch typedPartition := partition.(type) {
|
||||
case doltTablePartition:
|
||||
return newRowIterator(ctx, t, projCols, typedPartition)
|
||||
return newRowIterator(ctx, t, sqlSch, projCols, typedPartition)
|
||||
case index.SinglePartition:
|
||||
return newRowIterator(ctx, t, projCols, doltTablePartition{rowData: typedPartition.RowData, end: NoUpperBound})
|
||||
return newRowIterator(ctx, t, sqlSch, projCols, doltTablePartition{rowData: typedPartition.RowData, end: NoUpperBound})
|
||||
}
|
||||
|
||||
return nil, errors.New("unsupported partition type")
|
||||
@@ -958,20 +958,81 @@ func (t *AlterableDoltTable) ShouldRewriteTable(
|
||||
ctx *sql.Context,
|
||||
oldSchema sql.PrimaryKeySchema,
|
||||
newSchema sql.PrimaryKeySchema,
|
||||
modifiedColumn *sql.Column,
|
||||
oldColumn *sql.Column,
|
||||
newColumn *sql.Column,
|
||||
) bool {
|
||||
// TODO: this could be a lot more specific, we don't always need to rewrite on schema changes in the new format
|
||||
return types.IsFormat_DOLT_1(t.nbf) ||
|
||||
len(oldSchema.Schema) < len(newSchema.Schema) ||
|
||||
(len(newSchema.PkOrdinals) != len(oldSchema.PkOrdinals))
|
||||
return t.isIncompatibleTypeChange(oldColumn, newColumn) ||
|
||||
orderChanged(oldSchema, newSchema, oldColumn, newColumn) ||
|
||||
isColumnDrop(oldSchema, newSchema) ||
|
||||
isPrimaryKeyChange(oldSchema, newSchema)
|
||||
}
|
||||
|
||||
func orderChanged(oldSchema, newSchema sql.PrimaryKeySchema, oldColumn, newColumn *sql.Column) bool {
|
||||
if oldColumn == nil || newColumn == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return oldSchema.Schema.IndexOfColName(oldColumn.Name) != newSchema.Schema.IndexOfColName(newColumn.Name)
|
||||
}
|
||||
|
||||
func (t *AlterableDoltTable) isIncompatibleTypeChange(oldColumn *sql.Column, newColumn *sql.Column) bool {
|
||||
if oldColumn == nil || newColumn == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
existingCol, _ := t.sch.GetAllCols().GetByNameCaseInsensitive(oldColumn.Name)
|
||||
newCol, err := sqlutil.ToDoltCol(schema.SystemTableReservedMin, newColumn)
|
||||
if err != nil {
|
||||
panic(err) // should be impossible, we check compatibility before this point
|
||||
}
|
||||
|
||||
if !existingCol.TypeInfo.Equals(newCol.TypeInfo) {
|
||||
if types.IsFormat_DOLT_1(t.Format()) {
|
||||
// This is overly broad, we could narrow this down a bit
|
||||
return true
|
||||
}
|
||||
if existingCol.Kind != newCol.Kind {
|
||||
return true
|
||||
} else if schema.IsColSpatialType(newCol) {
|
||||
// TODO: we need to do this because some spatial type changes require a full table check, but not all.
|
||||
// We could narrow this check down.
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isColumnDrop(oldSchema sql.PrimaryKeySchema, newSchema sql.PrimaryKeySchema) bool {
|
||||
return len(oldSchema.Schema) > len(newSchema.Schema)
|
||||
}
|
||||
|
||||
func getDroppedColumn(oldSchema sql.PrimaryKeySchema, newSchema sql.PrimaryKeySchema) *sql.Column {
|
||||
for _, col := range oldSchema.Schema {
|
||||
if newSchema.IndexOf(col.Name, col.Source) < 0 {
|
||||
return col
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isPrimaryKeyChange(oldSchema sql.PrimaryKeySchema,
|
||||
newSchema sql.PrimaryKeySchema) bool {
|
||||
return len(newSchema.PkOrdinals) != len(oldSchema.PkOrdinals)
|
||||
}
|
||||
|
||||
func (t *AlterableDoltTable) RewriteInserter(
|
||||
ctx *sql.Context,
|
||||
oldSchema sql.PrimaryKeySchema,
|
||||
newSchema sql.PrimaryKeySchema,
|
||||
modifiedColumn *sql.Column,
|
||||
oldColumn *sql.Column,
|
||||
newColumn *sql.Column,
|
||||
) (sql.RowInserter, error) {
|
||||
err := validateSchemaChange(t.Name(), oldSchema, newSchema, oldColumn, newColumn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sess := dsess.DSessFromSess(ctx.Session)
|
||||
|
||||
// Begin by creating a new table with the same name and the new schema, then removing all its existing rows
|
||||
@@ -1011,9 +1072,37 @@ func (t *AlterableDoltTable) RewriteInserter(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newSch, err = schema.Adapt(oldSch, newSch) // improvise, overcome
|
||||
if err != nil {
|
||||
return nil, err
|
||||
newSch = schema.CopyChecks(oldSch, newSch)
|
||||
|
||||
if isColumnDrop(oldSchema, newSchema) {
|
||||
newSch = schema.CopyIndexes(oldSch, newSch)
|
||||
droppedCol := getDroppedColumn(oldSchema, newSchema)
|
||||
for _, index := range newSch.Indexes().IndexesWithColumn(droppedCol.Name) {
|
||||
_, err = newSch.Indexes().RemoveIndex(index.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else if newColumn != nil && oldColumn != nil { // modify column
|
||||
// It may be possible to optimize this and not always rewrite every index, but since we're already truncating the
|
||||
// table to rewrite it we also truncate all the indexes. Much easier to get right.
|
||||
for _, index := range oldSch.Indexes().AllIndexes() {
|
||||
var colNames []string
|
||||
for _, colName := range index.ColumnNames() {
|
||||
if strings.ToLower(oldColumn.Name) == strings.ToLower(colName) {
|
||||
colNames = append(colNames, newColumn.Name)
|
||||
} else {
|
||||
colNames = append(colNames, colName)
|
||||
}
|
||||
}
|
||||
newSch.Indexes().AddIndexByColNames(index.Name(), colNames, schema.IndexProperties{
|
||||
IsUnique: index.IsUnique(),
|
||||
IsUserDefined: index.IsUserDefined(),
|
||||
Comment: index.Comment(),
|
||||
})
|
||||
}
|
||||
} else {
|
||||
newSch = schema.CopyIndexes(oldSch, newSch)
|
||||
}
|
||||
|
||||
// If we have an auto increment column, we need to set it here before we begin the rewrite process (it may have changed)
|
||||
@@ -1067,6 +1156,27 @@ func (t *AlterableDoltTable) RewriteInserter(
|
||||
return ed, nil
|
||||
}
|
||||
|
||||
// validateSchemaChange returns an error if the schema change given is not legal
|
||||
func validateSchemaChange(
|
||||
tableName string,
|
||||
oldSchema sql.PrimaryKeySchema,
|
||||
newSchema sql.PrimaryKeySchema,
|
||||
oldColumn *sql.Column,
|
||||
newColumn *sql.Column,
|
||||
) error {
|
||||
if newColumn != nil {
|
||||
newCol, err := sqlutil.ToDoltCol(schema.SystemTableReservedMin, newColumn)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if newCol.IsPartOfPK && schema.IsColSpatialType(newCol) {
|
||||
return schema.ErrUsingSpatialKey.New(tableName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *AlterableDoltTable) adjustForeignKeysForDroppedPk(ctx *sql.Context, root *doltdb.RootValue) (*doltdb.RootValue, error) {
|
||||
if t.autoIncCol.AutoIncrement {
|
||||
return nil, sql.ErrWrongAutoKey.New()
|
||||
@@ -1210,12 +1320,9 @@ func (t *AlterableDoltTable) dropColumnData(ctx *sql.Context, updatedTable *dolt
|
||||
return updatedTable.UpdateNomsRows(ctx, newMapData)
|
||||
}
|
||||
|
||||
// ModifyColumn implements sql.AlterableTable
|
||||
// ModifyColumn implements sql.AlterableTable. ModifyColumn operations are only used for operations that change only
|
||||
// the schema of a table, not the data. For those operations, |RewriteInserter| is used.
|
||||
func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, column *sql.Column, order *sql.ColumnOrder) error {
|
||||
if types.IsFormat_DOLT_1(t.nbf) {
|
||||
return nil
|
||||
}
|
||||
|
||||
ws, err := t.db.GetWorkingSet(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1242,6 +1349,7 @@ func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, c
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: move this logic into ShouldRewrite
|
||||
if !existingCol.TypeInfo.Equals(col.TypeInfo) {
|
||||
if existingCol.Kind != col.Kind { // We only change the tag when the underlying Noms kind changes
|
||||
tags, err := root.GenerateTagsForNewColumns(ctx, t.tableName, []string{col.Name}, []types.NomsKind{col.Kind}, nil)
|
||||
@@ -1255,56 +1363,18 @@ func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, c
|
||||
}
|
||||
}
|
||||
|
||||
updatedTable, err := modifyColumn(ctx, table, existingCol, col, order, t.opts)
|
||||
updatedTable, err := modifyColumn(ctx, table, existingCol, col, order)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For auto columns modified to be auto increment, we have more work to do
|
||||
if !existingCol.AutoIncrement && col.AutoIncrement {
|
||||
updatedSch, err := updatedTable.GetSchema(ctx)
|
||||
seq, err := t.getFirstAutoIncrementValue(ctx, columnName, column.Type, updatedTable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rowData, err := updatedTable.GetRowData(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note that we aren't calling the public PartitionRows, because it always gets the table data from the session
|
||||
// root, which hasn't been updated yet
|
||||
rowIter, err := partitionRows(ctx, updatedTable, t.projectedCols, index.SinglePartition{RowData: rowData})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
initialValue := column.Type.Zero()
|
||||
colIdx := updatedSch.GetAllCols().IndexOf(columnName)
|
||||
|
||||
for {
|
||||
r, err := rowIter.Next(ctx)
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmp, err := column.Type.Compare(initialValue, r[colIdx])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cmp < 0 {
|
||||
initialValue = r[colIdx]
|
||||
}
|
||||
}
|
||||
|
||||
seq, err := globalstate.CoerceAutoIncrementValue(initialValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seq++
|
||||
|
||||
updatedTable, err = updatedTable.SetAutoIncrementValue(ctx, seq)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1314,6 +1384,8 @@ func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, c
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: this isn't transactional, and it should be
|
||||
ait.AddNewTable(t.tableName)
|
||||
ait.Set(t.tableName, seq)
|
||||
}
|
||||
@@ -1335,6 +1407,61 @@ func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, c
|
||||
// return t.updateFromRoot(ctx, newRoot)
|
||||
}
|
||||
|
||||
// getFirstAutoIncrementValue returns the next auto increment value for a table that just acquired one through an
|
||||
// ALTER statement.
|
||||
// TODO: this could use an index and avoid a full table scan in many cases
|
||||
func (t *AlterableDoltTable) getFirstAutoIncrementValue(
|
||||
ctx *sql.Context,
|
||||
columnName string,
|
||||
columnType sql.Type,
|
||||
table *doltdb.Table,
|
||||
) (uint64, error) {
|
||||
updatedSch, err := table.GetSchema(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
rowData, err := table.GetRowData(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Note that we aren't calling the public PartitionRows, because it always gets the table data from the session
|
||||
// root, which hasn't been updated yet
|
||||
rowIter, err := partitionRows(ctx, table, t.sqlSch.Schema, t.projectedCols, index.SinglePartition{RowData: rowData})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
initialValue := columnType.Zero()
|
||||
colIdx := updatedSch.GetAllCols().IndexOf(columnName)
|
||||
|
||||
for {
|
||||
r, err := rowIter.Next(ctx)
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
cmp, err := columnType.Compare(initialValue, r[colIdx])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if cmp < 0 {
|
||||
initialValue = r[colIdx]
|
||||
}
|
||||
}
|
||||
|
||||
seq, err := globalstate.CoerceAutoIncrementValue(initialValue)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
seq++
|
||||
|
||||
return seq, nil
|
||||
}
|
||||
|
||||
func increment(val types.Value) types.Value {
|
||||
switch val := val.(type) {
|
||||
case types.Int:
|
||||
|
||||
@@ -152,7 +152,7 @@ func (t *TempTable) PartitionRows(ctx *sql.Context, partition sql.Partition) (sq
|
||||
if t.lookup != nil {
|
||||
return index.RowIterForIndexLookup(ctx, t.table, t.lookup, t.pkSch, nil)
|
||||
} else {
|
||||
return partitionRows(ctx, t.table, nil, partition)
|
||||
return partitionRows(ctx, t.table, t.sqlSchema().Schema, nil, partition)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/row"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
|
||||
"github.com/dolthub/dolt/go/store/datas"
|
||||
"github.com/dolthub/dolt/go/store/types"
|
||||
)
|
||||
@@ -77,15 +76,12 @@ const (
|
||||
)
|
||||
|
||||
var PeopleTestSchema = createPeopleTestSchema()
|
||||
var untypedPeopleSch, _ = untyped.UntypeUnkeySchema(PeopleTestSchema)
|
||||
var PeopleTableName = "people"
|
||||
|
||||
var EpisodesTestSchema = createEpisodesTestSchema()
|
||||
var untypedEpisodesSch, _ = untyped.UntypeUnkeySchema(EpisodesTestSchema)
|
||||
var EpisodesTableName = "episodes"
|
||||
|
||||
var AppearancesTestSchema = createAppearancesTestSchema()
|
||||
var untypedAppearacesSch, _ = untyped.UntypeUnkeySchema(AppearancesTestSchema)
|
||||
var AppearancesTableName = "appearances"
|
||||
|
||||
func createPeopleTestSchema() schema.Schema {
|
||||
@@ -261,20 +257,6 @@ func Rs(rows ...row.Row) []row.Row {
|
||||
return rows
|
||||
}
|
||||
|
||||
// Returns the index of the first row in the list that has the same primary key as the one given, or -1 otherwise.
|
||||
func FindRowIndex(find row.Row, rows []row.Row) int {
|
||||
idx := -1
|
||||
for i, updatedRow := range rows {
|
||||
rowId, _ := find.GetColVal(IdTag)
|
||||
updatedId, _ := updatedRow.GetColVal(IdTag)
|
||||
if rowId.Equals(updatedId) {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
return idx
|
||||
}
|
||||
|
||||
// Mutates the row given with pairs of {tag,value} given in the varargs param. Converts built-in types to noms types.
|
||||
func MutateRow(sch schema.Schema, r row.Row, tagsAndVals ...interface{}) row.Row {
|
||||
if len(tagsAndVals)%2 != 0 {
|
||||
|
||||
@@ -77,13 +77,13 @@ func (te *nomsTableWriter) duplicateKeyErrFunc(keyString, indexName string, k, v
|
||||
}
|
||||
|
||||
func (te *nomsTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
if !schema.IsKeyless(te.sch) {
|
||||
k, v, tagToVal, err := sqlutil.DoltKeyValueAndMappingFromSqlRow(ctx, te.vrw, sqlRow, te.sch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return te.tableEditor.InsertKeyVal(ctx, k, v, tagToVal, te.duplicateKeyErrFunc)
|
||||
if schema.IsKeyless(te.sch) {
|
||||
return te.keylessInsert(ctx, sqlRow)
|
||||
}
|
||||
return te.keyedInsert(ctx, sqlRow)
|
||||
}
|
||||
|
||||
func (te *nomsTableWriter) keylessInsert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
dRow, err := sqlutil.SqlRowToDoltRow(ctx, te.vrw, sqlRow, te.sch)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -91,6 +91,14 @@ func (te *nomsTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
return te.tableEditor.InsertRow(ctx, dRow, te.duplicateKeyErrFunc)
|
||||
}
|
||||
|
||||
func (te *nomsTableWriter) keyedInsert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
k, v, tagToVal, err := sqlutil.DoltKeyValueAndMappingFromSqlRow(ctx, te.vrw, sqlRow, te.sch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return te.tableEditor.InsertKeyVal(ctx, k, v, tagToVal, te.duplicateKeyErrFunc)
|
||||
}
|
||||
|
||||
func (te *nomsTableWriter) Delete(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
if !schema.IsKeyless(te.sch) {
|
||||
k, tagToVal, err := sqlutil.DoltKeyAndMappingFromSqlRow(ctx, te.vrw, sqlRow, te.sch)
|
||||
|
||||
@@ -86,6 +86,7 @@ func (n prollyFkIndexer) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.R
|
||||
rangeIter: rangeIter,
|
||||
idxToPkMap: idxToPkMap,
|
||||
primary: primary,
|
||||
sqlSch: n.writer.sqlSch,
|
||||
}, nil
|
||||
} else {
|
||||
rangeIter, err := idxWriter.(prollyKeylessSecondaryWriter).mut.IterRange(ctx, n.pRange)
|
||||
@@ -95,6 +96,7 @@ func (n prollyFkIndexer) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.R
|
||||
return &prollyFkKeylessRowIter{
|
||||
rangeIter: rangeIter,
|
||||
primary: n.writer.primary.(prollyKeylessWriter),
|
||||
sqlSch: n.writer.sqlSch,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
@@ -104,6 +106,7 @@ type prollyFkPkRowIter struct {
|
||||
rangeIter prolly.MapIter
|
||||
idxToPkMap map[int]int
|
||||
primary prollyIndexWriter
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
var _ sql.RowIter = prollyFkPkRowIter{}
|
||||
@@ -140,7 +143,10 @@ func (iter prollyFkPkRowIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return nextRow, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return index.DenormalizeRow(iter.sqlSch, nextRow)
|
||||
}
|
||||
|
||||
// Close implements the interface sql.RowIter.
|
||||
@@ -152,6 +158,7 @@ func (iter prollyFkPkRowIter) Close(ctx *sql.Context) error {
|
||||
type prollyFkKeylessRowIter struct {
|
||||
rangeIter prolly.MapIter
|
||||
primary prollyKeylessWriter
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
var _ sql.RowIter = prollyFkKeylessRowIter{}
|
||||
@@ -179,7 +186,10 @@ func (iter prollyFkKeylessRowIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return nextRow, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return index.DenormalizeRow(iter.sqlSch, nextRow)
|
||||
}
|
||||
|
||||
// Close implements the interface sql.RowIter.
|
||||
|
||||
19
go/libraries/doltcore/sqle/writer/prolly_table_writer.go
Normal file → Executable file
19
go/libraries/doltcore/sqle/writer/prolly_table_writer.go
Normal file → Executable file
@@ -122,7 +122,11 @@ func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlS
|
||||
}
|
||||
|
||||
// Insert implements TableWriter.
|
||||
func (w *prollyTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
func (w *prollyTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) (err error) {
|
||||
if sqlRow, err = index.NormalizeRow(w.sqlSch, sqlRow); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.primary.Insert(ctx, sqlRow); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -138,7 +142,11 @@ func (w *prollyTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
}
|
||||
|
||||
// Delete implements TableWriter.
|
||||
func (w *prollyTableWriter) Delete(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
func (w *prollyTableWriter) Delete(ctx *sql.Context, sqlRow sql.Row) (err error) {
|
||||
if sqlRow, err = index.NormalizeRow(w.sqlSch, sqlRow); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wr := range w.secondary {
|
||||
if err := wr.Delete(ctx, sqlRow); err != nil {
|
||||
return err
|
||||
@@ -152,6 +160,13 @@ func (w *prollyTableWriter) Delete(ctx *sql.Context, sqlRow sql.Row) error {
|
||||
|
||||
// Update implements TableWriter.
|
||||
func (w *prollyTableWriter) Update(ctx *sql.Context, oldRow sql.Row, newRow sql.Row) (err error) {
|
||||
if oldRow, err = index.NormalizeRow(w.sqlSch, oldRow); err != nil {
|
||||
return err
|
||||
}
|
||||
if newRow, err = index.NormalizeRow(w.sqlSch, newRow); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wr := range w.secondary {
|
||||
if err := wr.Update(ctx, oldRow, newRow); err != nil {
|
||||
if sql.ErrUniqueKeyViolation.Is(err) {
|
||||
|
||||
@@ -79,14 +79,16 @@ func CreateIndex(
|
||||
}
|
||||
|
||||
// if an index was already created for the column set but was not generated by the user then we replace it
|
||||
replacingIndex := false
|
||||
existingIndex, ok := sch.Indexes().GetIndexByColumnNames(realColNames...)
|
||||
if ok && !existingIndex.IsUserDefined() {
|
||||
replacingIndex = true
|
||||
_, err = sch.Indexes().RemoveIndex(existingIndex.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
table, err = table.DeleteIndexRowData(ctx, existingIndex.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the index metadata, will error if index names are taken or an index with the same columns in the same order exists
|
||||
@@ -109,27 +111,18 @@ func CreateIndex(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if replacingIndex { // verify that the pre-existing index data is valid
|
||||
newTable, err = newTable.RenameIndexRowData(ctx, existingIndex.Name(), index.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO (dhruv) this seems like it would fail?
|
||||
err = newTable.VerifyIndexRowData(ctx, index.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else { // set the index row data and get a new root with the updated table
|
||||
indexRows, err := BuildSecondaryIndex(ctx, newTable, index, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newTable, err = newTable.SetIndexRows(ctx, index.Name(), indexRows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: in the case that we're replacing an implicit index with one the user specified, we could do this more
|
||||
// cheaply in some cases by just renaming it, rather than building it from scratch. But that's harder to get right.
|
||||
indexRows, err := BuildSecondaryIndex(ctx, newTable, index, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newTable, err = newTable.SetIndexRows(ctx, index.Name(), indexRows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &CreateIndexReturn{
|
||||
NewTable: newTable,
|
||||
Sch: sch,
|
||||
|
||||
58
go/performance/serverbench/diff_bench_test.go
Normal file
58
go/performance/serverbench/diff_bench_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2022 Dolthub, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package serverbench
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/gocraft/dbr/v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
connStr := fmt.Sprintf("%v:%v@tcp(%v:%v)/%s",
|
||||
"root", "", "127.0.0.1", 3306, "diffbench")
|
||||
|
||||
conn, err := dbr.Open("mysql", connStr, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
sess = conn.NewSession(&dbr.NullEventReceiver{})
|
||||
}
|
||||
|
||||
var sess *dbr.Session
|
||||
|
||||
func BenchmarkServerDiff(b *testing.B) {
|
||||
b.Run("point diff", func(b *testing.B) {
|
||||
benchmarkQuery(b, "SELECT count(*) "+
|
||||
"FROM dolt_commit_diff_difftbl "+
|
||||
"WHERE to_commit=HASHOF('HEAD') "+
|
||||
"AND from_commit=HASHOF('HEAD^')")
|
||||
})
|
||||
b.Run("point lookup", func(b *testing.B) {
|
||||
benchmarkQuery(b, "SELECT * FROM difftbl WHERE pk = 12345")
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkQuery(b *testing.B, query string) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
r, err := sess.Query(query)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, r.Close())
|
||||
}
|
||||
}
|
||||
45
go/serial/encoding.fbs
Normal file
45
go/serial/encoding.fbs
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2021 Dolthub, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
namespace serial;
|
||||
|
||||
enum Encoding : uint8 {
|
||||
// fixed width
|
||||
Null = 0,
|
||||
Int8 = 1,
|
||||
Uint8 = 2,
|
||||
Int16 = 3,
|
||||
Uint16 = 4,
|
||||
Int32 = 7,
|
||||
Uint32 = 8,
|
||||
Int64 = 9,
|
||||
Uint64 = 10,
|
||||
Float32 = 11,
|
||||
Float64 = 12,
|
||||
Bit64 = 13,
|
||||
Hash128 = 14,
|
||||
Year = 15,
|
||||
Date = 16,
|
||||
Time = 17,
|
||||
Datetime = 18,
|
||||
Enum = 19,
|
||||
Set = 20,
|
||||
|
||||
// variable width
|
||||
String = 128,
|
||||
Bytes = 129,
|
||||
Decimal = 130,
|
||||
JSON = 131,
|
||||
Geometry = 133,
|
||||
}
|
||||
@@ -20,7 +20,8 @@ flatc -o $GEN_DIR --gen-onefile --filename-suffix "" --gen-mutable --go-namespac
|
||||
storeroot.fbs \
|
||||
table.fbs \
|
||||
tag.fbs \
|
||||
workingset.fbs
|
||||
workingset.fbs \
|
||||
encoding.fbs
|
||||
|
||||
# prefix files with copyright header
|
||||
for FILE in $GEN_DIR/*.go;
|
||||
|
||||
@@ -12,73 +12,56 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
include "encoding.fbs";
|
||||
|
||||
namespace serial;
|
||||
|
||||
table Column {
|
||||
name:string (required);
|
||||
storage_order:uint16;
|
||||
schema_order:uint16;
|
||||
|
||||
type:Type (required);
|
||||
nullable:bool;
|
||||
primary_key:bool;
|
||||
auto_increment:bool;
|
||||
default:ColumnDefault;
|
||||
constraints:[ColumnConstraint] (required);
|
||||
comment:string (required);
|
||||
}
|
||||
|
||||
// based on schema_marshalling.go:encodeTypeInfo()
|
||||
table Type {
|
||||
type:string (required);
|
||||
param_keys:[string] (required);
|
||||
param_values:[string] (required);
|
||||
}
|
||||
|
||||
table ColumnDefault {
|
||||
expression:string (required);
|
||||
}
|
||||
|
||||
table ColumnConstraint {
|
||||
name:string (required);
|
||||
expression:string (required);
|
||||
enforced:bool;
|
||||
}
|
||||
|
||||
table TableSchema {
|
||||
columns:[Column] (required);
|
||||
indexes:[IndexSchema] (required);
|
||||
columns:[Column];
|
||||
indexes:[Index];
|
||||
checks:[string];
|
||||
|
||||
}
|
||||
|
||||
table IndexSchema {
|
||||
table Column {
|
||||
// column name
|
||||
name:string (required);
|
||||
columns:[string] (required);
|
||||
|
||||
// full sql column definition
|
||||
definition:string (required);
|
||||
|
||||
// sql display order
|
||||
display_order:int16;
|
||||
|
||||
// storage encoding
|
||||
encoding:Encoding;
|
||||
|
||||
// column meta
|
||||
primary_key:bool;
|
||||
nullable:bool;
|
||||
auto_increment:bool;
|
||||
hidden:bool;
|
||||
generated:bool;
|
||||
virtual:bool;
|
||||
}
|
||||
|
||||
table Index {
|
||||
// index name
|
||||
name:string (required);
|
||||
|
||||
// full sql index definition
|
||||
definition:string (required);
|
||||
|
||||
// key columns of the index stored as
|
||||
// indicies into the schema columns array
|
||||
key_columns:[uint16] (required);
|
||||
|
||||
// values columns of the index stored as
|
||||
// indicies into the schema columns array
|
||||
value_columns:[uint16];
|
||||
|
||||
// index meta
|
||||
unique:bool;
|
||||
system_defined:bool;
|
||||
comment:string (required);
|
||||
}
|
||||
|
||||
enum ForeignKeyReferenceOption : uint8 {
|
||||
DefaultAction,
|
||||
Cascade,
|
||||
NoAction,
|
||||
Restrict,
|
||||
SetNull,
|
||||
}
|
||||
|
||||
table ForeignKey {
|
||||
name:string (required);
|
||||
|
||||
child_table:string (required);
|
||||
child_columns:[string] (required);
|
||||
child_index:string (required);
|
||||
|
||||
parent_table:string (required);
|
||||
parent_columns:[string] (required);
|
||||
parent_index:string (required);
|
||||
|
||||
on_update:ForeignKeyReferenceOption;
|
||||
on_delete:ForeignKeyReferenceOption;
|
||||
|
||||
// todo(andy): "resolved details" (consult with Daylon)
|
||||
}
|
||||
|
||||
@@ -51,3 +51,5 @@ const FormatDolt1String = "__DOLT_1__"
|
||||
const FormatDoltDevString = "__DOLT_DEV__"
|
||||
|
||||
var FormatDefaultString = FormatLD1String
|
||||
|
||||
// var FormatDefaultString = FormatDolt1String
|
||||
|
||||
@@ -41,7 +41,7 @@ func DeserializePoint(buf []byte, srid uint32) (p sql.Point) {
|
||||
return
|
||||
}
|
||||
|
||||
func DeserializeLinestring(buf []byte, srid uint32) (l sql.Linestring) {
|
||||
func DeserializeLineString(buf []byte, srid uint32) (l sql.LineString) {
|
||||
l.SRID = srid
|
||||
l.Points = readPointSlice(buf, srid)
|
||||
return
|
||||
@@ -68,8 +68,8 @@ func readPointSlice(buf []byte, srid uint32) (points []sql.Point) {
|
||||
return
|
||||
}
|
||||
|
||||
func readLineSlice(buf []byte, srid uint32) (lines []sql.Linestring) {
|
||||
lines = make([]sql.Linestring, readCount(buf))
|
||||
func readLineSlice(buf []byte, srid uint32) (lines []sql.LineString) {
|
||||
lines = make([]sql.LineString, readCount(buf))
|
||||
buf = buf[CountSize:]
|
||||
for i := range lines {
|
||||
lines[i].SRID = srid
|
||||
|
||||
@@ -33,7 +33,7 @@ const (
|
||||
|
||||
const (
|
||||
PointType = 1
|
||||
LinestringType = 2
|
||||
LineStringType = 2
|
||||
PolygonType = 3
|
||||
)
|
||||
|
||||
@@ -59,9 +59,9 @@ func SerializePoint(p sql.Point) (buf []byte) {
|
||||
return
|
||||
}
|
||||
|
||||
func SerializeLinestring(l sql.Linestring) (buf []byte) {
|
||||
func SerializeLineString(l sql.LineString) (buf []byte) {
|
||||
buf = allocateBuffer(len(l.Points), 1)
|
||||
WriteEWKBHeader(buf[:EWKBHeaderSize], l.SRID, LinestringType)
|
||||
WriteEWKBHeader(buf[:EWKBHeaderSize], l.SRID, LineStringType)
|
||||
writePointSlice(buf[EWKBHeaderSize:], l.Points)
|
||||
return
|
||||
}
|
||||
@@ -86,7 +86,7 @@ func writePointSlice(buf []byte, points []sql.Point) {
|
||||
}
|
||||
}
|
||||
|
||||
func writeLineSlice(buf []byte, lines []sql.Linestring) {
|
||||
func writeLineSlice(buf []byte, lines []sql.LineString) {
|
||||
writeCount(buf, uint32(len(lines)))
|
||||
buf = buf[CountSize:]
|
||||
for _, l := range lines {
|
||||
|
||||
@@ -244,7 +244,7 @@ func (it *orderedTreeIter[K, V]) Next(ctx context.Context) (key K, value V, err
|
||||
k, v := tree.CurrentCursorItems(it.curr)
|
||||
key, value = K(k), V(v)
|
||||
|
||||
_, err = it.curr.Advance(ctx)
|
||||
err = it.curr.Advance(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -266,7 +266,7 @@ func (it *orderedTreeIter[K, V]) current() (key K, value V) {
|
||||
}
|
||||
|
||||
func (it *orderedTreeIter[K, V]) iterate(ctx context.Context) (err error) {
|
||||
_, err = it.curr.Advance(ctx)
|
||||
err = it.curr.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -132,20 +132,11 @@ func encodingFromSqlType(typ query.Type) val.Encoding {
|
||||
|
||||
// todo(andy): replace temp encodings
|
||||
switch typ {
|
||||
case query.Type_DECIMAL:
|
||||
return val.DecimalEnc
|
||||
case query.Type_GEOMETRY:
|
||||
return val.GeometryEnc
|
||||
case query.Type_BIT:
|
||||
return val.Uint64Enc
|
||||
case query.Type_BLOB:
|
||||
return val.ByteStringEnc
|
||||
// todo: temporary hack for enginetests
|
||||
return val.StringEnc
|
||||
case query.Type_TEXT:
|
||||
return val.StringEnc
|
||||
case query.Type_ENUM:
|
||||
return val.StringEnc
|
||||
case query.Type_SET:
|
||||
return val.StringEnc
|
||||
case query.Type_JSON:
|
||||
return val.JSONEnc
|
||||
}
|
||||
@@ -175,6 +166,10 @@ func encodingFromSqlType(typ query.Type) val.Encoding {
|
||||
return val.Float32Enc
|
||||
case query.Type_FLOAT64:
|
||||
return val.Float64Enc
|
||||
case query.Type_BIT:
|
||||
return val.Uint64Enc
|
||||
case query.Type_DECIMAL:
|
||||
return val.DecimalEnc
|
||||
case query.Type_YEAR:
|
||||
return val.YearEnc
|
||||
case query.Type_DATE:
|
||||
@@ -185,6 +180,10 @@ func encodingFromSqlType(typ query.Type) val.Encoding {
|
||||
return val.DatetimeEnc
|
||||
case query.Type_DATETIME:
|
||||
return val.DatetimeEnc
|
||||
case query.Type_ENUM:
|
||||
return val.EnumEnc
|
||||
case query.Type_SET:
|
||||
return val.SetEnc
|
||||
case query.Type_BINARY:
|
||||
return val.ByteStringEnc
|
||||
case query.Type_VARBINARY:
|
||||
@@ -193,6 +192,8 @@ func encodingFromSqlType(typ query.Type) val.Encoding {
|
||||
return val.StringEnc
|
||||
case query.Type_VARCHAR:
|
||||
return val.StringEnc
|
||||
case query.Type_GEOMETRY:
|
||||
return val.GeometryEnc
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown encoding %v", typ))
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ func newChunker[S message.Serializer](ctx context.Context, cur *Cursor, level in
|
||||
}
|
||||
|
||||
if cur != nil {
|
||||
if err := sc.resume(ctx); err != nil {
|
||||
if err := sc.processPrefix(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func newChunker[S message.Serializer](ctx context.Context, cur *Cursor, level in
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
func (tc *chunker[S]) resume(ctx context.Context) (err error) {
|
||||
func (tc *chunker[S]) processPrefix(ctx context.Context) (err error) {
|
||||
if tc.cur.parent != nil && tc.parent == nil {
|
||||
if err := tc.createParentChunker(ctx); err != nil {
|
||||
return err
|
||||
@@ -108,7 +108,7 @@ func (tc *chunker[S]) resume(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tc.cur.Advance(ctx)
|
||||
err = tc.cur.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -137,116 +137,118 @@ func (tc *chunker[S]) DeletePair(ctx context.Context, _, _ Item) error {
|
||||
return tc.skip(ctx)
|
||||
}
|
||||
|
||||
// AdvanceTo advances the chunker to |next|, the nextMutation mutation point.
|
||||
// AdvanceTo progresses the chunker until its tracking cursor catches up with
|
||||
// |next|, a cursor indicating next key where an edit will be applied.
|
||||
//
|
||||
// The method proceeds from the deepest chunker recursively into its
|
||||
// linked list parents:
|
||||
//
|
||||
// (1) If the current cursor and all of its parents are aligned with |next|,
|
||||
// we are done.
|
||||
//
|
||||
// (2) In lockstep, a) append to the chunker and b) increment the cursor until
|
||||
// we either meet condition (1) and return, or we synchronize and progress to
|
||||
// (3) or (4). Synchronizing means that the current tree being built has
|
||||
// reached a chunk boundary that aligns with a chunk boundary in the old tree
|
||||
// being mutated. Synchronization means chunks between this boundary and
|
||||
// |next| at the current cursor level will be unchanged and can be skipped.
|
||||
//
|
||||
// (3) All parent cursors are (1) current or (2) synchronized, or there are no
|
||||
// parents, and we are done.
|
||||
//
|
||||
// (4) The parent cursors are not aligned. Recurse into the parent. After
|
||||
// parents are aligned, we need to reprocess the prefix of the current node in
|
||||
// anticipation of impending edits that may edit the current chunk. Note that
|
||||
// processPrefix is only necessary for the "fast forward" case where we
|
||||
// synchronized the tree level before reaching |next|.
|
||||
func (tc *chunker[S]) AdvanceTo(ctx context.Context, next *Cursor) error {
|
||||
// There a four cases to handle when advancing the tree chunker
|
||||
// (1) |tc.cur| and |next| are aligned, we're done
|
||||
//
|
||||
// (2) |tc.cur| is "ahead" of |next|. This can be caused by advances
|
||||
// at a lower Level of the tree. In this case, Advance |next|
|
||||
// until it is even with |tc.cur|.
|
||||
//
|
||||
// (3) |tc.cur| is behind |next|, we must consume elements between the
|
||||
// two cursors until |tc.cur| catches up with |next|.
|
||||
//
|
||||
// (4) This is a special case of (3) where we can "Fast-Forward" |tc.cur|
|
||||
// towards |next|. As we consume elements between the two cursors, if
|
||||
// we re-synchronize with the previous tree, we can skip over the
|
||||
// chunks between the re-synchronization boundary and |next|.
|
||||
|
||||
cmp := tc.cur.Compare(next)
|
||||
|
||||
if cmp == 0 { // Case (1)
|
||||
if cmp == 0 { // step (1)
|
||||
return nil
|
||||
}
|
||||
|
||||
if cmp > 0 { // Case (2)
|
||||
} else if cmp > 0 {
|
||||
//todo(max): this appears to be a result of a seek() bug, where
|
||||
// we navigate to the end of the previous chunk rather than the
|
||||
// beginning of the next chunk. I think this is basically a one-off
|
||||
// error.
|
||||
for tc.cur.Compare(next) > 0 {
|
||||
if _, err := next.Advance(ctx); err != nil {
|
||||
if err := next.Advance(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
fastForward := false
|
||||
split, err := tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), tc.cur.currentSubtreeSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for tc.cur.Compare(next) < 0 { // Case (3) or (4)
|
||||
|
||||
// append items until we catchup with |next|, or until
|
||||
// we resynchronize with the previous tree.
|
||||
ok, err := tc.append(ctx,
|
||||
tc.cur.CurrentKey(),
|
||||
tc.cur.CurrentValue(),
|
||||
tc.cur.currentSubtreeSize())
|
||||
for !(split && tc.cur.atNodeEnd()) { // step (2)
|
||||
err = tc.cur.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: if |ok| is true, but |tc.cur.atNodeEnd()| is false,
|
||||
// then we've de-synchronized with the previous tree.
|
||||
|
||||
if ok && tc.cur.atNodeEnd() { // re-synchronized at |tc.Level|
|
||||
|
||||
if tc.cur.parent != nil {
|
||||
if tc.cur.parent.Compare(next.parent) < 0 { // Case (4)
|
||||
// |tc| re-synchronized at |tc.Level|, but we're still behind |next|.
|
||||
// We can Advance |tc| at Level+1 to get to |next| faster.
|
||||
fastForward = true
|
||||
}
|
||||
|
||||
// Here we need to Advance the chunker's cursor, but calling
|
||||
// tc.cur.Advance() would needlessly fetch another chunk at the
|
||||
// current Level. Instead, we only Advance the parent.
|
||||
_, err := tc.cur.parent.advanceInBounds(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// |tc.cur| is now inconsistent with its parent, Invalidate it.
|
||||
tc.cur.Invalidate()
|
||||
}
|
||||
|
||||
break
|
||||
if cmp = tc.cur.Compare(next); cmp >= 0 {
|
||||
// we caught up before synchronizing
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := tc.cur.Advance(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if tc.parent != nil && next.parent != nil {
|
||||
// At this point we've either caught up to |next|, or we've
|
||||
// re-synchronized at |tc.Level| and we're fast-forwarding
|
||||
err := tc.parent.AdvanceTo(ctx, next.parent)
|
||||
split, err = tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), tc.cur.currentSubtreeSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We may have invalidated cursors as we re-synchronized,
|
||||
// so copy |next| here.
|
||||
if tc.cur.parent == nil || next.parent == nil { // step (3)
|
||||
// end of tree
|
||||
tc.cur.copy(next)
|
||||
return nil
|
||||
}
|
||||
|
||||
if tc.cur.parent.Compare(next.parent) == 0 { // step (3)
|
||||
// (rare) new tree synchronized with old tree at the
|
||||
// same time as the cursor caught up to the next mutation point
|
||||
tc.cur.copy(next)
|
||||
return nil
|
||||
}
|
||||
|
||||
// step(4)
|
||||
|
||||
// This optimization is logically equivalent to advancing
|
||||
// current cursor. Because we just wrote a chunk, we are
|
||||
// at a boundary and can simply increment the parent.
|
||||
err = tc.cur.parent.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tc.cur.invalidate()
|
||||
|
||||
// no more pending chunks at this level, recurse
|
||||
// into parent
|
||||
err = tc.parent.AdvanceTo(ctx, next.parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// fast forward to the edit index at this level
|
||||
tc.cur.copy(next)
|
||||
|
||||
if fastForward { // Case (4)
|
||||
// we fast-forwarded to the current chunk, so we
|
||||
// need to process its prefix
|
||||
if err := tc.resume(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
// incoming edit can affect the entire chunk, process the prefix
|
||||
err = tc.processPrefix(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *chunker[S]) skip(ctx context.Context) error {
|
||||
_, err := tc.cur.Advance(ctx)
|
||||
err := tc.cur.Advance(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// Append adds a new key-value pair to the chunker, validating the new pair to ensure
|
||||
// that chunks are well-formed. Key-value pairs are appended atomically a chunk boundary
|
||||
// may be made before or after the pair, but not between them.
|
||||
// may be made before or after the pair, but not between them. Returns true if chunk boundary
|
||||
// was split.
|
||||
func (tc *chunker[S]) append(ctx context.Context, key, value Item, subtree uint64) (bool, error) {
|
||||
// When adding new key-value pairs to an in-progress chunk, we must enforce 3 invariants
|
||||
// (1) Key-value pairs are stored in the same Node.
|
||||
@@ -374,12 +376,12 @@ func (tc *chunker[S]) Done(ctx context.Context) (Node, error) {
|
||||
|
||||
// At this point, we know |tc.keys| contains every item at this Level of the tree.
|
||||
// To see this, consider that there are two ways items can enter |tc.keys|.
|
||||
// (1) as the result of resume() with the cursor on anything other than the first item in the Node
|
||||
// (1) as the result of processPrefix() with the cursor on anything other than the first item in the Node
|
||||
// (2) as a result of a child chunker hitting an explicit chunk boundary during either Append() or finalize().
|
||||
//
|
||||
// The only way there can be no items in some parent chunker's |tc.keys| is if this chunker began with
|
||||
// a cursor within its first existing chunk (and thus all parents resume()'d with a cursor on their first item) and
|
||||
// continued through all sebsequent items without creating any explicit chunk boundaries (and thus never sent any
|
||||
// a cursor within its first existing chunk (and thus all parents processPrefix()'d with a cursor on their first item) and
|
||||
// continued through all subsequent items without creating any explicit chunk boundaries (and thus never sent any
|
||||
// items up to a parent as a result of chunking). Therefore, this chunker's |tc.keys| must contain all items
|
||||
// within the current Node.
|
||||
|
||||
@@ -414,14 +416,14 @@ func (tc *chunker[S]) finalizeCursor(ctx context.Context) (err error) {
|
||||
break // boundary occurred at same place in old & new Node
|
||||
}
|
||||
|
||||
_, err = tc.cur.Advance(ctx)
|
||||
err = tc.cur.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if tc.cur.parent != nil {
|
||||
_, err := tc.cur.parent.Advance(ctx)
|
||||
err := tc.cur.parent.Advance(ctx)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -89,8 +89,7 @@ func iterTree(ctx context.Context, ns NodeStore, nd Node, cb func(item Item) err
|
||||
return err
|
||||
}
|
||||
|
||||
ok := true
|
||||
for ok {
|
||||
for !cur.outOfBounds() {
|
||||
err = cb(cur.CurrentKey())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -101,7 +100,7 @@ func iterTree(ctx context.Context, ns NodeStore, nd Node, cb func(item Item) err
|
||||
return err
|
||||
}
|
||||
|
||||
ok, err = cur.Advance(ctx)
|
||||
err = cur.Advance(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ func sendRemoved(ctx context.Context, from *Cursor) (diff Diff, err error) {
|
||||
From: from.CurrentValue(),
|
||||
}
|
||||
|
||||
if _, err = from.Advance(ctx); err != nil {
|
||||
if err = from.Advance(ctx); err != nil {
|
||||
return Diff{}, err
|
||||
}
|
||||
return
|
||||
@@ -109,7 +109,7 @@ func sendAdded(ctx context.Context, to *Cursor) (diff Diff, err error) {
|
||||
To: to.CurrentValue(),
|
||||
}
|
||||
|
||||
if _, err = to.Advance(ctx); err != nil {
|
||||
if err = to.Advance(ctx); err != nil {
|
||||
return Diff{}, err
|
||||
}
|
||||
return
|
||||
@@ -123,10 +123,10 @@ func sendModified(ctx context.Context, from, to *Cursor) (diff Diff, err error)
|
||||
To: to.CurrentValue(),
|
||||
}
|
||||
|
||||
if _, err = from.Advance(ctx); err != nil {
|
||||
if err = from.Advance(ctx); err != nil {
|
||||
return Diff{}, err
|
||||
}
|
||||
if _, err = to.Advance(ctx); err != nil {
|
||||
if err = to.Advance(ctx); err != nil {
|
||||
return Diff{}, err
|
||||
}
|
||||
return
|
||||
@@ -160,10 +160,10 @@ func skipCommon(ctx context.Context, from, to *Cursor) (err error) {
|
||||
// case we need to Compare parents again.
|
||||
parentsAreNew = from.atNodeEnd() || to.atNodeEnd()
|
||||
|
||||
if _, err = from.Advance(ctx); err != nil {
|
||||
if err = from.Advance(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = to.Advance(ctx); err != nil {
|
||||
if err = to.Advance(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -183,7 +183,7 @@ func skipCommonParents(ctx context.Context, from, to *Cursor) (err error) {
|
||||
}
|
||||
from.skipToNodeStart()
|
||||
} else {
|
||||
from.Invalidate()
|
||||
from.invalidate()
|
||||
}
|
||||
|
||||
if to.parent.Valid() {
|
||||
@@ -192,7 +192,7 @@ func skipCommonParents(ctx context.Context, from, to *Cursor) (err error) {
|
||||
}
|
||||
to.skipToNodeStart()
|
||||
} else {
|
||||
to.Invalidate()
|
||||
to.invalidate()
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
@@ -26,6 +26,32 @@ type MutationIter interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// ApplyMutations applies a sorted series of edits to a NodeStore,
|
||||
// returning the new root Node.
|
||||
//
|
||||
// The algorithm is structured as follows:
|
||||
//
|
||||
// - Create a new chunker, the main interface for building a new
|
||||
// tree.
|
||||
// - Create two cursors into the previous tree. Both cursors
|
||||
// track key indexes in the old keyspace. The first tracks where
|
||||
// a new edit will be applied relative to the old keyspace.
|
||||
// The second indicates the most recent edit in the new tree
|
||||
// relative to the old keyspace. The second cursor is embedded in
|
||||
// the chunker, maintained by the chunker, and necessary precedes
|
||||
// the first.
|
||||
//
|
||||
// - For every edit, first identify the key index in the old keyspace
|
||||
// where the edit will be applied, and move the tracking cursor to
|
||||
// that index.
|
||||
// - Advance the chunker and the second cursor to the new edit point.
|
||||
// Refer to the chunker.AdvanceTo docstring for details.
|
||||
// - Add the edit to the chunker. This applies the edit to the in-progress
|
||||
// NodeStore. The new NodeStore may expand or shrink relative to the
|
||||
// old tree, but these details are internal to the chunker.
|
||||
// - Repeat for every edit.
|
||||
//
|
||||
// - Finalize the chunker and resolve the tree's new root Node.
|
||||
func ApplyMutations[S message.Serializer](
|
||||
ctx context.Context,
|
||||
ns NodeStore,
|
||||
@@ -67,11 +93,7 @@ func ApplyMutations[S message.Serializer](
|
||||
}
|
||||
|
||||
// check for no-op mutations
|
||||
if oldValue == nil && newValue == nil {
|
||||
newKey, newValue = edits.NextMutation(ctx)
|
||||
continue // already non-present
|
||||
}
|
||||
if oldValue != nil && equalValues(newValue, oldValue) {
|
||||
if equalValues(newValue, oldValue) {
|
||||
newKey, newValue = edits.NextMutation(ctx)
|
||||
continue // same newValue
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ package tree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/dolthub/dolt/go/store/hash"
|
||||
@@ -91,11 +90,11 @@ func NewCursorPastEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor,
|
||||
}
|
||||
|
||||
// Advance |cur| past the end
|
||||
ok, err := cur.Advance(ctx)
|
||||
err = cur.Advance(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
if cur.idx != int(cur.nd.count) {
|
||||
panic("expected |ok| to be false")
|
||||
}
|
||||
|
||||
@@ -215,10 +214,6 @@ func (cur *Cursor) Valid() bool {
|
||||
cur.idx < int(cur.nd.count)
|
||||
}
|
||||
|
||||
func (cur *Cursor) Invalidate() {
|
||||
cur.idx = math.MinInt32
|
||||
}
|
||||
|
||||
func (cur *Cursor) CurrentKey() Item {
|
||||
return cur.nd.GetKey(cur.idx)
|
||||
}
|
||||
@@ -273,6 +268,8 @@ func (cur *Cursor) atNodeStart() bool {
|
||||
return cur.idx == 0
|
||||
}
|
||||
|
||||
// atNodeEnd returns true if the cursor's current |idx|
|
||||
// points to the last node item
|
||||
func (cur *Cursor) atNodeEnd() bool {
|
||||
lastKeyIdx := int(cur.nd.count - 1)
|
||||
return cur.idx == lastKeyIdx
|
||||
@@ -287,16 +284,20 @@ func (cur *Cursor) level() uint64 {
|
||||
return uint64(cur.nd.Level())
|
||||
}
|
||||
|
||||
func (cur *Cursor) seek(ctx context.Context, item Item, cb CompareFn) (err error) {
|
||||
// seek updates the cursor's node to one whose range spans the key's value, or the last
|
||||
// node if the key is greater than all existing keys.
|
||||
// If a node does not contain the key, we recurse upwards to the parent cursor. If the
|
||||
// node contains a key, we recurse downwards into child nodes.
|
||||
func (cur *Cursor) seek(ctx context.Context, key Item, cb CompareFn) (err error) {
|
||||
inBounds := true
|
||||
if cur.parent != nil {
|
||||
inBounds = inBounds && cb(item, cur.firstKey()) >= 0
|
||||
inBounds = inBounds && cb(item, cur.lastKey()) <= 0
|
||||
inBounds = inBounds && cb(key, cur.firstKey()) >= 0
|
||||
inBounds = inBounds && cb(key, cur.lastKey()) <= 0
|
||||
}
|
||||
|
||||
if !inBounds {
|
||||
// |item| is outside the bounds of |cur.nd|, search up the tree
|
||||
err = cur.parent.seek(ctx, item, cb)
|
||||
err = cur.parent.seek(ctx, key, cb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -309,7 +310,7 @@ func (cur *Cursor) seek(ctx context.Context, item Item, cb CompareFn) (err error
|
||||
}
|
||||
}
|
||||
|
||||
cur.idx = cur.search(item, cb)
|
||||
cur.idx = cur.search(key, cb)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -324,112 +325,119 @@ func (cur *Cursor) search(item Item, cb CompareFn) (idx int) {
|
||||
return idx
|
||||
}
|
||||
|
||||
// todo(andy): improve the combined interface of Advance() and advanceInBounds().
|
||||
// currently the returned boolean indicates if the cursor was able to Advance,
|
||||
// which isn't usually useful information
|
||||
// invalidate sets the cursor's index to the node count.
|
||||
func (cur *Cursor) invalidate() {
|
||||
cur.idx = int(cur.nd.count)
|
||||
}
|
||||
|
||||
func (cur *Cursor) Advance(ctx context.Context) (bool, error) {
|
||||
ok, err := cur.advanceInBounds(ctx)
|
||||
// hasNext returns true if we do not need to recursively
|
||||
// check the parent to know that the current cursor
|
||||
// has more keys. hasNext can be false even if parent
|
||||
// cursors are not exhausted.
|
||||
func (cur *Cursor) hasNext() bool {
|
||||
return cur.idx < int(cur.nd.count)-1
|
||||
}
|
||||
|
||||
// hasPrev returns true if the current node has preceding
|
||||
// keys. hasPrev can be false even in a parent node has
|
||||
// preceding keys.
|
||||
func (cur *Cursor) hasPrev() bool {
|
||||
return cur.idx > 0
|
||||
}
|
||||
|
||||
// outOfBounds returns true if the current cursor and
|
||||
// all parents are exhausted.
|
||||
func (cur *Cursor) outOfBounds() bool {
|
||||
return cur.idx < 0 || cur.idx >= int(cur.nd.count)
|
||||
}
|
||||
|
||||
// Advance either increments the current key index by one,
|
||||
// or has reached the end of the current node and skips to the next
|
||||
// child of the parent cursor, recursively if necessary, returning
|
||||
// either an error or nil.
|
||||
//
|
||||
// More specifically, one of three things happens:
|
||||
//
|
||||
// 1) The current chunk still has keys, iterate to
|
||||
// the next |idx|;
|
||||
//
|
||||
// 2) We've exhausted the current cursor, but there is at least
|
||||
// one |parent| cursor with more keys. We find that |parent| recursively,
|
||||
// perform step (1), and then have every child initialize itself
|
||||
// using the new |parent|.
|
||||
//
|
||||
// 3) We've exhausted the current cursor and every |parent|. Jump
|
||||
// to an end state (idx = node.count).
|
||||
func (cur *Cursor) Advance(ctx context.Context) error {
|
||||
if cur.hasNext() {
|
||||
cur.idx++
|
||||
return nil
|
||||
}
|
||||
|
||||
if cur.parent == nil {
|
||||
cur.invalidate()
|
||||
return nil
|
||||
}
|
||||
|
||||
// recursively increment the parent
|
||||
err := cur.parent.Advance(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
cur.idx = int(cur.nd.count)
|
||||
return err
|
||||
}
|
||||
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (cur *Cursor) advanceInBounds(ctx context.Context) (bool, error) {
|
||||
lastKeyIdx := int(cur.nd.count - 1)
|
||||
if cur.idx < lastKeyIdx {
|
||||
cur.idx += 1
|
||||
return true, nil
|
||||
if cur.parent.outOfBounds() {
|
||||
// exhausted every parent cursor
|
||||
cur.invalidate()
|
||||
return nil
|
||||
}
|
||||
|
||||
if cur.idx == int(cur.nd.count) {
|
||||
// |cur| is already out of bounds
|
||||
return false, nil
|
||||
}
|
||||
|
||||
assertTrue(cur.atNodeEnd())
|
||||
|
||||
if cur.parent != nil {
|
||||
ok, err := cur.parent.advanceInBounds(ctx)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if ok {
|
||||
// at end of currentPair chunk and there are more
|
||||
err := cur.fetchNode(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cur.skipToNodeStart()
|
||||
cur.subtrees = nil // lazy load
|
||||
|
||||
return true, nil
|
||||
}
|
||||
// if not |ok|, then every parent, grandparent, etc.,
|
||||
// failed to advanceInBounds(): we're past the end
|
||||
// of the prolly tree.
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (cur *Cursor) Retreat(ctx context.Context) (bool, error) {
|
||||
ok, err := cur.retreatInBounds(ctx)
|
||||
// new parent cursor points to new cur node
|
||||
err = cur.fetchNode(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
cur.idx = -1
|
||||
return err
|
||||
}
|
||||
|
||||
return ok, nil
|
||||
cur.skipToNodeStart()
|
||||
cur.subtrees = nil // lazy load
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cur *Cursor) retreatInBounds(ctx context.Context) (bool, error) {
|
||||
if cur.idx > 0 {
|
||||
cur.idx -= 1
|
||||
return true, nil
|
||||
// Retreat decrements to the previous key, if necessary by
|
||||
// recursively decrementing parent nodes.
|
||||
func (cur *Cursor) Retreat(ctx context.Context) error {
|
||||
if cur.hasPrev() {
|
||||
cur.idx--
|
||||
return nil
|
||||
}
|
||||
|
||||
if cur.idx == -1 {
|
||||
// |cur| is already out of bounds
|
||||
return false, nil
|
||||
if cur.parent == nil {
|
||||
cur.invalidate()
|
||||
return nil
|
||||
}
|
||||
|
||||
assertTrue(cur.atNodeStart())
|
||||
|
||||
if cur.parent != nil {
|
||||
ok, err := cur.parent.retreatInBounds(ctx)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if ok {
|
||||
err := cur.fetchNode(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cur.skipToNodeEnd()
|
||||
cur.subtrees = nil // lazy load
|
||||
|
||||
return true, nil
|
||||
}
|
||||
// if not |ok|, then every parent, grandparent, etc.,
|
||||
// failed to retreatInBounds(): we're before the start.
|
||||
// of the prolly tree.
|
||||
// recursively decrement the parent
|
||||
err := cur.parent.Retreat(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return false, nil
|
||||
if cur.parent.outOfBounds() {
|
||||
// exhausted every parent cursor
|
||||
cur.invalidate()
|
||||
return nil
|
||||
}
|
||||
|
||||
// new parent cursor points to new cur node
|
||||
err = cur.fetchNode(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cur.skipToNodeEnd()
|
||||
cur.subtrees = nil // lazy load
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fetchNode loads the Node that the cursor index points to.
|
||||
@@ -441,6 +449,19 @@ func (cur *Cursor) fetchNode(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compare returns the highest relative index difference
|
||||
// between two cursor trees. A parent has a higher precedence
|
||||
// than its child.
|
||||
//
|
||||
// Ex:
|
||||
//
|
||||
// cur: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 2
|
||||
// other: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 4
|
||||
// res => -2 (from level 0)
|
||||
//
|
||||
// cur: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 2
|
||||
// other: L3 -> 4, L2 -> 3, L1 -> 5, L0 -> 4
|
||||
// res => +1 (from level 2)
|
||||
func (cur *Cursor) Compare(other *Cursor) int {
|
||||
return compareCursors(cur, other)
|
||||
}
|
||||
|
||||
@@ -23,10 +23,10 @@ import (
|
||||
"github.com/dolthub/dolt/go/store/hash"
|
||||
)
|
||||
|
||||
// Geometry represents any of the types Point, Linestring, or Polygon.
|
||||
// Geometry represents any of the types Point, LineString, or Polygon.
|
||||
// TODO: Generics maybe?
|
||||
type Geometry struct {
|
||||
Inner Value // Can be types.Point, types.Linestring, or types.Polygon
|
||||
Inner Value // Can be types.Point, types.LineString, or types.Polygon
|
||||
}
|
||||
|
||||
// Value interface
|
||||
@@ -87,7 +87,7 @@ func (v Geometry) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
|
||||
WriteEWKBHeader(inner, buf)
|
||||
WriteEWKBPointData(inner, buf[geometry.EWKBHeaderSize:])
|
||||
w.writeString(string(buf))
|
||||
case Linestring:
|
||||
case LineString:
|
||||
// Allocate buffer for linestring
|
||||
buf := make([]byte, geometry.EWKBHeaderSize+LengthSize+geometry.PointSize*len(inner.Points))
|
||||
// Write header and data to buffer
|
||||
@@ -119,7 +119,7 @@ func readGeometry(nbf *NomsBinFormat, b *valueDecoder) (Geometry, error) {
|
||||
switch geomType {
|
||||
case geometry.PointType:
|
||||
inner = ParseEWKBPoint(buf[geometry.EWKBHeaderSize:], srid)
|
||||
case geometry.LinestringType:
|
||||
case geometry.LineStringType:
|
||||
inner = ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid)
|
||||
case geometry.PolygonType:
|
||||
inner = ParseEWKBPoly(buf[geometry.EWKBHeaderSize:], srid)
|
||||
@@ -136,7 +136,7 @@ func (v Geometry) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, erro
|
||||
switch geomType {
|
||||
case geometry.PointType:
|
||||
inner = ParseEWKBPoint(buf[geometry.EWKBHeaderSize:], srid)
|
||||
case geometry.LinestringType:
|
||||
case geometry.LineStringType:
|
||||
inner = ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid)
|
||||
case geometry.PolygonType:
|
||||
inner = ParseEWKBPoly(buf[geometry.EWKBHeaderSize:], srid)
|
||||
|
||||
@@ -31,20 +31,20 @@ const (
|
||||
LengthSize = 4
|
||||
)
|
||||
|
||||
// Linestring is a Noms Value wrapper around a string.
|
||||
type Linestring struct {
|
||||
// LineString is a Noms Value wrapper around a string.
|
||||
type LineString struct {
|
||||
SRID uint32
|
||||
Points []Point
|
||||
}
|
||||
|
||||
// Value interface
|
||||
func (v Linestring) Value(ctx context.Context) (Value, error) {
|
||||
func (v LineString) Value(ctx context.Context) (Value, error) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (v Linestring) Equals(other Value) bool {
|
||||
func (v LineString) Equals(other Value) bool {
|
||||
// Compare types
|
||||
v2, ok := other.(Linestring)
|
||||
v2, ok := other.(LineString)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
@@ -65,11 +65,11 @@ func (v Linestring) Equals(other Value) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (v Linestring) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
|
||||
func (v LineString) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
|
||||
// Compare types
|
||||
v2, ok := other.(Linestring)
|
||||
v2, ok := other.(LineString)
|
||||
if !ok {
|
||||
return LinestringKind < other.Kind(), nil
|
||||
return LineStringKind < other.Kind(), nil
|
||||
}
|
||||
// TODO: should I even take this into account?
|
||||
// Compare SRID
|
||||
@@ -97,32 +97,32 @@ func (v Linestring) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error)
|
||||
return len1 < len2, nil
|
||||
}
|
||||
|
||||
func (v Linestring) Hash(nbf *NomsBinFormat) (hash.Hash, error) {
|
||||
func (v LineString) Hash(nbf *NomsBinFormat) (hash.Hash, error) {
|
||||
return getHash(v, nbf)
|
||||
}
|
||||
|
||||
func (v Linestring) isPrimitive() bool {
|
||||
func (v LineString) isPrimitive() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (v Linestring) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
|
||||
func (v LineString) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v Linestring) typeOf() (*Type, error) {
|
||||
return PrimitiveTypeMap[LinestringKind], nil
|
||||
func (v LineString) typeOf() (*Type, error) {
|
||||
return PrimitiveTypeMap[LineStringKind], nil
|
||||
}
|
||||
|
||||
func (v Linestring) Kind() NomsKind {
|
||||
return LinestringKind
|
||||
func (v LineString) Kind() NomsKind {
|
||||
return LineStringKind
|
||||
}
|
||||
|
||||
func (v Linestring) valueReadWriter() ValueReadWriter {
|
||||
func (v LineString) valueReadWriter() ValueReadWriter {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteEWKBLineData converts a Line into a byte array in EWKB format
|
||||
func WriteEWKBLineData(l Linestring, buf []byte) {
|
||||
func WriteEWKBLineData(l LineString, buf []byte) {
|
||||
// Write length of linestring
|
||||
binary.LittleEndian.PutUint32(buf[:LengthSize], uint32(len(l.Points)))
|
||||
// Append each point
|
||||
@@ -131,8 +131,8 @@ func WriteEWKBLineData(l Linestring, buf []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func (v Linestring) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
|
||||
err := LinestringKind.writeTo(w, nbf)
|
||||
func (v LineString) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
|
||||
err := LineStringKind.writeTo(w, nbf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -148,9 +148,9 @@ func (v Linestring) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseEWKBLine converts the data portion of a WKB point to Linestring
|
||||
// ParseEWKBLine converts the data portion of a WKB point to LineString
|
||||
// Very similar logic to the function in GMS
|
||||
func ParseEWKBLine(buf []byte, srid uint32) Linestring {
|
||||
func ParseEWKBLine(buf []byte, srid uint32) LineString {
|
||||
// Read length of linestring
|
||||
numPoints := binary.LittleEndian.Uint32(buf[:4])
|
||||
|
||||
@@ -160,32 +160,32 @@ func ParseEWKBLine(buf []byte, srid uint32) Linestring {
|
||||
points[i] = ParseEWKBPoint(buf[LengthSize+geometry.PointSize*i:LengthSize+geometry.PointSize*(i+1)], srid)
|
||||
}
|
||||
|
||||
return Linestring{SRID: srid, Points: points}
|
||||
return LineString{SRID: srid, Points: points}
|
||||
}
|
||||
|
||||
func readLinestring(nbf *NomsBinFormat, b *valueDecoder) (Linestring, error) {
|
||||
func readLineString(nbf *NomsBinFormat, b *valueDecoder) (LineString, error) {
|
||||
buf := []byte(b.ReadString())
|
||||
srid, _, geomType := geometry.ParseEWKBHeader(buf)
|
||||
if geomType != geometry.LinestringType {
|
||||
return Linestring{}, errors.New("not a linestring")
|
||||
if geomType != geometry.LineStringType {
|
||||
return LineString{}, errors.New("not a linestring")
|
||||
}
|
||||
return ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
}
|
||||
|
||||
func (v Linestring) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, error) {
|
||||
func (v LineString) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, error) {
|
||||
buf := []byte(b.ReadString())
|
||||
srid, _, geomType := geometry.ParseEWKBHeader(buf)
|
||||
if geomType != geometry.LinestringType {
|
||||
if geomType != geometry.LineStringType {
|
||||
return nil, errors.New("not a linestring")
|
||||
}
|
||||
return ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
}
|
||||
|
||||
func (v Linestring) skip(nbf *NomsBinFormat, b *binaryNomsReader) {
|
||||
func (v LineString) skip(nbf *NomsBinFormat, b *binaryNomsReader) {
|
||||
b.skipString()
|
||||
}
|
||||
|
||||
func (v Linestring) HumanReadableString() string {
|
||||
func (v LineString) HumanReadableString() string {
|
||||
points := make([]string, len(v.Points))
|
||||
for i, p := range v.Points {
|
||||
points[i] = p.HumanReadableString()
|
||||
|
||||
@@ -58,7 +58,7 @@ const (
|
||||
JSONKind
|
||||
GeometryKind
|
||||
PointKind
|
||||
LinestringKind
|
||||
LineStringKind
|
||||
PolygonKind
|
||||
|
||||
SerialMessageKind
|
||||
@@ -92,7 +92,7 @@ func init() {
|
||||
KindToType[JSONKind] = JSON{}
|
||||
KindToType[GeometryKind] = Geometry{}
|
||||
KindToType[PointKind] = Point{}
|
||||
KindToType[LinestringKind] = Linestring{}
|
||||
KindToType[LineStringKind] = LineString{}
|
||||
KindToType[PolygonKind] = Polygon{}
|
||||
KindToType[SerialMessageKind] = SerialMessage{}
|
||||
KindToType[TupleRowStorageKind] = TupleRowStorage{}
|
||||
@@ -121,7 +121,7 @@ func init() {
|
||||
SupportedKinds[JSONKind] = true
|
||||
SupportedKinds[GeometryKind] = true
|
||||
SupportedKinds[PointKind] = true
|
||||
SupportedKinds[LinestringKind] = true
|
||||
SupportedKinds[LineStringKind] = true
|
||||
SupportedKinds[PolygonKind] = true
|
||||
SupportedKinds[SerialMessageKind] = true
|
||||
SupportedKinds[TupleRowStorageKind] = true
|
||||
@@ -155,7 +155,7 @@ var KindToString = map[NomsKind]string{
|
||||
JSONKind: "JSON",
|
||||
GeometryKind: "Geometry",
|
||||
PointKind: "Point",
|
||||
LinestringKind: "Linestring",
|
||||
LineStringKind: "LineString",
|
||||
PolygonKind: "Polygon",
|
||||
SerialMessageKind: "SerialMessage",
|
||||
TupleRowStorageKind: "TupleRowStorage",
|
||||
@@ -180,7 +180,7 @@ func isKindOrderedByValue(k NomsKind) bool {
|
||||
func IsGeometryKind(k NomsKind) bool {
|
||||
switch k {
|
||||
case PointKind,
|
||||
LinestringKind,
|
||||
LineStringKind,
|
||||
PolygonKind,
|
||||
GeometryKind:
|
||||
return true
|
||||
|
||||
@@ -81,8 +81,8 @@ func WriteEWKBHeader(v interface{}, buf []byte) {
|
||||
case Point:
|
||||
// Write SRID and type
|
||||
geometry.WriteEWKBHeader(buf, v.SRID, geometry.PointType)
|
||||
case Linestring:
|
||||
geometry.WriteEWKBHeader(buf, v.SRID, geometry.LinestringType)
|
||||
case LineString:
|
||||
geometry.WriteEWKBHeader(buf, v.SRID, geometry.LineStringType)
|
||||
case Polygon:
|
||||
geometry.WriteEWKBHeader(buf, v.SRID, geometry.PolygonType)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
// Polygon is a Noms Value wrapper around a string.
|
||||
type Polygon struct {
|
||||
SRID uint32
|
||||
Lines []Linestring
|
||||
Lines []LineString
|
||||
}
|
||||
|
||||
// Value interface
|
||||
@@ -157,7 +157,7 @@ func ParseEWKBPoly(buf []byte, srid uint32) Polygon {
|
||||
|
||||
// Parse lines
|
||||
s := LengthSize
|
||||
lines := make([]Linestring, numLines)
|
||||
lines := make([]LineString, numLines)
|
||||
for i := uint32(0); i < numLines; i++ {
|
||||
lines[i] = ParseEWKBLine(buf[s:], srid)
|
||||
s += LengthSize * geometry.PointSize * len(lines[i].Points)
|
||||
|
||||
@@ -49,7 +49,7 @@ type CodecReader interface {
|
||||
ReadDecimal() (decimal.Decimal, error)
|
||||
ReadGeometry() (Geometry, error)
|
||||
ReadPoint() (Point, error)
|
||||
ReadLinestring() (Linestring, error)
|
||||
ReadLineString() (LineString, error)
|
||||
ReadPolygon() (Polygon, error)
|
||||
ReadBlob() (Blob, error)
|
||||
ReadJSON() (JSON, error)
|
||||
@@ -93,8 +93,8 @@ func (r *valueDecoder) ReadPoint() (Point, error) {
|
||||
return readPoint(nil, r)
|
||||
}
|
||||
|
||||
func (r *valueDecoder) ReadLinestring() (Linestring, error) {
|
||||
return readLinestring(nil, r)
|
||||
func (r *valueDecoder) ReadLineString() (LineString, error) {
|
||||
return readLineString(nil, r)
|
||||
}
|
||||
|
||||
func (r *valueDecoder) ReadPolygon() (Polygon, error) {
|
||||
@@ -384,7 +384,7 @@ func (r *valueDecoder) readValue(nbf *NomsBinFormat) (Value, error) {
|
||||
switch geomType {
|
||||
case geometry.PointType:
|
||||
return ParseEWKBPoint(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
case geometry.LinestringType:
|
||||
case geometry.LineStringType:
|
||||
return ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
case geometry.PolygonType:
|
||||
return ParseEWKBPoly(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
@@ -399,11 +399,11 @@ func (r *valueDecoder) readValue(nbf *NomsBinFormat) (Value, error) {
|
||||
return nil, ErrUnknownType
|
||||
}
|
||||
return ParseEWKBPoint(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
case LinestringKind:
|
||||
case LineStringKind:
|
||||
r.skipKind()
|
||||
buf := []byte(r.ReadString())
|
||||
srid, _, geomType := geometry.ParseEWKBHeader(buf)
|
||||
if geomType != geometry.LinestringType {
|
||||
if geomType != geometry.LineStringType {
|
||||
return nil, ErrUnknownType
|
||||
}
|
||||
return ParseEWKBLine(buf[geometry.EWKBHeaderSize:], srid), nil
|
||||
@@ -470,7 +470,7 @@ func (r *valueDecoder) SkipValue(nbf *NomsBinFormat) error {
|
||||
case PointKind:
|
||||
r.skipKind()
|
||||
r.skipString()
|
||||
case LinestringKind:
|
||||
case LineStringKind:
|
||||
r.skipKind()
|
||||
r.skipString()
|
||||
case PolygonKind:
|
||||
|
||||
@@ -23,6 +23,8 @@ import (
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/dolthub/dolt/go/gen/fb/serial"
|
||||
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
@@ -38,74 +40,69 @@ const (
|
||||
type ByteSize uint16
|
||||
|
||||
const (
|
||||
int8Size ByteSize = 1
|
||||
uint8Size ByteSize = 1
|
||||
int16Size ByteSize = 2
|
||||
uint16Size ByteSize = 2
|
||||
int32Size ByteSize = 4
|
||||
uint32Size ByteSize = 4
|
||||
int64Size ByteSize = 8
|
||||
uint64Size ByteSize = 8
|
||||
float32Size ByteSize = 4
|
||||
float64Size ByteSize = 8
|
||||
|
||||
hash128Size ByteSize = 16
|
||||
|
||||
int8Size ByteSize = 1
|
||||
uint8Size ByteSize = 1
|
||||
int16Size ByteSize = 2
|
||||
uint16Size ByteSize = 2
|
||||
int32Size ByteSize = 4
|
||||
uint32Size ByteSize = 4
|
||||
int64Size ByteSize = 8
|
||||
uint64Size ByteSize = 8
|
||||
float32Size ByteSize = 4
|
||||
float64Size ByteSize = 8
|
||||
bit64Size ByteSize = 8
|
||||
hash128Size ByteSize = 16
|
||||
yearSize ByteSize = 1
|
||||
dateSize ByteSize = 4
|
||||
timeSize ByteSize = 8
|
||||
datetimeSize ByteSize = 8
|
||||
enumSize ByteSize = 2
|
||||
setSize ByteSize = 8
|
||||
)
|
||||
|
||||
type Encoding uint8
|
||||
type Encoding byte
|
||||
|
||||
// Constant Size Encodings
|
||||
// Fixed Width Encodings
|
||||
const (
|
||||
NullEnc Encoding = 0
|
||||
Int8Enc Encoding = 1
|
||||
Uint8Enc Encoding = 2
|
||||
Int16Enc Encoding = 3
|
||||
Uint16Enc Encoding = 4
|
||||
Int32Enc Encoding = 7
|
||||
Uint32Enc Encoding = 8
|
||||
Int64Enc Encoding = 9
|
||||
Uint64Enc Encoding = 10
|
||||
Float32Enc Encoding = 11
|
||||
Float64Enc Encoding = 12
|
||||
|
||||
Hash128Enc Encoding = 13
|
||||
|
||||
YearEnc Encoding = 14
|
||||
DateEnc Encoding = 15
|
||||
TimeEnc Encoding = 16
|
||||
DatetimeEnc Encoding = 17
|
||||
NullEnc = Encoding(serial.EncodingNull)
|
||||
Int8Enc = Encoding(serial.EncodingInt8)
|
||||
Uint8Enc = Encoding(serial.EncodingUint8)
|
||||
Int16Enc = Encoding(serial.EncodingInt16)
|
||||
Uint16Enc = Encoding(serial.EncodingUint16)
|
||||
Int32Enc = Encoding(serial.EncodingInt32)
|
||||
Uint32Enc = Encoding(serial.EncodingUint32)
|
||||
Int64Enc = Encoding(serial.EncodingInt64)
|
||||
Uint64Enc = Encoding(serial.EncodingUint64)
|
||||
Float32Enc = Encoding(serial.EncodingFloat32)
|
||||
Float64Enc = Encoding(serial.EncodingFloat64)
|
||||
Bit64Enc = Encoding(serial.EncodingBit64)
|
||||
Hash128Enc = Encoding(serial.EncodingHash128)
|
||||
YearEnc = Encoding(serial.EncodingYear)
|
||||
DateEnc = Encoding(serial.EncodingDate)
|
||||
TimeEnc = Encoding(serial.EncodingTime)
|
||||
DatetimeEnc = Encoding(serial.EncodingDatetime)
|
||||
EnumEnc = Encoding(serial.EncodingEnum)
|
||||
SetEnc = Encoding(serial.EncodingSet)
|
||||
|
||||
sentinel Encoding = 127
|
||||
)
|
||||
|
||||
// Variable Size Encodings
|
||||
// Variable Width Encodings
|
||||
const (
|
||||
StringEnc Encoding = 128
|
||||
ByteStringEnc Encoding = 129
|
||||
|
||||
// todo(andy): experimental encodings
|
||||
DecimalEnc Encoding = 130
|
||||
JSONEnc Encoding = 131
|
||||
GeometryEnc Encoding = 133
|
||||
StringEnc = Encoding(serial.EncodingString)
|
||||
ByteStringEnc = Encoding(serial.EncodingBytes)
|
||||
DecimalEnc = Encoding(serial.EncodingDecimal)
|
||||
JSONEnc = Encoding(serial.EncodingJSON)
|
||||
GeometryEnc = Encoding(serial.EncodingGeometry)
|
||||
|
||||
// TODO
|
||||
// BitEnc
|
||||
// CharEnc
|
||||
// VarCharEnc
|
||||
// TextEnc
|
||||
// BinaryEnc
|
||||
// VarBinaryEnc
|
||||
// TextEnc
|
||||
// BlobEnc
|
||||
// JSONEnc
|
||||
// EnumEnc
|
||||
// SetEnc
|
||||
// ExpressionEnc
|
||||
// GeometryEnc
|
||||
)
|
||||
|
||||
func sizeFromType(t Type) (ByteSize, bool) {
|
||||
@@ -130,16 +127,22 @@ func sizeFromType(t Type) (ByteSize, bool) {
|
||||
return float32Size, true
|
||||
case Float64Enc:
|
||||
return float64Size, true
|
||||
case Hash128Enc:
|
||||
return hash128Size, true
|
||||
case YearEnc:
|
||||
return yearSize, true
|
||||
case DateEnc:
|
||||
return dateSize, true
|
||||
//case TimeEnc:
|
||||
// return timeSize, true
|
||||
case TimeEnc:
|
||||
return timeSize, true
|
||||
case DatetimeEnc:
|
||||
return datetimeSize, true
|
||||
case Hash128Enc:
|
||||
return hash128Size, true
|
||||
case EnumEnc:
|
||||
return enumSize, true
|
||||
case SetEnc:
|
||||
return setSize, true
|
||||
case Bit64Enc:
|
||||
return bit64Size, true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
@@ -370,6 +373,18 @@ func compareFloat64(l, r float64) int {
|
||||
}
|
||||
}
|
||||
|
||||
func readBit64(val []byte) uint64 {
|
||||
return readUint64(val)
|
||||
}
|
||||
|
||||
func writeBit64(buf []byte, val uint64) {
|
||||
writeUint64(buf, val)
|
||||
}
|
||||
|
||||
func compareBit64(l, r uint64) int {
|
||||
return compareUint64(l, r)
|
||||
}
|
||||
|
||||
func readDecimal(val []byte) decimal.Decimal {
|
||||
e := readInt32(val[:int32Size])
|
||||
s := readInt8(val[int32Size : int32Size+int8Size])
|
||||
@@ -478,6 +493,30 @@ func compareDatetime(l, r time.Time) int {
|
||||
}
|
||||
}
|
||||
|
||||
func readEnum(val []byte) uint16 {
|
||||
return readUint16(val)
|
||||
}
|
||||
|
||||
func writeEnum(buf []byte, val uint16) {
|
||||
writeUint16(buf, val)
|
||||
}
|
||||
|
||||
func compareEnum(l, r uint16) int {
|
||||
return compareUint16(l, r)
|
||||
}
|
||||
|
||||
func readSet(val []byte) uint64 {
|
||||
return readUint64(val)
|
||||
}
|
||||
|
||||
func writeSet(buf []byte, val uint64) {
|
||||
writeUint64(buf, val)
|
||||
}
|
||||
|
||||
func compareSet(l, r uint64) int {
|
||||
return compareUint64(l, r)
|
||||
}
|
||||
|
||||
func readString(val []byte) string {
|
||||
return stringFromBytes(readByteString(val))
|
||||
}
|
||||
|
||||
@@ -78,6 +78,22 @@ func TestCompare(t *testing.T) {
|
||||
l: encFloat(1), r: encFloat(0),
|
||||
cmp: 1,
|
||||
},
|
||||
// bit
|
||||
{
|
||||
typ: Type{Enc: Bit64Enc},
|
||||
l: encBit(0), r: encBit(0),
|
||||
cmp: 0,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: Bit64Enc},
|
||||
l: encBit(0), r: encBit(1),
|
||||
cmp: -1,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: Bit64Enc},
|
||||
l: encBit(1), r: encBit(0),
|
||||
cmp: 1,
|
||||
},
|
||||
// decimal
|
||||
{
|
||||
typ: Type{Enc: DecimalEnc},
|
||||
@@ -161,6 +177,38 @@ func TestCompare(t *testing.T) {
|
||||
r: encDatetime(time.Date(2000, 11, 01, 01, 01, 01, 00, time.UTC)),
|
||||
cmp: -1,
|
||||
},
|
||||
// enum
|
||||
{
|
||||
typ: Type{Enc: EnumEnc},
|
||||
l: encEnum(0), r: encEnum(0),
|
||||
cmp: 0,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: EnumEnc},
|
||||
l: encEnum(0), r: encEnum(1),
|
||||
cmp: -1,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: EnumEnc},
|
||||
l: encEnum(1), r: encEnum(0),
|
||||
cmp: 1,
|
||||
},
|
||||
// set
|
||||
{
|
||||
typ: Type{Enc: SetEnc},
|
||||
l: encSet(0), r: encSet(0),
|
||||
cmp: 0,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: SetEnc},
|
||||
l: encSet(0), r: encSet(1),
|
||||
cmp: -1,
|
||||
},
|
||||
{
|
||||
typ: Type{Enc: SetEnc},
|
||||
l: encSet(1), r: encSet(0),
|
||||
cmp: 1,
|
||||
},
|
||||
// string
|
||||
{
|
||||
typ: Type{Enc: StringEnc},
|
||||
@@ -231,6 +279,12 @@ func encFloat(f float64) []byte {
|
||||
return buf
|
||||
}
|
||||
|
||||
func encBit(u uint64) []byte {
|
||||
buf := make([]byte, bit64Size)
|
||||
writeBit64(buf, u)
|
||||
return buf
|
||||
}
|
||||
|
||||
func encDecimal(d decimal.Decimal) []byte {
|
||||
buf := make([]byte, sizeOfDecimal(d))
|
||||
writeDecimal(buf, d)
|
||||
@@ -268,6 +322,18 @@ func encDatetime(dt time.Time) []byte {
|
||||
return buf
|
||||
}
|
||||
|
||||
func encEnum(u uint16) []byte {
|
||||
buf := make([]byte, enumSize)
|
||||
writeEnum(buf, u)
|
||||
return buf
|
||||
}
|
||||
|
||||
func encSet(u uint64) []byte {
|
||||
buf := make([]byte, setSize)
|
||||
writeSet(buf, u)
|
||||
return buf
|
||||
}
|
||||
|
||||
func TestCodecRoundTrip(t *testing.T) {
|
||||
t.Run("round trip bool", func(t *testing.T) {
|
||||
roundTripBools(t)
|
||||
@@ -365,6 +431,14 @@ func roundTripUints(t *testing.T) {
|
||||
zero(buf)
|
||||
}
|
||||
|
||||
buf = make([]byte, enumSize)
|
||||
for _, value := range uintegers {
|
||||
exp := uint16(value)
|
||||
writeEnum(buf, exp)
|
||||
assert.Equal(t, exp, readEnum(buf))
|
||||
zero(buf)
|
||||
}
|
||||
|
||||
buf = make([]byte, uint32Size)
|
||||
uintegers = append(uintegers, math.MaxUint32)
|
||||
for _, value := range uintegers {
|
||||
@@ -382,6 +456,22 @@ func roundTripUints(t *testing.T) {
|
||||
assert.Equal(t, exp, readUint64(buf))
|
||||
zero(buf)
|
||||
}
|
||||
|
||||
buf = make([]byte, bit64Size)
|
||||
for _, value := range uintegers {
|
||||
exp := uint64(value)
|
||||
writeBit64(buf, exp)
|
||||
assert.Equal(t, exp, readBit64(buf))
|
||||
zero(buf)
|
||||
}
|
||||
|
||||
buf = make([]byte, setSize)
|
||||
for _, value := range uintegers {
|
||||
exp := uint64(value)
|
||||
writeSet(buf, exp)
|
||||
assert.Equal(t, exp, readSet(buf))
|
||||
zero(buf)
|
||||
}
|
||||
}
|
||||
|
||||
func roundTripFloats(t *testing.T) {
|
||||
|
||||
@@ -159,6 +159,21 @@ func (tb *TupleBuilder) PutFloat64(i int, v float64) {
|
||||
tb.pos += float64Size
|
||||
}
|
||||
|
||||
func (tb *TupleBuilder) PutBit(i int, v uint64) {
|
||||
tb.Desc.expectEncoding(i, Bit64Enc)
|
||||
tb.fields[i] = tb.buf[tb.pos : tb.pos+bit64Size]
|
||||
writeBit64(tb.fields[i], v)
|
||||
tb.pos += bit64Size
|
||||
}
|
||||
|
||||
func (tb *TupleBuilder) PutDecimal(i int, v decimal.Decimal) {
|
||||
tb.Desc.expectEncoding(i, DecimalEnc)
|
||||
sz := sizeOfDecimal(v)
|
||||
tb.fields[i] = tb.buf[tb.pos : tb.pos+sz]
|
||||
writeDecimal(tb.fields[i], v)
|
||||
tb.pos += sz
|
||||
}
|
||||
|
||||
// PutYear writes an int16-encoded year to the ith field of the Tuple being built.
|
||||
func (tb *TupleBuilder) PutYear(i int, v int16) {
|
||||
tb.Desc.expectEncoding(i, YearEnc)
|
||||
@@ -189,12 +204,18 @@ func (tb *TupleBuilder) PutDatetime(i int, v time.Time) {
|
||||
tb.pos += datetimeSize
|
||||
}
|
||||
|
||||
func (tb *TupleBuilder) PutDecimal(i int, v decimal.Decimal) {
|
||||
tb.Desc.expectEncoding(i, DecimalEnc)
|
||||
sz := sizeOfDecimal(v)
|
||||
tb.fields[i] = tb.buf[tb.pos : tb.pos+sz]
|
||||
writeDecimal(tb.fields[i], v)
|
||||
tb.pos += sz
|
||||
func (tb *TupleBuilder) PutEnum(i int, v uint16) {
|
||||
tb.Desc.expectEncoding(i, EnumEnc)
|
||||
tb.fields[i] = tb.buf[tb.pos : tb.pos+enumSize]
|
||||
writeEnum(tb.fields[i], v)
|
||||
tb.pos += enumSize
|
||||
}
|
||||
|
||||
func (tb *TupleBuilder) PutSet(i int, v uint64) {
|
||||
tb.Desc.expectEncoding(i, SetEnc)
|
||||
tb.fields[i] = tb.buf[tb.pos : tb.pos+setSize]
|
||||
writeSet(tb.fields[i], v)
|
||||
tb.pos += setSize
|
||||
}
|
||||
|
||||
// PutString writes a string to the ith field of the Tuple being built.
|
||||
|
||||
@@ -90,6 +90,10 @@ func compare(typ Type, left, right []byte) int {
|
||||
return compareFloat32(readFloat32(left), readFloat32(right))
|
||||
case Float64Enc:
|
||||
return compareFloat64(readFloat64(left), readFloat64(right))
|
||||
case Bit64Enc:
|
||||
return compareBit64(readBit64(left), readBit64(right))
|
||||
case DecimalEnc:
|
||||
return compareDecimal(readDecimal(left), readDecimal(right))
|
||||
case YearEnc:
|
||||
return compareYear(readYear(left), readYear(right))
|
||||
case DateEnc:
|
||||
@@ -98,8 +102,10 @@ func compare(typ Type, left, right []byte) int {
|
||||
return compareTime(readTime(left), readTime(right))
|
||||
case DatetimeEnc:
|
||||
return compareDatetime(readDatetime(left), readDatetime(right))
|
||||
case DecimalEnc:
|
||||
return compareDecimal(readDecimal(left), readDecimal(right))
|
||||
case EnumEnc:
|
||||
return compareEnum(readEnum(left), readEnum(right))
|
||||
case SetEnc:
|
||||
return compareSet(readSet(left), readSet(right))
|
||||
case StringEnc:
|
||||
return compareString(readString(left), readString(right))
|
||||
case ByteStringEnc:
|
||||
|
||||
@@ -241,6 +241,17 @@ func (td TupleDesc) GetFloat64(i int, tup Tuple) (v float64, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
// GetBit reads a uint64 from the ith field of the Tuple.
|
||||
// If the ith field is NULL, |ok| is set to false.
|
||||
func (td TupleDesc) GetBit(i int, tup Tuple) (v uint64, ok bool) {
|
||||
td.expectEncoding(i, Bit64Enc)
|
||||
b := td.GetField(i, tup)
|
||||
if b != nil {
|
||||
v, ok = readBit64(b), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetDecimal reads a float64 from the ith field of the Tuple.
|
||||
// If the ith field is NULL, |ok| is set to false.
|
||||
func (td TupleDesc) GetDecimal(i int, tup Tuple) (v decimal.Decimal, ok bool) {
|
||||
@@ -296,6 +307,28 @@ func (td TupleDesc) GetDatetime(i int, tup Tuple) (v time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
// GetEnum reads a uin16 from the ith field of the Tuple.
|
||||
// If the ith field is NULL, |ok| is set to false.
|
||||
func (td TupleDesc) GetEnum(i int, tup Tuple) (v uint16, ok bool) {
|
||||
td.expectEncoding(i, EnumEnc)
|
||||
b := td.GetField(i, tup)
|
||||
if b != nil {
|
||||
v, ok = readEnum(b), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetSet reads a uint64 from the ith field of the Tuple.
|
||||
// If the ith field is NULL, |ok| is set to false.
|
||||
func (td TupleDesc) GetSet(i int, tup Tuple) (v uint64, ok bool) {
|
||||
td.expectEncoding(i, SetEnc)
|
||||
b := td.GetField(i, tup)
|
||||
if b != nil {
|
||||
v, ok = readSet(b), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetString reads a string from the ith field of the Tuple.
|
||||
// If the ith field is NULL, |ok| is set to false.
|
||||
func (td TupleDesc) GetString(i int, tup Tuple) (v string, ok bool) {
|
||||
@@ -423,19 +456,30 @@ func formatValue(enc Encoding, value []byte) string {
|
||||
case Float64Enc:
|
||||
v := readFloat64(value)
|
||||
return fmt.Sprintf("%f", v)
|
||||
case Bit64Enc:
|
||||
v := readUint64(value)
|
||||
return strconv.FormatUint(v, 10)
|
||||
case DecimalEnc:
|
||||
v := readDecimal(value)
|
||||
return v.String()
|
||||
case YearEnc:
|
||||
v := readYear(value)
|
||||
return strconv.Itoa(int(v))
|
||||
case DateEnc:
|
||||
v := readDate(value)
|
||||
return v.Format("2006-01-02")
|
||||
//case TimeEnc:
|
||||
// // todo(andy)
|
||||
// v := readTime(value)
|
||||
// return v
|
||||
case TimeEnc:
|
||||
v := readTime(value)
|
||||
return strconv.FormatInt(v, 10)
|
||||
case DatetimeEnc:
|
||||
v := readDatetime(value)
|
||||
return v.Format(time.RFC3339)
|
||||
case EnumEnc:
|
||||
v := readEnum(value)
|
||||
return strconv.Itoa(int(v))
|
||||
case SetEnc:
|
||||
v := readSet(value)
|
||||
return strconv.FormatUint(v, 10)
|
||||
case StringEnc:
|
||||
return readString(value)
|
||||
case ByteStringEnc:
|
||||
|
||||
@@ -52,6 +52,14 @@ skip_if_no_aws_tests() {
|
||||
dolt sql -q 'show tables'
|
||||
}
|
||||
|
||||
@test "remotes-aws: can clone an existing aws remote without AWS_SDK_LOAD_CONFIG=1 set." {
|
||||
skip_if_no_aws_tests
|
||||
rm -rf .dolt
|
||||
env -u AWS_SDK_LOAD_CONFIG dolt clone 'aws://['"$DOLT_BATS_AWS_TABLE"':'"$DOLT_BATS_AWS_BUCKET"']/'"$DOLT_BATS_AWS_EXISTING_REPO"
|
||||
cd "$DOLT_BATS_AWS_EXISTING_REPO"
|
||||
dolt sql -q 'show tables'
|
||||
}
|
||||
|
||||
# Matches behavior of other remote types
|
||||
@test "remotes-aws: clone empty aws remote fails" {
|
||||
skip_if_no_aws_tests
|
||||
|
||||
@@ -204,9 +204,9 @@ SQL
|
||||
[ "$status" -eq "0" ]
|
||||
[[ "$output" =~ "table,column,tag" ]] || false
|
||||
[[ "$output" =~ "test2,pk1,6801" ]] || false
|
||||
[[ "$output" =~ "test2,pk2,4776" ]] || false
|
||||
[[ "$output" =~ "test2,v1,10579" ]] || false
|
||||
[[ "$output" =~ "test2,v2,7704" ]] || false
|
||||
[[ "$output" =~ "test2,PK2,4776" ]] || false
|
||||
[[ "$output" =~ "test2,V1,10579" ]] || false
|
||||
[[ "$output" =~ "test2,V2,7704" ]] || false
|
||||
|
||||
dolt diff
|
||||
run dolt diff
|
||||
@@ -214,9 +214,9 @@ SQL
|
||||
[[ "$output" =~ '- `pk2` bigint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '- `v1` varchar(100) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '- `v2` varchar(120),' ]] || false
|
||||
[[ "$output" =~ '+ `pk2` tinyint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `v1` varchar(300) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `v2` varchar(1024) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `PK2` tinyint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `V1` varchar(300) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `V2` varchar(1024) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ 'PRIMARY KEY' ]] || false
|
||||
|
||||
dolt add .
|
||||
@@ -232,8 +232,8 @@ SQL
|
||||
|
||||
run dolt sql -q 'show create table test2'
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ '`pk2` tinyint NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`v1` varchar(300) NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`PK2` tinyint NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`V1` varchar(300) NOT NULL' ]] || false
|
||||
|
||||
run dolt sql -q 'select * from test2' -r csv
|
||||
[ "$status" -eq 0 ]
|
||||
@@ -254,8 +254,8 @@ SQL
|
||||
dolt pull
|
||||
run dolt sql -q 'show create table test2'
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ '`pk2` tinyint NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`v1` varchar(300) NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`PK2` tinyint NOT NULL' ]] || false
|
||||
[[ "$output" =~ '`V1` varchar(300) NOT NULL' ]] || false
|
||||
|
||||
run dolt sql -q 'select * from test2' -r csv
|
||||
[ "$status" -eq 0 ]
|
||||
@@ -270,9 +270,9 @@ SQL
|
||||
[[ "$output" =~ '- `pk2` bigint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '- `v1` varchar(100) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '- `v2` varchar(120),' ]] || false
|
||||
[[ "$output" =~ '+ `pk2` tinyint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `v1` varchar(300) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `v2` varchar(1024) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `PK2` tinyint NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `V1` varchar(300) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ '+ `V2` varchar(1024) NOT NULL,' ]] || false
|
||||
[[ "$output" =~ 'PRIMARY KEY' ]] || false
|
||||
}
|
||||
|
||||
|
||||
@@ -1131,22 +1131,25 @@ END""")
|
||||
}
|
||||
|
||||
@test "sql-server: connect to databases case insensitive" {
|
||||
skip "Database connnection strings are case sensitive and should not be"
|
||||
skiponwindows "Has dependencies that are missing on the Jenkins Windows installation."
|
||||
skip_nbf_dolt_1
|
||||
|
||||
|
||||
mkdir no_dolt && cd no_dolt
|
||||
start_sql_server
|
||||
|
||||
server_query "" 1 "create database Test1"
|
||||
|
||||
|
||||
server_query "" 1 "show databases" "Database\nTest1\ninformation_schema"
|
||||
server_query "test1" 1 "create table a(x int)"
|
||||
server_query "TEST1" 1 "insert into a values (1), (2)"
|
||||
run server_query "test1" 1 "select dolt_commit('-a', '-m', 'new table a')"
|
||||
run server_query "test1" 1 "select dolt_checkout('-b', 'newbranch')"
|
||||
server_query "TEST1/newbranch" 1 "select * from a" "x\n1\n2"
|
||||
server_query "TEST1/NEWBRANCH" 1 "select * from a" "x\n1\n2"
|
||||
multi_query "" 1 "use test1; create table a(x int);"
|
||||
multi_query "" 1 "use TEST1; insert into a values (1), (2);"
|
||||
run multi_query "" 1 "use test1; select dolt_commit('-a', '-m', 'new table a');"
|
||||
run multi_query "" 1 "use test1; select dolt_checkout('-b', 'newbranch');"
|
||||
multi_query "" 1 "use \`TEST1/newbranch\`; select * from a" "x\n1\n2"
|
||||
multi_query "" 1 "use \`test1/newbranch\`; select * from a" "x\n1\n2"
|
||||
server_query "" 1 "use \`TEST1/NEWBRANCH\`" "" "database not found: TEST1/NEWBRANCH"
|
||||
|
||||
multi_query "" 1 "create database test2; use test2; select database();" "database()\ntest2"
|
||||
multi_query "" 1 "use test2; drop database TEST2; select database();" "null"
|
||||
}
|
||||
|
||||
@test "sql-server: create and drop database with --multi-db-dir" {
|
||||
|
||||
@@ -42,6 +42,39 @@ teardown() {
|
||||
[[ "$output" =~ "+---------------------" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-shell: dolt sql shell has mysql db and can create users" {
|
||||
# there does not exist a mysql.db file
|
||||
run ls
|
||||
! [[ "$output" =~ "mysql.db" ]] || false
|
||||
|
||||
# mysql database exists and has privilege tables
|
||||
run dolt sql <<< "show tables from mysql;"
|
||||
[ "$status" -eq "0" ]
|
||||
[[ "$output" =~ "user" ]] || false
|
||||
[[ "$output" =~ "role_edges" ]] || false
|
||||
|
||||
# show users, expect just root user
|
||||
run dolt sql <<< "select user from mysql.user;"
|
||||
[[ "$output" =~ "root" ]] || false
|
||||
! [[ "$output" =~ "new_user" ]] || false
|
||||
|
||||
# create a new user
|
||||
run dolt sql <<< "create user new_user;"
|
||||
[ "$status" -eq "0" ]
|
||||
|
||||
# there should now be a mysql.db file
|
||||
run ls
|
||||
[[ "$output" =~ "mysql.db" ]] || false
|
||||
|
||||
# show users, expect root and new_user
|
||||
run dolt sql <<< "select user from mysql.user;"
|
||||
[[ "$output" =~ "root" ]] || false
|
||||
[[ "$output" =~ "new_user" ]] || false
|
||||
|
||||
# remove mysql.db just in case
|
||||
rm -f mysql.db
|
||||
}
|
||||
|
||||
@test "sql-shell: bad sql in sql shell should error" {
|
||||
run dolt sql <<< "This is bad sql"
|
||||
[ $status -eq 1 ]
|
||||
|
||||
@@ -195,3 +195,68 @@ teardown() {
|
||||
dolt sql -q "create index idx on poly_tbl (a)"
|
||||
}
|
||||
|
||||
@test "sql-spatial-types: SRID defined in column definition in CREATE TABLE" {
|
||||
run dolt sql -q "CREATE TABLE pt (i int primary key, p POINT NOT NULL SRID 1)"
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "unsupported feature: unsupported SRID value" ]] || false
|
||||
|
||||
run dolt sql -q "CREATE TABLE pt (i int primary key, p POINT NOT NULL SRID 0)"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "SHOW CREATE TABLE pt"
|
||||
[[ "$output" =~ "\`p\` point NOT NULL SRID 0" ]] || false
|
||||
|
||||
dolt sql -q "INSERT INTO pt VALUES (1, POINT(5,6))"
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM pt"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "POINT(5 6)" ]] || false
|
||||
|
||||
run dolt sql -q "INSERT INTO pt VALUES (2, ST_GEOMFROMTEXT(ST_ASWKT(POINT(1,2)), 4326))"
|
||||
[ "$status" -eq 1 ]
|
||||
[ "$output" = "The SRID of the geometry does not match the SRID of the column 'p'. The SRID of the geometry is 4326, but the SRID of the column is 0. Consider changing the SRID of the geometry or the SRID property of the column." ]
|
||||
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM pt"
|
||||
[[ ! "$output" =~ "POINT(1 2)" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-spatial-types: SRID defined in column definition in ALTER TABLE" {
|
||||
run dolt sql << SQL
|
||||
CREATE TABLE table1 (i int primary key, p LINESTRING NOT NULL SRID 4326);
|
||||
INSERT INTO table1 VALUES (1, ST_GEOMFROMTEXT(ST_ASWKT(LINESTRING(POINT(0,0),POINT(1,2))), 4326));
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM table1"
|
||||
[[ "$output" =~ "LINESTRING(0 0,1 2)" ]] || false
|
||||
|
||||
run dolt sql -q "ALTER TABLE table1 MODIFY COLUMN p GEOMETRY NOT NULL SRID 4326"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "SHOW CREATE TABLE table1"
|
||||
[[ "$output" =~ "\`p\` geometry NOT NULL SRID 4326" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM table1"
|
||||
[[ "$output" =~ "LINESTRING(0 0,1 2)" ]] || false
|
||||
|
||||
run dolt sql -q "INSERT INTO table1 VALUES (2, ST_SRID(POINT(1,2), 4326))"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM table1"
|
||||
[[ "$output" =~ "LINESTRING(0 0,1 2)" ]] || false
|
||||
[[ "$output" =~ "POINT(2 1)" ]] || false
|
||||
|
||||
run dolt sql -q "ALTER TABLE table1 MODIFY COLUMN p LINESTRING SRID 4326"
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "Cannot get geometry object from data you send to the GEOMETRY field" ]] || false
|
||||
|
||||
dolt sql -q "DELETE FROM table1 WHERE i = 1"
|
||||
run dolt sql -q "SELECT ST_ASWKT(p) FROM pt"
|
||||
[[ ! "$output" =~ "LINESTRING(0 0,1 2)" ]] || false
|
||||
|
||||
run dolt sql -q "ALTER TABLE table1 MODIFY COLUMN p POINT SRID 4326"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "ALTER TABLE table1 MODIFY COLUMN p POINT SRID 0"
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "The SRID of the geometry does not match the SRID of the column 'p'. The SRID of the geometry is 4326, but the SRID of the column is 0. Consider changing the SRID of the geometry or the SRID property of the column." ]] || false
|
||||
}
|
||||
|
||||
@@ -39,6 +39,39 @@ teardown() {
|
||||
teardown_common
|
||||
}
|
||||
|
||||
@test "sql: dolt sql -q has mysql db and can create users" {
|
||||
# there does not exist a mysql.db file
|
||||
run ls
|
||||
! [[ "$output" =~ "mysql.db" ]] || false
|
||||
|
||||
# mysql database exists and has privilege tables
|
||||
run dolt sql -q "show tables from mysql;"
|
||||
[ "$status" -eq "0" ]
|
||||
[[ "$output" =~ "user" ]] || false
|
||||
[[ "$output" =~ "role_edges" ]] || false
|
||||
|
||||
# show users, expect just root user
|
||||
run dolt sql -q "select user from mysql.user;"
|
||||
[[ "$output" =~ "root" ]] || false
|
||||
! [[ "$output" =~ "new_user" ]] || false
|
||||
|
||||
# create a new user
|
||||
run dolt sql -q "create user new_user;"
|
||||
[ "$status" -eq "0" ]
|
||||
|
||||
# there should now be a mysql.db file
|
||||
run ls
|
||||
[[ "$output" =~ "mysql.db" ]] || false
|
||||
|
||||
# show users, expect root and new_user
|
||||
run dolt sql -q "select user from mysql.user;"
|
||||
[[ "$output" =~ "root" ]] || false
|
||||
[[ "$output" =~ "new_user" ]] || false
|
||||
|
||||
# remove mysql.db just in case
|
||||
rm -f mysql.db
|
||||
}
|
||||
|
||||
@test "sql: errors do not write incomplete rows" {
|
||||
skip_nbf_dolt_1
|
||||
dolt sql <<"SQL"
|
||||
@@ -664,7 +697,7 @@ SQL
|
||||
CREATE DATABASE test1;
|
||||
CREATE DATABASE test2;
|
||||
USE test1;
|
||||
CALL DOLT_CHECKOUT('-b', 'newbranch');
|
||||
CALL DOLT_CHECKOUT('-b', 'newBranch');
|
||||
USE \`test1/newBranch\`;
|
||||
USE test2;
|
||||
DROP DATABASE test1;
|
||||
@@ -675,16 +708,16 @@ SQL
|
||||
run dolt sql <<SQL
|
||||
CREATE DATABASE test1;
|
||||
USE test1;
|
||||
CALL DOLT_CHECKOUT('-b', 'newbranch');
|
||||
USE \`test1/newBranch\`;
|
||||
CALL DOLT_CHECKOUT('-b', 'newBranch');
|
||||
USE \`TEST1/newBranch\`;
|
||||
USE test2;
|
||||
DROP DATABASE test1;
|
||||
DROP DATABASE Test1;
|
||||
SHOW TABLES;
|
||||
USE \`test1/newBranch\`;
|
||||
SQL
|
||||
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "database not found: test1/newbranch" ]] || false
|
||||
[[ "$output" =~ "database not found: test1/newBranch" ]] || false
|
||||
|
||||
cd ../
|
||||
}
|
||||
@@ -990,6 +1023,7 @@ ALTER TABLE t1 MODIFY COLUMN v1 BIGINT;
|
||||
ALTER TABLE t2 MODIFY COLUMN v1 VARCHAR(2000);
|
||||
ALTER TABLE t3 MODIFY COLUMN v1 TIMESTAMP;
|
||||
SQL
|
||||
|
||||
run dolt sql -q "SELECT * FROM t1 ORDER BY pk" -r=csv
|
||||
[ "$status" -eq "0" ]
|
||||
[[ "$output" =~ "pk,v1" ]] || false
|
||||
@@ -1025,18 +1059,10 @@ SQL
|
||||
skip_nbf_dolt_1
|
||||
dolt sql <<SQL
|
||||
CREATE TABLE t1(pk BIGINT PRIMARY KEY, v1 INT, INDEX(v1));
|
||||
CREATE TABLE t2(pk BIGINT PRIMARY KEY, v1 VARCHAR(20), INDEX(v1));
|
||||
CREATE TABLE t3(pk BIGINT PRIMARY KEY, v1 DATETIME, INDEX(v1));
|
||||
INSERT INTO t1 VALUES (0,-1),(1,1);
|
||||
INSERT INTO t2 VALUES (0,'hi'),(1,'bye');
|
||||
INSERT INTO t3 VALUES (0,'1999-11-02 17:39:38'),(1,'3021-01-08 02:59:27');
|
||||
SQL
|
||||
run dolt sql -q "ALTER TABLE t1 MODIFY COLUMN v1 INT UNSIGNED"
|
||||
[ "$status" -eq "1" ]
|
||||
run dolt sql -q "ALTER TABLE t2 MODIFY COLUMN v1 VARCHAR(2)"
|
||||
[ "$status" -eq "1" ]
|
||||
run dolt sql -q "ALTER TABLE t3 MODIFY COLUMN v1 TIMESTAMP"
|
||||
[ "$status" -eq "1" ]
|
||||
}
|
||||
|
||||
@test "sql: alter table modify column type no data change" {
|
||||
|
||||
@@ -459,3 +459,25 @@ SQL
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "too big number" ]] || false
|
||||
}
|
||||
|
||||
@test "import mysqldump: show create table on table with geometry type with SRID value" {
|
||||
run dolt sql <<SQL
|
||||
CREATE TABLE address (
|
||||
address_id smallint unsigned NOT NULL AUTO_INCREMENT,
|
||||
address varchar(50) NOT NULL,
|
||||
address2 varchar(50) DEFAULT NULL,
|
||||
district varchar(20) NOT NULL,
|
||||
city_id smallint unsigned NOT NULL,
|
||||
postal_code varchar(10) DEFAULT NULL,
|
||||
phone varchar(20) NOT NULL,
|
||||
location geometry NOT NULL /*!80003 SRID 0 */,
|
||||
last_update timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (address_id)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=606 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt sql -q "show create table address;" -r csv
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "\`location\` geometry NOT NULL SRID 0," ]] || false
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ CREATE TABLE `address` (
|
||||
`city_id` smallint unsigned NOT NULL,
|
||||
`postal_code` varchar(10) DEFAULT NULL,
|
||||
`phone` varchar(20) NOT NULL,
|
||||
`location` geometry NOT NULL /* UNSUPPORTED SYNTAX (https://github.com/dolthub/dolt/issues/3229): SRID 0 */,
|
||||
`location` geometry NOT NULL /*!80003 SRID 0 */,
|
||||
`last_update` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (`address_id`),
|
||||
KEY `idx_fk_city_id` (`city_id`),
|
||||
|
||||
Reference in New Issue
Block a user