Merge branch 'main' into JCOR11599-b8bd869a

James Cor committed 2023-05-23 12:26:46 -07:00 (committed by GitHub)
18 changed files with 1013 additions and 241 deletions
+53 -46
@@ -105,6 +105,7 @@ const (
UserFlag = "user"
DefaultUser = "root"
DefaultHost = "localhost"
UseDbFlag = "use-db"
welcomeMsg = `# Welcome to the DoltSQL shell.
# Statements must be terminated with ';'.
@@ -233,7 +234,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
fi, err := os.Stdin.Stat()
if err != nil {
if !osutil.IsWindows {
return HandleVErrAndExitCode(errhand.BuildDError("Couldn't stat STDIN. This is a bug.").Build(), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.BuildDError("Couldn't stat STDIN. This is a bug.").Build(), usage)
}
} else {
isTty = fi.Mode()&os.ModeCharDevice != 0
@@ -246,11 +247,11 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
isTty = false
input, err = os.OpenFile(fileInput, os.O_RDONLY, os.ModePerm)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("couldn't open file %s", fileInput).Build(), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.BuildDError("couldn't open file %s", fileInput).Build(), usage)
}
info, err := os.Stat(fileInput)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("couldn't get file size %s", fileInput).Build(), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.BuildDError("couldn't get file size %s", fileInput).Build(), usage)
}
// initialize fileReadProg global variable if there is a file to process queries from
@@ -261,23 +262,23 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
if isTty {
err := execShell(sqlCtx, queryist, format)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.VerboseErrorFromError(err), usage)
}
} else if runInBatchMode {
se, ok := queryist.(*engine.SqlEngine)
if !ok {
misuse := fmt.Errorf("Using batch with non-local access pattern. Stop server if it is running")
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(misuse), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.VerboseErrorFromError(misuse), usage)
}
verr := execBatch(sqlCtx, se, input, continueOnError, format)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return sqlHandleVErrAndExitCode(queryist, verr, usage)
}
} else {
err := execMultiStatements(sqlCtx, queryist, input, continueOnError, format)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(queryist, errhand.VerboseErrorFromError(err), usage)
}
}
}
@@ -285,6 +286,35 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return 0
}
// sqlHandleVErrAndExitCode is a helper function to print errors to the user. Currently, the Queryist interface is used to
// determine whether this is a local or remote execution. This is hacky and too simplistic; we should possibly add an error
// messaging interface to the CliContext.
func sqlHandleVErrAndExitCode(queryist cli.Queryist, verr errhand.VerboseError, usage cli.UsagePrinter) int {
if verr != nil {
if msg := verr.Verbose(); strings.TrimSpace(msg) != "" {
if _, ok := queryist.(*engine.SqlEngine); !ok {
// We are in a context where we are attempting to connect to a remote database. These errors
// are unstructured, so we add some additional context around them.
tmpMsg := `You've encountered a new behavior in dolt which is not fully documented yet.
A local dolt server is using your dolt data directory, and we attempted to connect to it to service your request.
That connection failed. You should stop the server, or reach out to @macneale on https://discord.gg/gqr7K4VNKe
for help.`
cli.PrintErrln(tmpMsg)
msg = fmt.Sprintf("A local server is running, and dolt is failing to connect. Error connecting to remote database: \"%s\".\n", msg)
}
cli.PrintErrln(msg)
}
if verr.ShouldPrintUsage() {
usage()
}
return 1
}
return 0
}
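For illustration, a minimal sketch of the behavior difference this helper introduces; localEngine and remoteQueryist are hypothetical values, and only the non-SqlEngine path gets the extra connection guidance:
// Hypothetical sketch: verr prints verbatim for a local *engine.SqlEngine,
// but gets the remote-server explanation prepended for any other Queryist.
verr := errhand.BuildDError("connection refused").Build()
sqlHandleVErrAndExitCode(localEngine, verr, usage)    // local: error only
sqlHandleVErrAndExitCode(remoteQueryist, verr, usage) // remote: adds guidance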
// handleLegacyArguments is a temporary function to parse args, and print an error and explanation when the old form is provided.
func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, error) {
@@ -326,19 +356,19 @@ func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr stri
func listSavedQueries(ctx *sql.Context, qryist cli.Queryist, dEnv *env.DoltEnv, format engine.PrintResultFormat, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", listSavedFlag).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", listSavedFlag).Build(), usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
hasQC, err := workingRoot.HasTable(ctx, doltdb.DoltQueryCatalogTableName)
if err != nil {
verr := errhand.BuildDError("error: Failed to read from repository.").AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
if !hasQC {
@@ -346,27 +376,27 @@ func listSavedQueries(ctx *sql.Context, qryist cli.Queryist, dEnv *env.DoltEnv,
}
query := "SELECT * FROM " + doltdb.DoltQueryCatalogTableName
return HandleVErrAndExitCode(execQuery(ctx, qryist, query, format), usage)
return sqlHandleVErrAndExitCode(qryist, execQuery(ctx, qryist, query, format), usage)
}
func executeSavedQuery(ctx *sql.Context, qryist cli.Queryist, dEnv *env.DoltEnv, savedQueryName string, format engine.PrintResultFormat, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", executeFlag).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", executeFlag).Build(), usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
sq, err := dtables.RetrieveFromQueryCatalog(ctx, workingRoot, savedQueryName)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
cli.PrintErrf("Executing saved query '%s':\n%s\n", savedQueryName, sq.Query)
return HandleVErrAndExitCode(execQuery(ctx, qryist, sq.Query, format), usage)
return sqlHandleVErrAndExitCode(qryist, execQuery(ctx, qryist, sq.Query, format), usage)
}
func queryMode(
@@ -388,19 +418,19 @@ func queryMode(
se, ok := qryist.(*engine.SqlEngine)
if !ok {
misuse := fmt.Errorf("Using batch with non-local access pattern. Stop server if it is running")
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(misuse), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(misuse), usage)
}
batchInput := strings.NewReader(query)
verr := execBatch(ctx, se, batchInput, continueOnError, format)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
} else {
input := strings.NewReader(query)
err := execMultiStatements(ctx, qryist, input, continueOnError, format)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.VerboseErrorFromError(err), usage)
}
}
@@ -409,58 +439,35 @@ func queryMode(
func execSaveQuery(ctx *sql.Context, dEnv *env.DoltEnv, qryist cli.Queryist, apr *argparser.ArgParseResults, query string, format engine.PrintResultFormat, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", saveFlag).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: --%s must be used in a dolt database directory.", saveFlag).Build(), usage)
}
saveName := apr.GetValueOrDefault(saveFlag, "")
verr := execQuery(ctx, qryist, query, format)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("error: failed to get working root").AddCause(err).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: failed to get working root").AddCause(err).Build(), usage)
}
saveMessage := apr.GetValueOrDefault(messageFlag, "")
newRoot, verr := saveQuery(ctx, workingRoot, query, saveName, saveMessage)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return sqlHandleVErrAndExitCode(qryist, verr, usage)
}
err = dEnv.UpdateWorkingRoot(ctx, newRoot)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("error: failed to update working root").AddCause(err).Build(), usage)
return sqlHandleVErrAndExitCode(qryist, errhand.BuildDError("error: failed to update working root").AddCause(err).Build(), usage)
}
return 0
}
// getMultiRepoEnv returns an appropriate MultiRepoEnv for this invocation of the command
func getMultiRepoEnv(ctx context.Context, workingDir string, dEnv *env.DoltEnv) (mrEnv *env.MultiRepoEnv, resolvedDir string, verr errhand.VerboseError) {
var err error
fs := dEnv.FS
if len(workingDir) > 0 {
fs, err = fs.WithWorkingDir(workingDir)
}
if err != nil {
return nil, "", errhand.VerboseErrorFromError(err)
}
resolvedDir, err = fs.Abs("")
if err != nil {
return nil, "", errhand.VerboseErrorFromError(err)
}
mrEnv, err = env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), fs, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
return nil, "", errhand.VerboseErrorFromError(err)
}
return mrEnv, resolvedDir, nil
}
func execBatch(
sqlCtx *sql.Context,
se *engine.SqlEngine,
+59 -9
@@ -38,13 +38,13 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
)
const (
sqlClientDualFlag = "dual"
SqlClientQueryFlag = "query"
SqlClientUseDbFlag = "use-db"
sqlClientResultFormat = "result-format"
)
@@ -85,7 +85,7 @@ func (cmd SqlClientCmd) ArgParser() *argparser.ArgParser {
ap := SqlServerCmd{}.ArgParserWithName(cmd.Name())
ap.SupportsFlag(sqlClientDualFlag, "d", "Causes this command to spawn a dolt server that is automatically connected to.")
ap.SupportsString(SqlClientQueryFlag, "q", "string", "Sends the given query to the server and immediately exits.")
ap.SupportsString(SqlClientUseDbFlag, "", "db_name", fmt.Sprintf("Selects the given database before executing a query. "+
ap.SupportsString(commands.UseDbFlag, "", "db_name", fmt.Sprintf("Selects the given database before executing a query. "+
"By default, uses the current folder's name. Must be used with the --%s flag.", SqlClientQueryFlag))
ap.SupportsString(sqlClientResultFormat, "", "format", fmt.Sprintf("Returns the results in the given format. Must be used with the --%s flag.", SqlClientQueryFlag))
return ap
@@ -127,8 +127,8 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, SqlClientQueryFlag)))
return 1
}
if apr.Contains(SqlClientUseDbFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, SqlClientUseDbFlag)))
if apr.Contains(commands.UseDbFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, commands.UseDbFlag)))
return 1
}
if apr.Contains(sqlClientResultFormat) {
@@ -136,7 +136,7 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
return 1
}
serverConfig, err = GetServerConfig(dEnv, apr)
serverConfig, err = GetServerConfig(dEnv.FS, apr)
if err != nil {
cli.PrintErrln(color.RedString("Bad Configuration"))
cli.PrintErrln(err.Error())
@@ -159,7 +159,7 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
return 1
}
} else {
serverConfig, err = GetServerConfig(dEnv, apr)
serverConfig, err = GetServerConfig(dEnv.FS, apr)
if err != nil {
cli.PrintErrln(color.RedString("Bad Configuration"))
cli.PrintErrln(err.Error())
@@ -168,13 +168,13 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
}
query, hasQuery := apr.GetValue(SqlClientQueryFlag)
dbToUse, hasUseDb := apr.GetValue(SqlClientUseDbFlag)
dbToUse, hasUseDb := apr.GetValue(commands.UseDbFlag)
resultFormat, hasResultFormat := apr.GetValue(sqlClientResultFormat)
if !hasQuery && hasUseDb {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", SqlClientUseDbFlag, SqlClientQueryFlag)))
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", commands.UseDbFlag, SqlClientQueryFlag)))
return 1
} else if !hasQuery && hasResultFormat {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", SqlClientUseDbFlag, sqlClientResultFormat)))
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", sqlClientResultFormat, SqlClientQueryFlag)))
return 1
}
if !hasUseDb && hasQuery {
@@ -453,3 +453,53 @@ func secondsSince(start time.Time, end time.Time) float64 {
timeDisplay := float64(seconds) + float64(milliRemainder)*.001
return timeDisplay
}
// ConnectionQueryist executes queries by connecting to a running MySQL server.
type ConnectionQueryist struct {
connection *dbr.Connection
}
var _ cli.Queryist = ConnectionQueryist{}
func (c ConnectionQueryist) Query(ctx *sql.Context, query string) (sql.Schema, sql.RowIter, error) {
rows, err := c.connection.QueryContext(ctx, query)
if err != nil {
return nil, nil, err
}
rowIter, err := NewMysqlRowWrapper(rows)
if err != nil {
return nil, nil, err
}
return rowIter.Schema(), rowIter, nil
}
// BuildConnectionStringQueryist returns a Queryist that connects to the server specified by the given server config. Presence in this
// module isn't ideal, but it's the only way to get the server config into the queryist.
func BuildConnectionStringQueryist(ctx context.Context, cwdFS filesys.Filesys, apr *argparser.ArgParseResults, port int, database string) (cli.LateBindQueryist, error) {
serverConfig, err := GetServerConfig(cwdFS, apr)
if err != nil {
return nil, err
}
parsedMySQLConfig, err := mysqlDriver.ParseDSN(ConnectionString(serverConfig, database))
if err != nil {
return nil, err
}
parsedMySQLConfig.Addr = fmt.Sprintf("localhost:%d", port)
mysqlConnector, err := mysqlDriver.NewConnector(parsedMySQLConfig)
if err != nil {
return nil, err
}
conn := &dbr.Connection{DB: mysql.OpenDB(mysqlConnector), EventReceiver: nil, Dialect: dialect.MySQL}
queryist := ConnectionQueryist{connection: conn}
var lateBind cli.LateBindQueryist = func(ctx context.Context) (cli.Queryist, *sql.Context, func(), error) {
return queryist, sql.NewContext(ctx), func() { conn.Conn(ctx) }, nil
}
return lateBind, nil
}
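A minimal usage sketch, not part of this diff, of how a caller might bind and query through the connection-backed queryist; the port and database name are assumptions:
// Sketch under assumptions: port 3306 and database "mydb" are hypothetical.
func exampleRemoteQuery(ctx context.Context, cwdFS filesys.Filesys, apr *argparser.ArgParseResults) error {
	lateBind, err := BuildConnectionStringQueryist(ctx, cwdFS, apr, 3306, "mydb")
	if err != nil {
		return err
	}
	// Late binding defers the actual connection until a command needs it.
	queryist, sqlCtx, closeFunc, err := lateBind(ctx)
	if err != nil {
		return err
	}
	defer closeFunc()
	_, rowIter, err := queryist.Query(sqlCtx, "SELECT 1")
	if err != nil {
		return err
	}
	return rowIter.Close(sqlCtx)
}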
+5 -5
@@ -207,7 +207,7 @@ func startServer(ctx context.Context, versionStr, commandStr string, args []stri
if err := validateSqlServerArgs(apr); err != nil {
return 1
}
serverConfig, err := GetServerConfig(dEnv, apr)
serverConfig, err := GetServerConfig(dEnv.FS, apr)
if err != nil {
if serverController != nil {
serverController.StopServer()
@@ -246,16 +246,16 @@ func startServer(ctx context.Context, versionStr, commandStr string, args []stri
// GetServerConfig returns ServerConfig that is set either from yaml file if given, if not it is set with values defined
// on command line. Server config variables not defined are set to default values.
func GetServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (ServerConfig, error) {
func GetServerConfig(cwdFS filesys.Filesys, apr *argparser.ArgParseResults) (ServerConfig, error) {
var yamlCfg YAMLConfig
if cfgFile, ok := apr.GetValue(configFileFlag); ok {
cfg, err := getYAMLServerConfig(dEnv.FS, cfgFile)
cfg, err := getYAMLServerConfig(cwdFS, cfgFile)
if err != nil {
return nil, err
}
yamlCfg = cfg.(YAMLConfig)
} else {
return getCommandLineServerConfig(dEnv, apr)
return getCommandLineServerConfig(apr)
}
// if command line user argument was given, replace yaml's user and password
@@ -350,7 +350,7 @@ func SetupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config S
// getCommandLineServerConfig sets server config variables and persisted global variables with values defined on command line.
// If not defined, it sets variables to default values.
func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (ServerConfig, error) {
func getCommandLineServerConfig(apr *argparser.ArgParseResults) (ServerConfig, error) {
serverConfig := DefaultServerConfig()
if sock, ok := apr.GetValue(socketFlag); ok {
+53 -34
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
)
var fwtStageName = "fwt"
@@ -78,17 +79,22 @@ func MaybeGetCommitWithVErr(dEnv *env.DoltEnv, maybeCommit string) (*doltdb.Comm
// NewArgFreeCliContext creates a new CliContext instance with no arguments using a local SqlEngine. This is useful for testing primarily
func NewArgFreeCliContext(ctx context.Context, dEnv *env.DoltEnv) (cli.CliContext, errhand.VerboseError) {
lateBind, err := BuildSqlEngineQueryist(ctx, dEnv, argparser.NewEmptyResults())
mrEnv, err := env.MultiEnvForSingleEnv(ctx, dEnv)
if err != nil {
return nil, err
return nil, errhand.VerboseErrorFromError(err)
}
lateBind, verr := BuildSqlEngineQueryist(ctx, dEnv.FS, mrEnv, argparser.NewEmptyResults())
if verr != nil {
return nil, verr
}
return cli.NewCliContext(argparser.NewEmptyResults(), lateBind)
}
// BuildSqlEngineQueryist Utility function to build a local SQLEngine for use interacting with data on disk using
// SQL queries. ctx and dEnv must be non-nil. apr can be nil.
func BuildSqlEngineQueryist(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (cli.LateBindQueryist, errhand.VerboseError) {
if ctx == nil || dEnv == nil || apr == nil {
// SQL queries. ctx, cwdFS, mrEnv, and apr must all be non-nil.
func BuildSqlEngineQueryist(ctx context.Context, cwdFS filesys.Filesys, mrEnv *env.MultiRepoEnv, apr *argparser.ArgParseResults) (cli.LateBindQueryist, errhand.VerboseError) {
if ctx == nil || cwdFS == nil || mrEnv == nil || apr == nil {
return nil, errhand.VerboseErrorFromError(fmt.Errorf("Invariant violated. Nil argument provided to BuildSqlEngineQueryist"))
}
@@ -98,44 +104,42 @@ func BuildSqlEngineQueryist(ctx context.Context, dEnv *env.DoltEnv, apr *argpars
username = user
}
// data-dir args come either from the global args or the subcommand args. We need to check both.
var dataDir string
dataDirGiven := false
if dataDirPath, ok := apr.GetValue(DataDirFlag); ok {
dataDir = dataDirPath
dataDirGiven = true
}
mrEnv, dataDir, verr := getMultiRepoEnv(ctx, dataDir, dEnv)
if verr != nil {
return nil, verr
// We want to know if the user provided us the data-dir flag, but we want to use the abs value used to
// create the DoltEnv. This is a little messy.
dataDir, dataDirGiven := apr.GetValue(DataDirFlag)
dataDir, err := cwdFS.Abs(dataDir)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
// Resolve the config directory path (cfgDirPath): prefer an explicit flag, then the data dir, then the parent directory.
var cfgDirPath string
cfgDir, cfgDirSpecified := apr.GetValue(CfgDirFlag)
if cfgDirSpecified {
cfgDirPath = cfgDir
cfgDirPath, err = cwdFS.Abs(cfgDir)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
} else if dataDirGiven {
cfgDirPath = filepath.Join(dataDir, DefaultCfgDirName)
} else {
// Look in parent directory for doltcfg
// Look in CWD parent directory for doltcfg
parentDirCfg := filepath.Join("..", DefaultCfgDirName)
parentExists, isDir := dEnv.FS.Exists(parentDirCfg)
parentExists, isDir := cwdFS.Exists(parentDirCfg)
parentDirExists := parentExists && isDir
// Look in data directory (which is necessarily current directory) for doltcfg
currDirCfg := filepath.Join(dataDir, DefaultCfgDirName)
currExists, isDir := dEnv.FS.Exists(currDirCfg)
currDirExists := currExists && isDir
// Look in data directory for doltcfg
dataDirCfg := filepath.Join(dataDir, DefaultCfgDirName)
dataDirCfgExists, isDir := cwdFS.Exists(dataDirCfg)
currDirExists := dataDirCfgExists && isDir
// Error if both current and parent exist
// Error if both CWD/../.doltcfg and dataDir/.doltcfg exist because it's unclear which to use.
if currDirExists && parentDirExists {
p1, err := dEnv.FS.Abs(cfgDirPath)
p1, err := cwdFS.Abs(cfgDirPath)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
p2, err := dEnv.FS.Abs(parentDirCfg)
p2, err := cwdFS.Abs(parentDirCfg)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
@@ -146,15 +150,19 @@ func BuildSqlEngineQueryist(ctx context.Context, dEnv *env.DoltEnv, apr *argpars
if parentDirExists {
cfgDirPath = parentDirCfg
} else {
cfgDirPath = currDirCfg
cfgDirPath = dataDirCfg
}
}
var err error
// If no privilege filepath specified, default to doltcfg directory
privsFp, hasPrivsFp := apr.GetValue(PrivsFilePathFlag)
if !hasPrivsFp {
privsFp, err = dEnv.FS.Abs(filepath.Join(cfgDirPath, DefaultPrivsName))
privsFp, err = cwdFS.Abs(filepath.Join(cfgDirPath, DefaultPrivsName))
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
} else {
privsFp, err = cwdFS.Abs(privsFp)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
@@ -163,13 +171,25 @@ func BuildSqlEngineQueryist(ctx context.Context, dEnv *env.DoltEnv, apr *argpars
// If no branch control file path is specified, default to doltcfg directory
branchControlFilePath, hasBCFilePath := apr.GetValue(BranchCtrlPathFlag)
if !hasBCFilePath {
branchControlFilePath, err = dEnv.FS.Abs(filepath.Join(cfgDirPath, DefaultBranchCtrlName))
branchControlFilePath, err = cwdFS.Abs(filepath.Join(cfgDirPath, DefaultBranchCtrlName))
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
} else {
branchControlFilePath, err = cwdFS.Abs(branchControlFilePath)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
}
binder, err := newLateBindingEngine(ctx, apr, cfgDirPath, privsFp, branchControlFilePath, username, mrEnv)
// Whether we're running in shell mode or some other mode, sql commands from the command line always have a current
// database set when you begin using them.
database, hasDB := apr.GetValue(UseDbFlag)
if !hasDB {
database = mrEnv.GetFirstDatabase()
}
binder, err := newLateBindingEngine(cfgDirPath, privsFp, branchControlFilePath, username, database, mrEnv)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
@@ -178,12 +198,11 @@ func BuildSqlEngineQueryist(ctx context.Context, dEnv *env.DoltEnv, apr *argpars
}
func newLateBindingEngine(
ctx context.Context,
apr *argparser.ArgParseResults,
cfgDirPath string,
privsFp string,
branchControlFilePath string,
username string,
database string,
mrEnv *env.MultiRepoEnv,
) (cli.LateBindQueryist, error) {
@@ -212,7 +231,7 @@ func newLateBindingEngine(
// Whether we're running in shell mode or some other mode, sql commands from the command line always have a current
// database set when you begin using them.
sqlCtx.SetCurrentDatabase(mrEnv.GetFirstDatabase())
sqlCtx.SetCurrentDatabase(database)
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
+88 -28
@@ -62,7 +62,7 @@ import (
)
const (
Version = "1.1.0"
Version = "1.1.1"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -140,6 +140,7 @@ const stdOutFlag = "--stdout"
const stdErrFlag = "--stderr"
const stdOutAndErrFlag = "--out-and-err"
const ignoreLocksFlag = "--ignore-lock-file"
const verboseEngineSetupFlag = "--verbose-engine-setup"
const cpuProf = "cpu"
const memProf = "mem"
@@ -168,6 +169,7 @@ func runMain() int {
csMetrics := false
ignoreLockFile := false
verboseEngineSetup := false
if len(args) > 0 {
var doneDebugFlags bool
for !doneDebugFlags && len(args) > 0 {
@@ -324,6 +326,9 @@ func runMain() int {
args = args[2:]
case verboseEngineSetupFlag:
verboseEngineSetup = true
args = args[1:]
default:
doneDebugFlags = true
}
@@ -342,7 +347,42 @@ func runMain() int {
return exit
}
dEnv := env.Load(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, doltdb.LocalDirDoltDB, Version)
globalArgs, args, initCliContext, printUsage, err := splitArgsOnSubCommand(args)
_, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser))
if printUsage {
doltCommand.PrintUsage("dolt")
specialMsg := `
Dolt subcommands are in transition to using the flags listed below as global flags.
The sql subcommand is currently the only command that uses these flags. All other commands will ignore them.
`
cli.Println(specialMsg)
usage()
return 0
}
if err != nil {
cli.PrintErrln(color.RedString("Failure to parse arguments: %v", err))
return 1
}
apr := cli.ParseArgsOrDie(globalArgParser, globalArgs, usage)
var fs filesys.Filesys
fs = filesys.LocalFS
dataDir, hasDataDir := apr.GetValue(commands.DataDirFlag)
if hasDataDir {
// If a relative path was provided, this ensures we have an absolute path everywhere.
dataDir, err = fs.Abs(dataDir)
if err != nil {
cli.PrintErrln(color.RedString("Failed to get absolute path for %s: %v", dataDir, err))
return 1
}
if ok, dir := fs.Exists(dataDir); !ok || !dir {
cli.Println(color.RedString("Provided data directory does not exist: %s", dataDir))
return 1
}
}
dEnv := env.Load(ctx, env.GetCurrentUserHomeDir, fs, doltdb.LocalDirDoltDB, Version)
dEnv.IgnoreLockFile = ignoreLockFile
root, err := env.GetCurrentUserHomeDir()
@@ -409,7 +449,12 @@ func runMain() int {
// variables like `${db_name}_default_branch` (maybe these should not be
// part of Dolt config in the first place!).
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
dataDirFS, err := dEnv.FS.WithWorkingDir(dataDir)
if err != nil {
cli.PrintErrln(color.RedString("Failed to set the data directory. %v", err))
return 1
}
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dataDirFS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
cli.PrintErrln("failed to load database names")
return 1
@@ -424,31 +469,9 @@ func runMain() int {
cli.Printf("error: failed to load persisted global variables: %s\n", err.Error())
}
globalArgs, args, initCliContext, printUsage, err := splitArgsOnSubCommand(args)
if printUsage {
doltCommand.PrintUsage("dolt")
_, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser))
specialMsg := `
Dolt subcommands are in transition to using the flags listed below as global flags.
The sql subcommand is currently the only command that uses these flags. All other commands will ignore them.
`
cli.Println(specialMsg)
usage()
return 0
}
if err != nil {
cli.PrintErrln(color.RedString("Failure to parse arguments: %v", err))
return 1
}
var cliCtx cli.CliContext = nil
if initCliContext {
_, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser))
apr := cli.ParseArgsOrDie(globalArgParser, globalArgs, usage)
lateBind, err := commands.BuildSqlEngineQueryist(ctx, dEnv, apr)
lateBind, err := buildLateBinder(ctx, dEnv.FS, mrEnv, apr, verboseEngineSetup)
if err != nil {
cli.PrintErrln(color.RedString("Failure to Load SQL Engine: %v", err))
return 1
@@ -459,7 +482,6 @@ The sql subcommand is currently the only command that uses these flags. All othe
cli.PrintErrln(color.RedString("Unexpected Error: %v", err))
return 1
}
}
ctx, stop := context.WithCancel(ctx)
@@ -482,6 +504,44 @@ The sql subcommand is currently the only command that uses these flags. All othe
return res
}
func buildLateBinder(ctx context.Context, cwdFS filesys.Filesys, mrEnv *env.MultiRepoEnv, apr *argparser.ArgParseResults, verbose bool) (cli.LateBindQueryist, error) {
var targetEnv *env.DoltEnv = nil
useDb, hasUseDb := apr.GetValue(commands.UseDbFlag)
if hasUseDb {
targetEnv = mrEnv.GetEnv(useDb)
if targetEnv == nil {
return nil, fmt.Errorf("The provided --use-db %s does not exist or is not a directory.", useDb)
}
} else {
useDb = mrEnv.GetFirstDatabase()
}
if targetEnv == nil && useDb != "" {
targetEnv = mrEnv.GetEnv(useDb)
}
// nil targetEnv will happen if the user ran a command in an empty directory - which we support in some cases.
if targetEnv != nil {
isLocked, lock, err := targetEnv.GetLock()
if err != nil {
return nil, err
}
if isLocked {
if verbose {
cli.Println("verbose: starting remote mode")
}
return sqlserver.BuildConnectionStringQueryist(ctx, cwdFS, apr, lock.Port, useDb)
}
}
if verbose {
cli.Println("verbose: starting local mode")
}
return commands.BuildSqlEngineQueryist(ctx, cwdFS, mrEnv, apr)
}
// splitArgsOnSubCommand splits the args into two slices, the first containing all args before the first subcommand,
// and the second containing all args after the first subcommand. The second slice will start with the subcommand name.
func splitArgsOnSubCommand(args []string) (globalArgs, subArgs []string, initCliContext, printUsages bool, err error) {
@@ -568,12 +628,12 @@ func interceptSendMetrics(ctx context.Context, args []string) (bool, int) {
func buildGlobalArgs() *argparser.ArgParser {
ap := argparser.NewArgParserWithVariableArgs("dolt")
// Pulling this argument forward first to pave the way. Others will follow.
ap.SupportsString(commands.UserFlag, "u", "user", fmt.Sprintf("Defines the local superuser (defaults to `%v`). If the specified user exists, will take on permissions of that user.", commands.DefaultUser))
ap.SupportsString(commands.DataDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within. Defaults to the current directory.")
ap.SupportsString(commands.CfgDirFlag, "", "directory", "Defines a directory that contains configuration files for dolt. Defaults to `$data-dir/.doltcfg`. Will only be created if there is a change to configuration settings.")
ap.SupportsString(commands.PrivsFilePathFlag, "", "privilege file", "Path to a file to load and store users and grants. Defaults to `$doltcfg-dir/privileges.db`. Will only be created if there is a change to privileges.")
ap.SupportsString(commands.BranchCtrlPathFlag, "", "branch control file", "Path to a file to load and store branch control permissions. Defaults to `$doltcfg-dir/branch_control.db`. Will only be created if there is a change to branch control permissions.")
ap.SupportsString(commands.UseDbFlag, "", "database", "The name of the database to use when executing SQL queries. Defaults to the database of the root directory, if it exists, and the first database alphabetically if not.")
return ap
}
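A minimal sketch of parsing these global flags ahead of a subcommand; the argument slice is hypothetical:
ap := buildGlobalArgs()
apr := cli.ParseArgsOrDie(ap, []string{"--user", "root", "--use-db", "mydb"}, usage)
if db, ok := apr.GetValue(commands.UseDbFlag); ok {
	cli.Println("using database", db)
}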
+20 -6
@@ -1149,7 +1149,18 @@ func (dEnv *DoltEnv) IsLocked() bool {
if dEnv.IgnoreLockFile {
return false
}
return FsIsLocked(dEnv.FS)
ans, _, _ := fsIsLocked(dEnv.FS)
return ans
}
// GetLock returns whether this database is locked, along with the lock details when a live lock exists
func (dEnv *DoltEnv) GetLock() (bool, *DBLock, error) {
if dEnv.IgnoreLockFile {
return false, nil, nil
}
return fsIsLocked(dEnv.FS)
}
// DBLock is a struct that contains the pid of the process that created the lockfile and the port that the server is running on
@@ -1259,24 +1270,27 @@ func WriteLockfile(fs filesys.Filesys, lock DBLock) error {
// fsIsLocked returns whether a lockFile exists whose pid belongs to a live
// process, along with the loaded lock details when available.
func FsIsLocked(fs filesys.Filesys) bool {
func fsIsLocked(fs filesys.Filesys) (bool, *DBLock, error) {
lockFile, _ := fs.Abs(filepath.Join(dbfactory.DoltDir, ServerLockFile))
ok, _ := fs.Exists(lockFile)
if !ok {
return false
return false, nil, nil
}
loadedLock, err := LoadDBLockFile(fs, lockFile)
if err != nil { // if there's any error assume that env is locked since the file exists
return true
return true, nil, err
}
// Check whether the pid that spawned the lock file is still running. Ignore it if not.
p, err := ps.FindProcess(loadedLock.Pid)
if err != nil {
return false
return false, nil, nil
}
return p != nil
if p != nil {
return true, loadedLock, nil
}
return false, nil, nil
}
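A minimal sketch of how the new (bool, *DBLock, error) shape lets a caller route between a running server and direct access; connectToServer and openLocally are hypothetical helpers:
func routeAccess(dEnv *DoltEnv) error {
	isLocked, lock, err := dEnv.GetLock()
	if err != nil {
		return err
	}
	// Guard against a nil lock for safety before dereferencing.
	if isLocked && lock != nil {
		return connectToServer(lock.Port) // hypothetical helper
	}
	return openLocally(dEnv) // hypothetical helper
}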
+38 -23
@@ -19,6 +19,7 @@ import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"unicode"
@@ -58,52 +59,61 @@ type MultiRepoEnv struct {
ignoreLockFile bool
}
// MultiEnvForSingleEnv returns a new MultiRepoEnv instance derived from a root DoltEnv instance.
func MultiEnvForSingleEnv(ctx context.Context, env *DoltEnv) (*MultiRepoEnv, error) {
return MultiEnvForDirectory(ctx, env.Config.WriteableConfig(), env.FS, env.Version, env.IgnoreLockFile, env)
}
// MultiEnvForDirectory returns a MultiRepoEnv for the directory rooted at the given file system. If the DoltEnv from
// the invoking context is non-nil and valid, it is included in the returned MultiRepoEnv and will be the first
// database in all iterations.
func MultiEnvForDirectory(
ctx context.Context,
config config.ReadWriteConfig,
fs filesys.Filesys,
dataDirFS filesys.Filesys,
version string,
ignoreLockFile bool,
dEnv *DoltEnv,
) (*MultiRepoEnv, error) {
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: fs,
cfg: config,
dialProvider: NewGRPCDialProviderFromDoltEnv(dEnv),
ignoreLockFile: ignoreLockFile,
}
// Load current dataDirFS and put into mr env
var dbName string = "dolt"
var newDEnv *DoltEnv = dEnv
// Load current fs and put into mr env
var dbName string
if _, ok := fs.(*filesys.InMemFS); ok {
dbName = "dolt"
} else {
path, err := fs.Abs("")
// InMemFS is used only for testing.
// All other FS Types should get a newly created Environment which will serve as the primary env in the MultiRepoEnv
if _, ok := dataDirFS.(*filesys.InMemFS); !ok {
path, err := dataDirFS.Abs("")
if err != nil {
return nil, err
}
envName := getRepoRootDir(path, string(os.PathSeparator))
dbName = dirToDBName(envName)
newDEnv = Load(ctx, GetCurrentUserHomeDir, dataDirFS, doltdb.LocalDirDoltDB, version)
}
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: dataDirFS,
cfg: config,
dialProvider: NewGRPCDialProviderFromDoltEnv(newDEnv),
ignoreLockFile: ignoreLockFile,
}
envSet := map[string]*DoltEnv{}
if dEnv.Valid() {
envSet[dbName] = dEnv
if newDEnv.Valid() {
envSet[dbName] = newDEnv
}
// If there are other directories in the directory, try to load them as additional databases
fs.Iter(".", false, func(path string, size int64, isDir bool) (stop bool) {
dataDirFS.Iter(".", false, func(path string, size int64, isDir bool) (stop bool) {
if !isDir {
return false
}
dir := filepath.Base(path)
newFs, err := fs.WithWorkingDir(dir)
newFs, err := dataDirFS.WithWorkingDir(dir)
if err != nil {
return false
}
@@ -124,14 +134,19 @@ func MultiEnvForDirectory(
enforceSingleFormat(envSet)
// if the current directory database is in our set, add it first so it will be the current database
var ok bool
if dEnv, ok = envSet[dbName]; ok && dEnv.Valid() {
mrEnv.addEnv(dbName, dEnv)
if env, ok := envSet[dbName]; ok && env.Valid() {
mrEnv.addEnv(dbName, env)
delete(envSet, dbName)
}
for dbName, dEnv = range envSet {
mrEnv.addEnv(dbName, dEnv)
// get the keys from the envSet keys as a sorted list
sortedKeys := make([]string, 0, len(envSet))
for k := range envSet {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
for _, dbName := range sortedKeys {
mrEnv.addEnv(dbName, envSet[dbName])
}
return mrEnv, nil
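A small sketch of the deterministic ordering the sort above provides, assuming MultiRepoEnv's Iter callback shape; the database names are hypothetical:
// With current-directory database "mydb" and sibling databases "zoo" and
// "alpha", iteration now yields: mydb first, then alpha, zoo (sorted).
mrEnv.Iter(func(name string, dEnv *DoltEnv) (stop bool, err error) {
	fmt.Println(name)
	return false, nil
})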
@@ -22,14 +22,19 @@ import (
"fmt"
"io"
"github.com/dolthub/go-mysql-server/memory"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/analyzer"
"github.com/dolthub/go-mysql-server/sql/parse"
"github.com/dolthub/go-mysql-server/sql/planbuilder"
"github.com/dolthub/go-mysql-server/sql/transform"
errorkinds "gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly"
@@ -156,6 +161,11 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
return nil, nil, err
}
checkValidator, err := newCheckValidator(ctx, tm, valueMerger, finalSch, artEditor)
if err != nil {
return nil, nil, err
}
// validator shares an artifact editor with conflict merge
uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, artEditor)
if err != nil {
@@ -193,6 +203,12 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
continue
}
cnt, err = checkValidator.validateDiff(ctx, diff)
if err != nil {
return nil, nil, err
}
s.ConstraintViolations += cnt
switch diff.Op {
case tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict:
// In this case, a modification or delete was made to one side, and a conflicting delete or modification
@@ -335,6 +351,175 @@ func threeWayDiffer(ctx context.Context, tm *TableMerger, valueMerger *valueMerg
return tree.NewThreeWayDiffer(ctx, leftRows.NodeStore(), leftRows.Tuples(), rightRows.Tuples(), ancRows.Tuples(), valueMerger.tryMerge, valueMerger.keyless, leftRows.Tuples().Order)
}
// checkValidator is responsible for inspecting three-way diff events, running any check constraint expressions
// that need to be reevaluated, and reporting any check constraint violations.
type checkValidator struct {
checkExpressions map[string]sql.Expression
valueMerger *valueMerger
tableMerger *TableMerger
sch schema.Schema
edits *prolly.ArtifactsEditor
srcHash hash.Hash
}
// newCheckValidator creates a new checkValidator, ready to validate diff events. |tm| provides the overall information
// about the table being merged, |vm| provides the details on how the value tuples are being merged between the ancestor,
// right and left sides of the merge, |sch| provides the final schema of the merge, and |edits| is used to write
// constraint validation artifacts.
func newCheckValidator(ctx *sql.Context, tm *TableMerger, vm *valueMerger, sch schema.Schema, edits *prolly.ArtifactsEditor) (checkValidator, error) {
checkExpressions := make(map[string]sql.Expression)
checks := sch.Checks()
for _, check := range checks.AllChecks() {
if !check.Enforced() {
continue
}
expr, err := resolveExpression(ctx, check.Expression(), sch, tm.name)
if err != nil {
return checkValidator{}, err
}
checkExpressions[check.Name()] = expr
}
srcHash, err := tm.rightSrc.HashOf()
if err != nil {
return checkValidator{}, err
}
return checkValidator{
checkExpressions: checkExpressions,
valueMerger: vm,
tableMerger: tm,
sch: sch,
edits: edits,
srcHash: srcHash,
}, nil
}
// validateDiff inspects the three-way diff event |diff| and evaluates any check constraint expressions that need to
// be rechecked after the merge. If any check constraint violations are detected, the violation count is returned as
// the first return parameter and the violations are also written to the artifact editor passed in on creation.
func (cv checkValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff) (int, error) {
conflictCount := 0
var valueTuple val.Tuple
var valueDesc val.TupleDesc
switch diff.Op {
case tree.DiffOpLeftDelete, tree.DiffOpRightDelete, tree.DiffOpConvergentDelete:
// no need to validate check constraints for deletes
return 0, nil
case tree.DiffOpDivergentDeleteConflict, tree.DiffOpDivergentModifyConflict:
// Don't bother validating divergent conflicts, just let them get reported as conflicts
return 0, nil
case tree.DiffOpLeftAdd, tree.DiffOpLeftModify:
valueTuple = diff.Left
valueDesc = cv.tableMerger.leftSch.GetValueDescriptor()
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
valueTuple = diff.Right
valueDesc = cv.tableMerger.rightSch.GetValueDescriptor()
case tree.DiffOpConvergentAdd, tree.DiffOpConvergentModify:
// both sides made the same change, just take the left
valueTuple = diff.Left
valueDesc = cv.tableMerger.leftSch.GetValueDescriptor()
case tree.DiffOpDivergentModifyResolved:
valueTuple = diff.Merged
valueDesc = cv.tableMerger.leftSch.GetValueDescriptor()
}
for checkName, checkExpression := range cv.checkExpressions {
// If the row came from the right side of the merge, then remap it (if necessary) to the final schema.
// This isn't necessary for left-side changes, because we already migrated the primary index data to
// the merged schema, and we skip keyless tables, since their value tuples require different mapping
// logic and we don't currently support merges to keyless tables that contain schema changes anyway.
newTuple := valueTuple
if !cv.valueMerger.keyless && (diff.Op == tree.DiffOpRightAdd || diff.Op == tree.DiffOpRightModify) {
newTupleBytes := remapTuple(valueTuple, valueDesc, cv.valueMerger.rightMapping)
newTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)
}
row, err := cv.buildRow(ctx, diff.Key, newTuple)
if err != nil {
return 0, err
}
result, err := checkExpression.Eval(ctx, row)
if err != nil {
return 0, err
}
if result == nil || result == true {
// If a check constraint returns NULL or TRUE, then the check constraint is fulfilled
// https://dev.mysql.com/doc/refman/8.0/en/create-table-check-constraints.html
continue
} else if result == false {
conflictCount++
meta, err := newCheckCVMeta(cv.sch, checkName)
if err != nil {
return 0, err
}
if err = cv.insertArtifact(ctx, diff.Key, newTuple, meta); err != nil {
return conflictCount, err
}
} else {
return 0, fmt.Errorf("unexpected result from check constraint expression: %v", result)
}
}
return conflictCount, nil
}
// insertArtifact records a check constraint violation, as described by |meta|, for the row with the specified
// |key| and |value|.
func (cv checkValidator) insertArtifact(ctx context.Context, key, value val.Tuple, meta CheckCVMeta) error {
vinfo, err := json.Marshal(meta)
if err != nil {
return err
}
cvm := prolly.ConstraintViolationMeta{VInfo: vinfo, Value: value}
return cv.edits.ReplaceConstraintViolation(ctx, key, cv.srcHash, prolly.ArtifactTypeChkConsViol, cvm)
}
// buildRow takes the |key| and |value| tuple and returns a new sql.Row, along with any errors encountered.
func (cv checkValidator) buildRow(ctx *sql.Context, key, value val.Tuple) (sql.Row, error) {
// When we parse and resolve the check constraint expression with planbuilder, it leaves row position 0
// for the expression itself, so we add an empty spot in our row to account for that before we pass the
// row into the expression to evaluate it.
var row sql.Row
row = append(row, nil)
// Skip adding the key tuple if we're working with a keyless table, since the table row data is
// always all contained in the value tuple for keyless tables.
if !cv.valueMerger.keyless {
keyDesc := cv.sch.GetKeyDescriptor()
for i := range keyDesc.Types {
value, err := index.GetField(ctx, keyDesc, i, key, cv.tableMerger.ns)
if err != nil {
return nil, err
}
row = append(row, value)
}
}
valueDescriptor := cv.sch.GetValueDescriptor()
for i := range valueDescriptor.Types {
// Skip processing the first value in the value tuple for keyless tables, since that field
// always holds the cardinality of the row and shouldn't be passed in to an expression.
if cv.valueMerger.keyless && i == 0 {
continue
}
value, err := index.GetField(ctx, valueDescriptor, i, value, cv.tableMerger.ns)
if err != nil {
return nil, err
}
row = append(row, value)
}
return row, nil
}
// uniqValidator checks whether new additions from the merge-right
// duplicate secondary index entries.
type uniqValidator struct {
@@ -404,8 +589,7 @@ func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff
return
}
// Don't remap the value to the merged schema if the table is keyless (since they
// don't allow schema changes) or if the mapping is an identity mapping.
// Don't remap the value to the merged schema if the table is keyless or if the mapping is an identity mapping.
if !uv.valueMerger.keyless && !uv.valueMerger.rightMapping.IsIdentityMapping() {
modifiedValue := remapTuple(value, uv.tm.rightSch.GetValueDescriptor(), uv.valueMerger.rightMapping)
value = val.NewTuple(uv.valueMerger.syncPool, modifiedValue...)
@@ -856,6 +1040,41 @@ func (m *secondaryMerger) finalize(ctx context.Context) (durable.IndexSet, durab
return m.leftSet, m.rightSet, nil
}
// resolveExpression takes in a string |expression| and does basic resolution on it (e.g. column names and function
// names) so that the returned sql.Expression can be evaluated. The schema of the table is specified in |sch| and the
// name of the table in |tableName|.
func resolveExpression(ctx *sql.Context, expression string, sch schema.Schema, tableName string) (sql.Expression, error) {
query := fmt.Sprintf("SELECT %s from %s.%s", expression, "mydb", tableName)
sqlSch, err := sqlutil.FromDoltSchema(tableName, sch)
if err != nil {
return nil, err
}
mockTable := memory.NewTable(tableName, sqlSch, nil)
mockDatabase := memory.NewDatabase("mydb")
mockDatabase.AddTable(tableName, mockTable)
mockProvider := memory.NewDBProvider(mockDatabase)
catalog := analyzer.NewCatalog(mockProvider)
pseudoAnalyzedQuery, err := planbuilder.Parse(ctx, catalog, query)
if err != nil {
return nil, err
}
var expr sql.Expression
transform.Inspect(pseudoAnalyzedQuery, func(n sql.Node) bool {
if projector, ok := n.(sql.Projector); ok {
expr = projector.ProjectedExprs()[0]
return false
}
return true
})
if expr == nil {
return nil, fmt.Errorf("unable to find expression in analyzed query")
}
return expr, nil
}
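A minimal sketch, assumed rather than taken from this diff, of resolving and evaluating a check expression for a table t(pk int, col1 int, col2 int); the reserved slot at row position 0 matches what buildRow constructs:
expr, err := resolveExpression(sqlCtx, "col1 != col2", sch, "t")
if err != nil {
	return err
}
// Position 0 is reserved for the expression itself, then pk, col1, col2.
row := sql.Row{nil, 1, 4, 4}
result, err := expr.Eval(sqlCtx, row)
if err != nil {
	return err
}
// false means a violation; nil or true means the check is fulfilled
// (MySQL check-constraint semantics).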
// remapTuple takes the given |tuple| and the |desc| that describes its data, and uses |mapping| to map the tuple's
// data into a new [][]byte, as indicated by the specified ordinal mapping.
func remapTuple(tuple val.Tuple, desc val.TupleDesc, mapping val.OrdinalMapping) [][]byte {
@@ -387,6 +387,8 @@ type FkCVMeta struct {
Table string `json:"Table"`
}
var _ types.JSONValue = FkCVMeta{}
func (m FkCVMeta) Unmarshall(ctx *sql.Context) (val types.JSONDocument, err error) {
return types.JSONDocument{Val: m}, nil
}
@@ -104,25 +104,6 @@ func replaceUniqueKeyViolation(ctx context.Context, edt *prolly.ArtifactsEditor,
return nil
}
func replaceUniqueKeyViolationWithValue(ctx context.Context, edt *prolly.ArtifactsEditor, k, value val.Tuple, kd val.TupleDesc, theirRootIsh doltdb.Rootish, vInfo []byte, tblName string) error {
meta := prolly.ConstraintViolationMeta{
VInfo: vInfo,
Value: value,
}
theirsHash, err := theirRootIsh.HashOf()
if err != nil {
return err
}
err = edt.ReplaceConstraintViolation(ctx, k, theirsHash, prolly.ArtifactTypeUniqueKeyViol, meta)
if err != nil {
return err
}
return nil
}
func getPKFromSecondaryKey(pKB *val.TupleBuilder, pool pool.BuffPool, pkMapping val.OrdinalMapping, k val.Tuple) val.Tuple {
for to := range pkMapping {
from := pkMapping.MapOrdinal(to)
@@ -179,4 +160,52 @@ func (m NullViolationMeta) ToString(ctx *sql.Context) (string, error) {
return fmt.Sprintf("{Columns: [%s]}", strings.Join(m.Columns, ",")), nil
}
var _ types.JSONValue = FkCVMeta{}
// CheckCVMeta holds metadata describing a check constraint violation.
type CheckCVMeta struct {
Name string `json:"Name"`
Expression string `json:"Expression"`
}
var _ types.JSONValue = CheckCVMeta{}
// newCheckCVMeta creates a new CheckCVMeta from a schema |sch| and a check constraint name |checkName|. If the
// check constraint is not found in the specified schema, an error is returned.
func newCheckCVMeta(sch schema.Schema, checkName string) (CheckCVMeta, error) {
found := false
var check schema.Check
for _, check = range sch.Checks().AllChecks() {
if check.Name() == checkName {
found = true
break
}
}
if !found {
return CheckCVMeta{}, fmt.Errorf("check constraint '%s' not found in schema", checkName)
}
return CheckCVMeta{
Name: check.Name(),
Expression: check.Expression(),
}, nil
}
// Unmarshall implements types.JSONValue
func (m CheckCVMeta) Unmarshall(_ *sql.Context) (val types.JSONDocument, err error) {
return types.JSONDocument{Val: m}, nil
}
// Compare implements types.JSONValue
func (m CheckCVMeta) Compare(ctx *sql.Context, v types.JSONValue) (cmp int, err error) {
ours := types.JSONDocument{Val: m}
return ours.Compare(ctx, v)
}
// ToString implements types.JSONValue
func (m CheckCVMeta) ToString(_ *sql.Context) (string, error) {
jsonStr := fmt.Sprintf(`{`+
`"Name": "%s", `+
`"Expression": "%s"}`,
m.Name,
m.Expression)
return jsonStr, nil
}
@@ -219,6 +219,13 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
return nil, err
}
r[o] = m
case prolly.ArtifactTypeChkConsViol:
var m merge.CheckCVMeta
err = json.Unmarshal(meta.VInfo, &m)
if err != nil {
return nil, err
}
r[o] = m
default:
panic("json not implemented for artifact type")
}
@@ -3770,7 +3770,6 @@ var DoltVerifyConstraintsTestScripts = []queries.ScriptTest{
var errTmplNoAutomaticMerge = "table %s can't be automatically merged.\nTo merge this table, make the schema on the source and target branch equal."
var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
// Data conflicts during a merge with schema changes
{
Name: "data conflict",
@@ -3791,7 +3790,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_conflicts;",
@@ -3986,7 +3985,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x0}},
Expected: []sql.Row{{0, 0}},
},
{
Query: "select * from t;",
@@ -4011,7 +4010,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x0}},
Expected: []sql.Row{{0, 0}},
},
{
Query: "select * from t;",
@@ -4119,7 +4118,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
// Constraint changes
// Constraints: Not Null
{
Name: "removing a not-null constraint",
AncSetUpScript: []string{
@@ -4151,6 +4150,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
// Constraints: Foreign Keys
{
Name: "adding a foreign key to one side, with fk constraint violation",
AncSetUpScript: []string{
@@ -4217,6 +4218,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
// Constraints: Unique
{
Name: "adding a unique key, with unique key violation",
AncSetUpScript: []string{
@@ -4264,7 +4267,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_conflicts;",
@@ -4318,6 +4321,210 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
// Constraints: Check Expressions
{
Name: "check constraint violation - simple case, no schema changes",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col1 int, col2 int, CHECK (col1 != col2));",
"INSERT into t values (1, 2, 3);",
"alter table t add index idx1 (pk, col2);",
},
RightSetUpScript: []string{
"update t set col2=4;",
},
LeftSetUpScript: []string{
"update t set col1=4;",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select violation_type, pk, col1, col2, violation_info like '\\%NOT((col1 = col2))\\%' from dolt_constraint_violations_t;",
Expected: []sql.Row{{uint64(3), 1, 4, 4, true}},
},
},
},
{
Name: "check constraint violation - schema change",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col1 int, col2 int, col3 int, CHECK (col2 != col3));",
"INSERT into t values (1, 2, 3, -3);",
"alter table t add index idx1 (pk, col2);",
},
RightSetUpScript: []string{
"update t set col2=100;",
},
LeftSetUpScript: []string{
"alter table t drop column col1;",
"update t set col3=100;",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: "select violation_type, pk, col2, col3, violation_info like '\\%NOT((col2 = col3))\\%' from dolt_constraint_violations_t;",
Expected: []sql.Row{{uint64(3), 1, 100, 100, true}},
},
},
},
{
Name: "check constraint violation - deleting rows",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col1 int, col2 int, col3 int, CHECK (col2 != col3));",
"INSERT into t values (1, 2, 3, -3);",
"alter table t add index idx1 (pk, col2);",
},
RightSetUpScript: []string{
"delete from t where pk=1;",
},
LeftSetUpScript: []string{
"insert into t values (4, 3, 2, 1);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0}},
},
},
},
{
Name: "check constraint violation - divergent edits",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col1 varchar(100) default ('hello'));",
"INSERT into t values (1, 'hi');",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t add constraint CHECK (col1 != concat('he', 'llo'))",
"update t set col1 = 'bye' where pk=1;",
},
LeftSetUpScript: []string{
"update t set col1 = 'adios' where pk=1;",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
},
},
{
Name: "check constraint violation - check is always NULL",
AncSetUpScript: []string{
"CREATE table t (pk int primary key, col1 varchar(100) default ('hello'));",
"INSERT into t values (1, 'hi');",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t add constraint CHECK (NULL = NULL)",
},
LeftSetUpScript: []string{
"insert into t values (2, DEFAULT);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0}},
},
},
},
{
Name: "check constraint violation - check is always false",
AncSetUpScript: []string{
"SET @@dolt_force_transaction_commit=1;",
"CREATE table t (pk int primary key, col1 varchar(100) default ('hello'));",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t add constraint CHECK (1 = 2)",
},
LeftSetUpScript: []string{
"insert into t values (1, DEFAULT);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
},
},
{
Name: "check constraint violation - right side violates new check constraint",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk int primary key, col00 int, col01 int, col1 varchar(100) default ('hello'));",
"INSERT into t values (1, 0, 0, 'hi');",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"insert into t values (2, 0, 0, DEFAULT);",
},
LeftSetUpScript: []string{
"alter table t drop column col00;",
"alter table t drop column col01;",
"alter table t add constraint CHECK (col1 != concat('he', 'llo'))",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: `select violation_type, pk, col1, violation_info like "\%NOT((col1 = concat('he','llo')))\%" from dolt_constraint_violations_t;`,
Expected: []sql.Row{{uint64(3), 2, "hello", true}},
},
},
},
{
Name: "check constraint violation - keyless table, right side violates new check constraint",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (c0 int, col0 varchar(100), col1 varchar(100) default ('hello'));",
"INSERT into t values (1, 'adios', 'hi');",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"insert into t values (2, 'hola', DEFAULT);",
},
LeftSetUpScript: []string{
"alter table t add constraint CHECK (col1 != concat('he', 'llo'))",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint64(1)}},
},
{
Query: `select violation_type, c0, col0, col1, violation_info like "%NOT((col1 = concat('he','llo')))%" from dolt_constraint_violations_t;`,
Expected: []sql.Row{{uint64(3), 2, "hola", "hello", true}},
},
},
},
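// For reference, a sketch (not part of this suite) of how a recorded check
// violation would be inspected and cleared interactively:
//
//	call dolt_merge('right');                   -- reports (0, 1)
//	select * from dolt_constraint_violations;   -- lists ('t', 1)
//	select * from dolt_constraint_violations_t; -- shows the offending rows
//	delete from dolt_constraint_violations_t;   -- after repairing the rows
//	call dolt_commit('-am', 'resolve violations');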
// Resolvable type changes
{
Name: "type widening - enums and sets",
@@ -4337,7 +4544,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x0}},
Expected: []sql.Row{{0, 0}},
},
{
Query: "select * from t order by pk;",
@@ -4378,7 +4585,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
@@ -4418,7 +4625,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
@@ -4449,7 +4656,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema, description from dolt_schema_conflicts;",
@@ -4711,7 +4918,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select pk, col1 from t;",
@@ -4748,7 +4955,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
Expected: []sql.Row{{0, 1}},
},
{
Query: "select pk, col1 from t;",
@@ -4765,31 +4972,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
{
// TODO: We should scan for check constraints during merge and flag failing
// constraints as violations in `dolt_constraint_violations`.
Name: "adding a check-constraint",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
},
RightSetUpScript: []string{
"update t set col1 = col1 + 5 where col1 < 5;",
"alter table t add check ( col1 > 5 );",
},
LeftSetUpScript: []string{
"insert into t values (2, 2);",
},
Assertions: []queries.ScriptTestAssertion{
{
// TODO: Dolt currently merges this without an error, but it shouldn't;
// There is a constraint violation that should be reported.
Skip: true,
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
},
},
{
// TODO: Changing a column's collation requires rewriting the table and any indexes containing that column.
// For now, we just detect the schema incompatibility and return schema conflict metadata.
@@ -249,6 +249,7 @@ call dolt_commit('-am', 'new table');
SQL
cd $TMPDIRS
mkdir -p "dbs2"
# This is a hack: we have to change our persisted global server
# vars for the sql command to work on the replica. TODO: fix this.
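# For example (hypothetical variable and remote names), a persisted global
# server var is set like:
#   dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote1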
@@ -1,3 +1,4 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
@@ -0,0 +1,110 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
load $BATS_TEST_DIRNAME/helper/query-server-common.bash
make_repo() {
mkdir "$1"
cd "$1"
dolt init
dolt sql -q "create table $1_tbl (id int)"
cd ..
}
setup() {
setup_no_dolt_init
make_repo defaultDB
make_repo altDB
}
teardown() {
stop_sql_server 1
teardown_common
}
@test "sql-local-remote: test switch between server/no server" {
start_sql_server defaultDB
run dolt --verbose-engine-setup --user dolt sql -q "show databases"
[ "$status" -eq 0 ] || false
[[ "$output" =~ "starting remote mode" ]] || false
[[ "$output" =~ "defaultDB" ]] || false
[[ "$output" =~ "altDB" ]] || false
stop_sql_server 1
run dolt --verbose-engine-setup sql -q "show databases"
[ "$status" -eq 0 ] || false
[[ "$output" =~ "starting local mode" ]] || false
[[ "$output" =~ "defaultDB" ]] || false
[[ "$output" =~ "altDB" ]] || false
}
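# Mode selection sketch: the CLI is assumed to probe for a sql-server already
# running against the data dir (via its lock file) and, if found, to send the
# query over the wire as a client; otherwise it opens the database in-process.
# --verbose-engine-setup surfaces which path was taken, which is what the
# "starting remote mode" / "starting local mode" assertions key on.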
@test "sql-local-remote: check --data-dir pointing to a server root can be used when in different directory." {
start_sql_server altDb
ROOT_DIR=$(pwd)
mkdir someplace_else
cd someplace_else
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt --use-db altDB sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt --use-db defaultDB sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
[[ "$output" =~ "defaultDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
stop_sql_server 1
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt --use-db altDB sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting local mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt --use-db defaultDB sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting local mode" ]] || false
[[ "$output" =~ "defaultDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR" --user dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting local mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
}
@test "sql-local-remote: check --data-dir pointing to a database root can be used when in different directory." {
start_sql_server altDb
ROOT_DIR=$(pwd)
mkdir -p someplace_new/fun
cd someplace_new/fun
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR/altDB" --user dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR/altDB" --user dolt --use-db defaultDB sql -q "show tables"
[ "$status" -eq 1 ]
[[ "$output" =~ "defaultDB does not exist" ]] || false
stop_sql_server 1
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR/altDB" --user dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting local mode" ]] || false
[[ "$output" =~ "altDB_tbl" ]] || false
run dolt --verbose-engine-setup --data-dir="$ROOT_DIR/altDB" --user dolt --use-db defaultDB sql -q "show tables"
[ "$status" -eq 1 ]
[[ "$output" =~ "defaultDB does not exist" ]] || false
}
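# Resolution rules exercised above (as understood here):
#   --data-dir <server root>   -> every database under it is addressable
#   --data-dir <database dir>  -> only that database; a mismatched --use-db errors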
+31 -28
@@ -74,7 +74,7 @@ EOF
# verify that dolt_clone works
dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test01" ""
dolt sql-client -P $PORT -u dolt --use-db 'test01' -q"call dolt_clone('file:///$tempDir/remote')"
dolt sql-client -P $PORT -u dolt --use-db 'test01' -q"call dolt_clone('file:///$tempDir/remote')"
}
@test "sql-server: loglevels are case insensitive" {
@@ -200,19 +200,19 @@ SQL
[[ "$output" =~ "one_pk" ]] || false
# Add rows on the command line
run dolt --user=dolt sql -q "insert into one_pk values (1,1,1)"
[ "$status" -eq 1 ]
run dolt --verbose-engine-setup --user=dolt sql -q "insert into one_pk values (1,1,1)"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
run dolt sql-client -P $PORT -u dolt -q "SELECT * FROM one_pk"
[ $status -eq 0 ]
! [[ $output =~ " 1 " ]] || false
[[ $output =~ " 1 " ]] || false
# Test import as well (used by doltpy)
echo 'pk,c1,c2' > import.csv
echo '2,2,2' >> import.csv
run dolt table import -u one_pk import.csv
[ "$status" -eq 1 ]
run dolt sql-client -P $PORT -u dolt -q "SELECT * FROM one_pk"
[ $status -eq 0 ]
! [[ $output =~ " 2 " ]] || false
@@ -235,7 +235,7 @@ SQL
c1 BIGINT,
c2 BIGINT,
PRIMARY KEY (pk))"
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "No tables in working set" ]] || false
@@ -394,7 +394,7 @@ SQL
[[ $output =~ "1,1," ]] || false
[[ $output =~ "2,2,2" ]] || false
[[ $output =~ "3,3,3" ]] || false
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "
UPDATE r1_one_pk SET c2=1 WHERE pk=1;
USE repo2;
@@ -445,10 +445,10 @@ SQL
[[ $output =~ "r2_one_pk" ]] || false
# put data in both using database scoped inserts
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk) VALUES (0)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk) VALUES (0)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk,c1) VALUES (1,1)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk) VALUES (0)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk,c3) VALUES (1,1)"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk,c3,c4) VALUES (2,2,2),(3,3,3)"
@@ -466,10 +466,10 @@ SQL
[[ $output =~ "1,1," ]] || false
[[ $output =~ "2,2,2" ]] || false
[[ $output =~ "3,3,3" ]] || false
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "DELETE FROM repo1.r1_one_pk where pk=0"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "DELETE FROM repo2.r2_one_pk where pk=0"
run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk"
[ $status -eq 0 ]
! [[ $output =~ "0,," ]] || false
@@ -740,15 +740,15 @@ SQL
INSERT INTO one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3);
CALL DOLT_ADD('.');
CALL dolt_commit('-am', 'test commit message', '--author', 'John Doe <john@example.com>');"
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "one_pk" ]] || false
run dolt --user=dolt sql -q "drop table one_pk"
[ "$status" -eq 1 ]
run dolt --verbose-engine-setup --user=dolt --use-db repo1 sql -q "drop table one_pk"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "drop table one_pk"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_add('.')"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_commit('-am', 'Dropped table one_pk')"
@@ -891,7 +891,7 @@ EOF
dolt sql-server --config ./config.yml --socket "dolt.$PORT.sock" &
SERVER_PID=$!
sleep 1
# We do things manually here because we need to control
# CLIENT_MULTI_STATEMENTS.
python3 -c '
@@ -1005,7 +1005,7 @@ END""")
[[ $output =~ "2,2" ]] || false
[[ $output =~ "4,4" ]] || false
! [[ $output =~ "3,3" ]] || false
# drop the table on main, should keep counting from 4
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "drop table t1"
dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE t1(pk bigint primary key auto_increment, val int)" ""
@@ -1055,7 +1055,7 @@ END""")
cd repo3
run dolt sql -q "select * from test" -r csv
[ "$status" -eq 0 ]
[ "${lines[0]}" = "pk" ]
[ "${lines[0]}" = "pk" ]
[ "${lines[1]}" = "0" ]
[ "${lines[2]}" = "1" ]
[ "${lines[3]}" = "2" ]
@@ -1183,7 +1183,7 @@ END""")
run dolt sql-client --use-db "test1/newbranch" -u dolt -P $PORT -q "select * from a"
[ $status -ne 0 ]
[[ "$output" =~ "database not found" ]] || false
# can't drop a branch-qualified database name
run dolt sql-client -P $PORT -u dolt --use-db '' -q "drop database \`test2/newbranch\`"
[ $status -ne 0 ]
@@ -1237,7 +1237,7 @@ END""")
mkdir no_dolt && cd no_dolt
mkdir db_dir
start_sql_server_with_args --host 0.0.0.0 --user dolt --data-dir=db_dir
dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1"
run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases"
[ $status -eq 0 ]
@@ -1304,7 +1304,7 @@ END""")
run dolt sql-client -P $PORT -u dolt --use-db '' -q "create database dir_exists"
[ $status -ne 0 ]
[[ $output =~ exists ]] || false
run dolt sql-client -P $PORT -u dolt --use-db '' -q "create database file_exists"
[ $status -ne 0 ]
[[ $output =~ exists ]] || false
@@ -1496,14 +1496,17 @@ databases:
@test "sql-server: sql-server locks database to writes" {
cd repo2
dolt sql -q "create table a (x int primary key)"
dolt sql -q "create table a (x int primary key)"
start_sql_server
run dolt sql -q "create table b (x int primary key)"
run dolt --verbose-engine-setup sql -q "create table b (x int primary key)"
[ "$status" -eq 1 ]
[[ "$output" =~ "database is locked to writes" ]] || false
run dolt sql -q "insert into b values (0)"
[ "$status" -eq 1 ]
[[ "$output" =~ "database is locked to writes" ]] || false
[[ "$output" =~ "Error connecting to remote database" ]] || false
[[ "$output" =~ "User not found 'root'" ]] || false
run dolt --verbose-engine-setup --user dolt sql -q "create table b (x int primary key)"
[ "$status" -eq 0 ]
[[ "$output" =~ "starting remote mode" ]] || false
}
@test "sql-server: start server without socket flag should set default socket path" {
+1 -2
@@ -478,8 +478,7 @@ teardown() {
! [[ "$output" =~ ".doltcfg" ]] || false
# create new user
run dolt --data-dir=db_dir --privilege-file=privs.db sql <<< "create user new_user"
[ "$status" -eq 0 ]
dolt --data-dir=db_dir --privilege-file=privs.db sql <<< "create user new_user"
# show users, expect root user and new_user
run dolt --data-dir=db_dir --privilege-file=privs.db sql <<< "select user from mysql.user;"
+55 -1
@@ -1031,7 +1031,7 @@ SQL
run dolt sql -r csv -q "select @@character_set_client"
[ $status -eq 0 ]
[[ "$output" =~ "utf8mb4" ]] || false
run dolt sql -r json -q "select * from test order by a"
[ $status -eq 0 ]
[ "$output" == '{"rows": [{"a":1,"b":1.5,"c":"1","d":"2020-01-01 00:00:00"},{"a":2,"b":2.5,"c":"2","d":"2020-02-02 00:00:00"},{"a":3,"c":"3","d":"2020-03-03 00:00:00"},{"a":4,"b":4.5,"d":"2020-04-04 00:00:00"},{"a":5,"b":5.5,"c":"5"}]}' ]
@@ -2776,3 +2776,57 @@ SQL
[[ "$output" =~ "Query OK, 1 row affected (2".*" sec)" ]] || false
[[ "$output" =~ "Query OK, 1 row affected (3".*" sec)" ]] || false
}
@test "sql: check --data-dir used from a completely different location and still resolve DB names" {
# remove config files
rm -rf .doltcfg
rm -rf db_dir
mkdir db_dir
cd db_dir
ROOT_DIR=$(pwd)
# create the first database; its table is added from the parent directory
mkdir dba
cd dba
dolt init
cd ..
dolt sql -q "create table dba_tbl (id int)"
mkdir dbb
cd dbb
dolt init
dolt sql -q "create table dbb_tbl (id int)"
# Ensure --data-dir flag is really used by changing the cwd.
cd /tmp
run dolt --data-dir="$ROOT_DIR/dbb" sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "dbb_tbl" ]] || false
run dolt --data-dir="$ROOT_DIR/dba" sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "dba_tbl" ]] || false
# Default to first DB alphabetically.
run dolt --data-dir="$ROOT_DIR" sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "dba_tbl" ]] || false
# --use-db arg can be used to be specific.
run dolt --data-dir="$ROOT_DIR" --use-db=dbb sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "dbb_tbl" ]] || false
# Redundant use of the flag is OK.
run dolt --data-dir="$ROOT_DIR/dbb" --use-db=dbb sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "dbb_tbl" ]] || false
# Use of the use-db flag when we have a different DB specified by data-dir should error.
run dolt --data-dir="$ROOT_DIR/dbb" --use-db=dba sql -q "show tables"
[ "$status" -eq 1 ]
[[ "$output" =~ "provided --use-db dba does not exist or is not a directory" ]] || false
}