Merge branch 'main' into zachmu/multi-db3

This commit is contained in:
Zach Musgrave
2023-04-27 16:48:56 -07:00
committed by GitHub
32 changed files with 649 additions and 240 deletions
+1
View File
@@ -106,6 +106,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.ref || github.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
submodules: true
token: ${{ secrets.GITHUB_TOKEN }}
- name: Run go mod tidy
run: go mod tidy
+4
View File
@@ -14,6 +14,10 @@
package cli
import "github.com/dolthub/dolt/go/libraries/utils/argparser"
// CliContext is used to pass top level command information down to subcommands.
type CliContext interface {
// GlobalArgs returns the arguments passed before the subcommand.
GlobalArgs() *argparser.ArgParseResults
}
+6 -6
View File
@@ -32,7 +32,7 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
func isHelp(str string) bool {
func IsHelp(str string) bool {
str = strings.TrimSpace(str)
if len(str) == 0 {
@@ -50,7 +50,7 @@ func isHelp(str string) bool {
func hasHelpFlag(args []string) bool {
for _, arg := range args {
if isHelp(arg) {
if IsHelp(arg) {
return true
}
}
@@ -171,7 +171,7 @@ func (hc SubCommandHandler) Hidden() bool {
func (hc SubCommandHandler) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx CliContext) int {
if len(args) < 1 && hc.Unspecified == nil {
hc.printUsage(commandStr)
hc.PrintUsage(commandStr)
return 1
}
@@ -190,12 +190,12 @@ func (hc SubCommandHandler) Exec(ctx context.Context, commandStr string, args []
return hc.handleCommand(ctx, commandStr, hc.Unspecified, args, dEnv, cliCtx)
}
if !isHelp(subCommandStr) {
if !IsHelp(subCommandStr) {
PrintErrln(color.RedString("Unknown Command " + subCommandStr))
return 1
}
hc.printUsage(commandStr)
hc.PrintUsage(commandStr)
return 0
}
@@ -302,7 +302,7 @@ func CheckUserNameAndEmail(dEnv *env.DoltEnv) bool {
return true
}
func (hc SubCommandHandler) printUsage(commandStr string) {
func (hc SubCommandHandler) PrintUsage(commandStr string) {
Println("Valid commands for", commandStr, "are")
for _, cmd := range hc.Subcommands {
+1 -1
View File
@@ -90,5 +90,5 @@ func (cmd BlameCmd) Exec(ctx context.Context, commandStr string, args []string,
}
args = []string{"--" + QueryFlag, fmt.Sprintf(blameQueryTemplate, apr.Arg(0))}
return SqlCmd{}.Exec(ctx, "sql", args, dEnv, nil)
return SqlCmd{}.Exec(ctx, "sql", args, dEnv, cliCtx)
}
+48 -32
View File
@@ -189,6 +189,12 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
globalArgs := cliCtx.GlobalArgs()
err = validateSqlArgs(globalArgs)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
// We need a username and password for many SQL commands, so set defaults if they don't exist
dEnv.Config.SetFailsafes(env.DefaultFailsafeConfig)
@@ -198,24 +204,33 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
username = user
}
mrEnv, verr := getMultiRepoEnv(ctx, apr, dEnv)
// data-dir args come either from the global args or the subcommand args. We need to check both.
var dataDir string
dataDirGiven := false
if multiDbDir, ok := apr.GetValue(MultiDBDirFlag); ok {
// When GlobalArgs migration is complete, drop this flag.
dataDir = multiDbDir
dataDirGiven = true
} else if dataDirPath, ok := apr.GetValue(DataDirFlag); ok {
// TODO: remove this once we remove the deprecated passing of data dir directly to subcommand.
dataDir = dataDirPath
dataDirGiven = true
} else if dataDirPath, ok := globalArgs.GetValue(DataDirFlag); ok {
dataDir = dataDirPath
dataDirGiven = true
}
mrEnv, dataDir, verr := getMultiRepoEnv(ctx, dataDir, dEnv)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
// need to return cfgdirpath and error
var cfgDirPath string
var dataDir string
if multiDbDir, ok := apr.GetValue(MultiDBDirFlag); ok {
dataDir = multiDbDir
} else if dataDirPath, ok := apr.GetValue(DataDirFlag); ok {
dataDir = dataDirPath
}
cfgDir, cfgDirSpecified := apr.GetValue(CfgDirFlag)
if cfgDirSpecified {
cfgDirPath = cfgDir
} else if len(dataDir) != 0 {
} else if dataDirGiven {
cfgDirPath = filepath.Join(dataDir, DefaultCfgDirName)
} else {
// Look in parent directory for doltcfg
@@ -267,10 +282,20 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
}
se, sqlCtx, err := newEngine(ctx, apr, cfgDirPath, privsFp, branchControlFilePath, username, mrEnv)
format := engine.FormatTabular
if formatSr, ok := apr.GetValue(FormatFlag); ok {
var verr errhand.VerboseError
format, verr = GetResultFormat(formatSr)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
}
se, sqlCtx, err := newEngine(ctx, format, cfgDirPath, privsFp, branchControlFilePath, username, mrEnv)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
defer se.Close()
if query, queryOK := apr.GetValue(QueryFlag); queryOK {
@@ -338,7 +363,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
func newEngine(
ctx context.Context,
apr *argparser.ArgParseResults,
format engine.PrintResultFormat,
cfgDirPath string,
privsFp string,
branchControlFilePath string,
@@ -346,15 +371,6 @@ func newEngine(
mrEnv *env.MultiRepoEnv,
) (*engine.SqlEngine, *sql.Context, error) {
format := engine.FormatTabular
if formatSr, ok := apr.GetValue(FormatFlag); ok {
var verr errhand.VerboseError
format, verr = GetResultFormat(formatSr)
if verr != nil {
return nil, nil, verr
}
}
config := &engine.SqlEngineConfig{
DoltCfgDirPath: cfgDirPath,
PrivFilePath: privsFp,
@@ -501,26 +517,26 @@ func execSaveQuery(ctx *sql.Context, dEnv *env.DoltEnv, se *engine.SqlEngine, ap
}
// getMultiRepoEnv returns an appropriate MultiRepoEnv for this invocation of the command
func getMultiRepoEnv(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) (*env.MultiRepoEnv, errhand.VerboseError) {
func getMultiRepoEnv(ctx context.Context, workingDir string, dEnv *env.DoltEnv) (mrEnv *env.MultiRepoEnv, resolvedDir string, verr errhand.VerboseError) {
var err error
fs := dEnv.FS
if dataDir, ok := apr.GetValue(MultiDBDirFlag); ok {
fs, err = fs.WithWorkingDir(dataDir)
} else if dataDir, ok := apr.GetValue(DataDirFlag); ok {
fs, err = fs.WithWorkingDir(dataDir)
if len(workingDir) > 0 {
fs, err = fs.WithWorkingDir(workingDir)
}
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
return nil, "", errhand.VerboseErrorFromError(err)
}
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), fs, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
resolvedDir, err = fs.Abs("")
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
return nil, "", errhand.VerboseErrorFromError(err)
}
return mrEnv, nil
mrEnv, err = env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), fs, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
return nil, "", errhand.VerboseErrorFromError(err)
}
return mrEnv, resolvedDir, nil
}
func execBatch(
+13 -11
View File
@@ -43,6 +43,8 @@ import (
var tableName = "people"
var stubCliCtx = BuildEmptyCliContext()
// Smoke test: Console opens and exits
func TestSqlConsole(t *testing.T) {
t.Run("SQL console opens and exits", func(t *testing.T) {
@@ -53,7 +55,7 @@ func TestSqlConsole(t *testing.T) {
args := []string{}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, 0, result)
})
@@ -81,7 +83,7 @@ func TestSqlBatchMode(t *testing.T) {
args := []string{"-b", "-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -120,7 +122,7 @@ func TestSqlSelect(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -146,7 +148,7 @@ func TestSqlShow(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -179,7 +181,7 @@ func TestCreateTable(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
working, err = dEnv.WorkingRoot(context.Background())
@@ -219,7 +221,7 @@ func TestShowTables(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -250,7 +252,7 @@ func TestAlterTable(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -277,7 +279,7 @@ func TestDropTable(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(context.TODO(), commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
})
}
@@ -396,7 +398,7 @@ func TestInsert(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
if result == 0 {
@@ -477,7 +479,7 @@ func TestUpdate(t *testing.T) {
args := []string{"-q", test.query}
commandStr := "dolt sql"
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
if result == 0 {
@@ -552,7 +554,7 @@ func TestDelete(t *testing.T) {
ctx := context.Background()
commandStr := "dolt sql"
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, nil)
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv, stubCliCtx)
assert.Equal(t, test.expectedRes, result)
if result == 0 {
+14 -14
View File
@@ -43,8 +43,8 @@ import (
const (
sqlClientDualFlag = "dual"
sqlClientQueryFlag = "query"
sqlClientUseDbFlag = "use-db"
SqlClientQueryFlag = "query"
SqlClientUseDbFlag = "use-db"
sqlClientResultFormat = "result-format"
)
@@ -84,10 +84,10 @@ func (cmd SqlClientCmd) Docs() *cli.CommandDocumentation {
func (cmd SqlClientCmd) ArgParser() *argparser.ArgParser {
ap := SqlServerCmd{}.ArgParserWithName(cmd.Name())
ap.SupportsFlag(sqlClientDualFlag, "d", "Causes this command to spawn a dolt server that is automatically connected to.")
ap.SupportsString(sqlClientQueryFlag, "q", "string", "Sends the given query to the server and immediately exits.")
ap.SupportsString(sqlClientUseDbFlag, "", "db_name", fmt.Sprintf("Selects the given database before executing a query. "+
"By default, uses the current folder's name. Must be used with the --%s flag.", sqlClientQueryFlag))
ap.SupportsString(sqlClientResultFormat, "", "format", fmt.Sprintf("Returns the results in the given format. Must be used with the --%s flag.", sqlClientQueryFlag))
ap.SupportsString(SqlClientQueryFlag, "q", "string", "Sends the given query to the server and immediately exits.")
ap.SupportsString(SqlClientUseDbFlag, "", "db_name", fmt.Sprintf("Selects the given database before executing a query. "+
"By default, uses the current folder's name. Must be used with the --%s flag.", SqlClientQueryFlag))
ap.SupportsString(sqlClientResultFormat, "", "format", fmt.Sprintf("Returns the results in the given format. Must be used with the --%s flag.", SqlClientQueryFlag))
return ap
}
@@ -123,12 +123,12 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
cli.PrintErrln(err.Error())
return 1
}
if apr.Contains(sqlClientQueryFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, sqlClientQueryFlag)))
if apr.Contains(SqlClientQueryFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, SqlClientQueryFlag)))
return 1
}
if apr.Contains(sqlClientUseDbFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, sqlClientUseDbFlag)))
if apr.Contains(SqlClientUseDbFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, SqlClientUseDbFlag)))
return 1
}
if apr.Contains(sqlClientResultFormat) {
@@ -167,14 +167,14 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
}
}
query, hasQuery := apr.GetValue(sqlClientQueryFlag)
dbToUse, hasUseDb := apr.GetValue(sqlClientUseDbFlag)
query, hasQuery := apr.GetValue(SqlClientQueryFlag)
dbToUse, hasUseDb := apr.GetValue(SqlClientUseDbFlag)
resultFormat, hasResultFormat := apr.GetValue(sqlClientResultFormat)
if !hasQuery && hasUseDb {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", sqlClientUseDbFlag, sqlClientQueryFlag)))
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", SqlClientUseDbFlag, SqlClientQueryFlag)))
return 1
} else if !hasQuery && hasResultFormat {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", sqlClientUseDbFlag, sqlClientResultFormat)))
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", SqlClientUseDbFlag, sqlClientResultFormat)))
return 1
}
if !hasUseDb && hasQuery {
+1 -1
View File
@@ -94,5 +94,5 @@ func (cmd CpCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
fmt.Sprintf("--%s", commands.BatchFlag),
fmt.Sprintf(`--%s`, commands.QueryFlag),
queryStr,
}, dEnv, nil)
}, dEnv, cliCtx)
}
+1 -1
View File
@@ -95,5 +95,5 @@ func (cmd MvCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
fmt.Sprintf("--%s", commands.BatchFlag),
fmt.Sprintf(`--%s`, commands.QueryFlag),
queryStr,
}, dEnv, nil)
}, dEnv, cliCtx)
}
+1 -1
View File
@@ -93,5 +93,5 @@ func (cmd RmCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
fmt.Sprintf("--%s", commands.BatchFlag),
fmt.Sprintf(`--%s`, commands.QueryFlag),
queryStr,
}, dEnv, nil)
}, dEnv, cliCtx)
}
+17
View File
@@ -17,6 +17,9 @@ package commands
import (
"context"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -68,3 +71,17 @@ func MaybeGetCommitWithVErr(dEnv *env.DoltEnv, maybeCommit string) (*doltdb.Comm
return cm, nil
}
type cliCtx struct{}
func (c cliCtx) GlobalArgs() *argparser.ArgParseResults {
ap := argparser.NewArgParserWithMaxArgs("empty", 0)
apr, _ := ap.Parse(make([]string, 0))
return apr
}
var _ cli.CliContext = cliCtx{}
func BuildEmptyCliContext() cli.CliContext {
return cliCtx{}
}
+93 -3
View File
@@ -18,12 +18,14 @@ import (
"context"
crand "crypto/rand"
"encoding/binary"
"errors"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/fatih/color"
@@ -52,6 +54,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/events"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/util/tempfiles"
@@ -63,7 +66,8 @@ const (
var dumpDocsCommand = &commands.DumpDocsCmd{}
var dumpZshCommand = &commands.GenZshCompCmd{}
var doltCommand = cli.NewSubCommandHandler("dolt", "it's git for data", []cli.Command{
var doltSubCommands = []cli.Command{
commands.InitCmd{},
commands.StatusCmd{},
commands.AddCmd{},
@@ -114,7 +118,10 @@ var doltCommand = cli.NewSubCommandHandler("dolt", "it's git for data", []cli.Co
docscmds.Commands,
stashcmds.StashCommands,
&commands.Assist{},
})
}
var doltCommand = cli.NewSubCommandHandler("dolt", "it's git for data", doltSubCommands)
var globalArgParser = buildGlobalArgs()
func init() {
dumpDocsCommand.DoltCommand = doltCommand
@@ -147,6 +154,11 @@ func main() {
func runMain() int {
args := os.Args[1:]
if len(args) == 0 {
doltCommand.PrintUsage("dolt")
return 1
}
if os.Getenv("DOLT_VERBOSE_ASSERT_TABLE_FILES_CLOSED") == "" {
nbs.TableIndexGCFinalizerWithStackTrace = false
}
@@ -401,9 +413,28 @@ func runMain() int {
cli.Printf("error: failed to load persisted global variables: %s\n", err.Error())
}
globalArgs, args, initCliContext, printUsage, err := splitArgsOnSubCommand(args)
if printUsage {
doltCommand.PrintUsage("dolt")
return 0
}
if err != nil {
cli.PrintErrln(color.RedString("Failure to parse arguments: %v", err))
return 1
}
start := time.Now()
ctx, stop := context.WithCancel(ctx)
res := doltCommand.Exec(ctx, "dolt", args, dEnv, nil)
var cliCtx cli.CliContext = nil
if initCliContext {
_, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser))
apr := cli.ParseArgsOrDie(globalArgParser, globalArgs, usage)
cliCtx = tmpCliContext{globalArgs: apr}
}
res := doltCommand.Exec(ctx, "dolt", args, dEnv, cliCtx)
stop()
if err = dbfactory.CloseAllLocalDatabases(); err != nil {
@@ -422,6 +453,56 @@ func runMain() int {
return res
}
// tmpCliContext is a temporary implementation of the CliContext interface. It is used to pass the global args to the
// subcommands, and will be replaced with implementations aware of query contexts shortly.
type tmpCliContext struct {
globalArgs *argparser.ArgParseResults
}
func (t tmpCliContext) GlobalArgs() *argparser.ArgParseResults {
return t.globalArgs
}
var _ cli.CliContext = (*tmpCliContext)(nil)
// splitArgsOnSubCommand splits the args into two slices, the first containing all args before the first subcommand,
// and the second containing all args after the first subcommand. The second slice will start with the subcommand name.
func splitArgsOnSubCommand(args []string) (globalArgs, subArgs []string, initCliContext, printUsages bool, err error) {
commandSet := make(map[string]bool)
for _, cmd := range doltSubCommands {
commandSet[cmd.Name()] = true
}
for i, arg := range args {
arg = strings.ToLower(arg)
if cli.IsHelp(arg) {
// Found --help before any subcommand, so print dolt help.
return nil, nil, false, true, nil
}
if _, ok := commandSet[arg]; ok {
// SQL is the first subcommand to support the CliContext. We'll need a more general solution when we add more.
// blame, table rm, and table mv commands also depend on the sql command, so they are also included here.
initCliContext := "sql" == arg || "blame" == arg || "table" == arg
return args[:i], args[i:], initCliContext, false, nil
}
}
return nil, nil, false, false, errors.New("No valid dolt subcommand found. See 'dolt --help' for usage.")
}
// doc is currently used only when a `initCliContext` command is specified. This will include all commands in time,
// otherwise you only see these docs if you specify a nonsense argument before the `sql` subcommand.
var doc = cli.CommandDocumentationContent{
ShortDesc: "Dolt is git for data",
LongDesc: `Dolt comprises of multiple subcommands that allow users to import, export, update, and manipulate data with SQL.`,
Synopsis: []string{
"<--data-dir=<path>> subcommand <subcommand arguments>",
},
}
func seedGlobalRand() {
bs := make([]byte, 8)
_, err := crand.Read(bs)
@@ -466,3 +547,12 @@ func interceptSendMetrics(ctx context.Context, args []string) (bool, int) {
dEnv := env.LoadWithoutDB(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, Version)
return true, doltCommand.Exec(ctx, "dolt", args, dEnv, nil)
}
func buildGlobalArgs() *argparser.ArgParser {
ap := argparser.NewArgParserWithVariableArgs("dolt")
// Pulling this argument forward first to pave the way. Others will follow.
ap.SupportsString(commands.DataDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within. Defaults to the current directory.")
return ap
}
+1 -1
View File
@@ -59,7 +59,7 @@ require (
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.15.1-0.20230424215657-62cb35ac61a2
github.com/dolthub/go-mysql-server v0.15.1-0.20230427180713-42232b262809
github.com/dolthub/swiss v0.1.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/jmoiron/sqlx v1.3.4
+2 -2
View File
@@ -166,8 +166,8 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.15.1-0.20230424215657-62cb35ac61a2 h1:jKtEfBQlcYknZSRpjFfo0V0sqybrWoCQTdmoVkAVdGY=
github.com/dolthub/go-mysql-server v0.15.1-0.20230424215657-62cb35ac61a2/go.mod h1:B+vF4ambU4nWlkqEivILt0KJyvnIV22xFNWB9Xje948=
github.com/dolthub/go-mysql-server v0.15.1-0.20230427180713-42232b262809 h1:7UYUVmasz3bKeQaeEzA2ue0hIQbQK4kJBHJBpVHAoSU=
github.com/dolthub/go-mysql-server v0.15.1-0.20230427180713-42232b262809/go.mod h1:B+vF4ambU4nWlkqEivILt0KJyvnIV22xFNWB9Xje948=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0=
@@ -58,7 +58,7 @@ func (cmd fvCommand) exec(ctx context.Context, dEnv *env.DoltEnv) int {
// execute the command using |cmd.user|'s Feature Version
doltdb.DoltFeatureVersion = cmd.user.vers
defer func() { doltdb.DoltFeatureVersion = DoltFeatureVersionCopy }()
return cmd.cmd.Exec(ctx, cmd.cmd.Name(), cmd.args, dEnv, nil)
return cmd.cmd.Exec(ctx, cmd.cmd.Name(), cmd.args, dEnv, commands.BuildEmptyCliContext())
}
type fvUser struct {
@@ -37,6 +37,8 @@ func TestForeignKeys(t *testing.T) {
}
}
var fkCliCtx = commands.BuildEmptyCliContext()
func TestForeignKeyErrors(t *testing.T) {
skipNewFormat(t)
cmds := []testCommand{
@@ -49,13 +51,14 @@ func TestForeignKeyErrors(t *testing.T) {
dEnv := dtestutils.CreateTestEnv()
for _, c := range cmds {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, fkCliCtx)
require.Equal(t, 0, exitCode)
}
exitCode := commands.SqlCmd{}.Exec(ctx, commands.SqlCmd{}.Name(), []string{"-q", `ALTER TABLE test MODIFY v1 INT;`}, dEnv, nil)
exitCode := commands.SqlCmd{}.Exec(ctx, commands.SqlCmd{}.Name(), []string{"-q", `ALTER TABLE test MODIFY v1 INT;`}, dEnv, fkCliCtx)
require.Equal(t, 1, exitCode)
exitCode = commands.SqlCmd{}.Exec(ctx, commands.SqlCmd{}.Name(), []string{"-q", `ALTER TABLE test2 MODIFY v1 INT;`}, dEnv, nil)
exitCode = commands.SqlCmd{}.Exec(ctx, commands.SqlCmd{}.Name(), []string{"-q", `ALTER TABLE test2 MODIFY v1 INT;`}, dEnv, fkCliCtx)
require.Equal(t, 1, exitCode)
}
@@ -96,11 +99,11 @@ func testForeignKeys(t *testing.T, test foreignKeyTest) {
dEnv := dtestutils.CreateTestEnv()
for _, c := range fkSetupCommon {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, fkCliCtx)
require.Equal(t, 0, exitCode)
}
for _, c := range test.setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, fkCliCtx)
require.Equal(t, 0, exitCode)
}
+4 -2
View File
@@ -55,6 +55,8 @@ type gcTest struct {
postGCFunc func(ctx context.Context, t *testing.T, ddb *doltdb.DoltDB, prevRes interface{})
}
var gcCliCtx = commands.BuildEmptyCliContext()
var gcTests = []gcTest{
{
name: "gc test",
@@ -113,7 +115,7 @@ func testGarbageCollection(t *testing.T, test gcTest) {
defer dEnv.DoltDB.Close()
for _, c := range gcSetupCommon {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, gcCliCtx)
require.Equal(t, 0, exitCode)
}
@@ -121,7 +123,7 @@ func testGarbageCollection(t *testing.T, test gcTest) {
for _, stage := range test.stages {
res = stage.preStageFunc(ctx, t, dEnv.DoltDB, res)
for _, c := range stage.commands {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, gcCliCtx)
require.Equal(t, 0, exitCode)
}
}
+8 -4
View File
@@ -54,6 +54,7 @@ type MultiRepoEnv struct {
envs []NamedEnv
fs filesys.Filesys
cfg config.ReadWriteConfig
dialProvider dbfactory.GRPCDialProvider
ignoreLockFile bool
}
@@ -72,6 +73,7 @@ func MultiEnvForDirectory(
envs: make([]NamedEnv, 0),
fs: fs,
cfg: config,
dialProvider: NewGRPCDialProviderFromDoltEnv(dEnv),
ignoreLockFile: ignoreLockFile,
}
@@ -192,6 +194,11 @@ func MultiEnvForPaths(
} else if dEnv.CfgLoadErr != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.CfgLoadErr.Error())
}
if mrEnv.dialProvider == nil {
mrEnv.dialProvider = NewGRPCDialProviderFromDoltEnv(dEnv)
}
envSet[name] = dEnv
}
@@ -208,10 +215,7 @@ func (mrEnv *MultiRepoEnv) FileSystem() filesys.Filesys {
}
func (mrEnv *MultiRepoEnv) RemoteDialProvider() dbfactory.GRPCDialProvider {
for _, env := range mrEnv.envs {
return env.env
}
return NewGRPCDialProvider()
return mrEnv.dialProvider
}
func (mrEnv *MultiRepoEnv) Config() config.ReadWriteConfig {
@@ -116,7 +116,7 @@ func TestKeylessMerge(t *testing.T) {
require.NoError(t, err)
for _, c := range test.setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
require.Equal(t, 0, exitCode)
}
@@ -249,7 +249,7 @@ func TestKeylessMergeConflicts(t *testing.T) {
require.NoError(t, err)
for _, c := range cc {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
// allow merge to fail with conflicts
if _, ok := c.cmd.(cmd.MergeCmd); !ok {
require.Equal(t, 0, exitCode)
+105 -10
View File
@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"sort"
"strings"
sqle "github.com/dolthub/go-mysql-server"
@@ -36,6 +37,9 @@ const (
ColumnCheckCollision
InvalidCheckCollision
DeletedCheckCollision
// DuplicateIndexColumnSet represent a schema conflict where multiple indexes cover the same set of columns, and
// we're unable to accurately match them up on each side of the merge, so the user has to manually resolve.
DuplicateIndexColumnSet
)
type SchemaConflict struct {
@@ -92,7 +96,7 @@ type ColConflict struct {
func (c ColConflict) String() string {
switch c.Kind {
case NameCollision:
return fmt.Sprintf("two columns with the same name '%s' have different tags. See https://github.com/dolthub/dolt/issues/3963", c.Ours.Name)
return fmt.Sprintf("incompatible column types for column '%s': %s and %s", c.Ours.Name, c.Ours.TypeInfo, c.Theirs.TypeInfo)
case TagCollision:
return fmt.Sprintf("different column definitions for our column %s and their column %s", c.Ours.Name, c.Theirs.Name)
}
@@ -105,7 +109,12 @@ type IdxConflict struct {
}
func (c IdxConflict) String() string {
return ""
switch c.Kind {
case DuplicateIndexColumnSet:
return fmt.Sprintf("multiple indexes covering the same column set cannot be merged: '%s' and '%s'", c.Ours.Name(), c.Theirs.Name())
default:
return ""
}
}
type FKConflict struct {
@@ -152,7 +161,7 @@ func SchemaMerge(ctx context.Context, format *types.NomsBinFormat, ourSch, their
}
var mergedCC *schema.ColCollection
mergedCC, sc.ColConflicts, err = mergeColumns(ourSch.GetAllCols(), theirSch.GetAllCols(), ancSch.GetAllCols())
mergedCC, sc.ColConflicts, err = mergeColumns(format, ourSch.GetAllCols(), theirSch.GetAllCols(), ancSch.GetAllCols())
if err != nil {
return nil, SchemaConflict{}, err
}
@@ -303,9 +312,11 @@ func ForeignKeysMerge(ctx context.Context, mergedRoot, ourRoot, theirRoot, ancRo
// mergeColumns merges the columns from |ourCC|, |theirCC| into a single column collection, using the ancestor column
// definitions in |ancCC| to determine on which side a column has changed. If merging is not possible because of
// conflicting changes to the columns in |ourCC| and |theirCC|, then a set of ColConflict instances are returned
// describing the conflicts. If any other, unexpected error occurs, then that error is returned and the other response
// fields should be ignored.
func mergeColumns(ourCC, theirCC, ancCC *schema.ColCollection) (*schema.ColCollection, []ColConflict, error) {
// describing the conflicts. |format| indicates what storage format is in use, and is needed to determine compatibility
// between types, since different storage formats have different restrictions on how much types can change and remain
// compatible with the current stored format. If any unexpected error occurs, then that error is returned and the
// other response fields should be ignored.
func mergeColumns(format *types.NomsBinFormat, ourCC, theirCC, ancCC *schema.ColCollection) (*schema.ColCollection, []ColConflict, error) {
columnMappings, err := mapColumns(ourCC, theirCC, ancCC)
if err != nil {
return nil, nil, err
@@ -344,9 +355,25 @@ func mergeColumns(ourCC, theirCC, ancCC *schema.ColCollection) (*schema.ColColle
if oursChanged && theirsChanged {
// This is a schema change conflict and has already been handled by checkSchemaConflicts
} else if theirsChanged {
mergedColumns = append(mergedColumns, *theirs)
if columnTypesAreCompatible(format, *ours, *theirs) {
mergedColumns = append(mergedColumns, *theirs)
} else {
conflicts = append(conflicts, ColConflict{
Kind: NameCollision,
Ours: *ours,
Theirs: *theirs,
})
}
} else {
mergedColumns = append(mergedColumns, *ours)
if columnTypesAreCompatible(format, *theirs, *ours) {
mergedColumns = append(mergedColumns, *ours)
} else {
conflicts = append(conflicts, ColConflict{
Kind: NameCollision,
Ours: *ours,
Theirs: *theirs,
})
}
}
} else if ours.Equals(*theirs) {
// if the columns are identical, just use ours
@@ -477,6 +504,34 @@ func checkSchemaConflicts(columnMappings columnMappings) ([]ColConflict, error)
return conflicts, nil
}
// columnTypesAreCompatible returns true if the change from |from| to |to| is a compatible type change.
// Currently, no type change for the DOLT storage format is considered compatible, but over time we will
// widen this to include safe type migrations (e.g. smallint to bigint, varchar(100) to varchar(200)), which
// can require rewriting existing stored data to be compatible with the new type. For the older LD_1 storage
// format, we are looser with type equality and consider them compatible as long as the types are in the
// same type family/kind.
func columnTypesAreCompatible(format *types.NomsBinFormat, from, to schema.Column) bool {
	// Identical types are trivially compatible.
	if from.TypeInfo.Equals(to.TypeInfo) {
		return true
	}

	// The DOLT storage format does not yet support any automatic type migration.
	// TODO: this is overly broad, and should be narrowed down
	if types.IsFormat_DOLT(format) {
		return false
	}

	// For the legacy LD_1 format, types in different kind families are incompatible.
	if from.Kind != to.Kind {
		return false
	}

	// Spatial types are excluded even within the same kind, because some spatial
	// type changes require a full table check, but not all.
	// TODO: This could be narrowed down to a smaller set of spatial type changes
	if schema.IsColSpatialType(to) {
		return false
	}

	return true
}
// columnMapping describes the mapping for a column being merged between the two sides of the merge as well as the ancestor.
type columnMapping struct {
anc *schema.Column
@@ -612,9 +667,20 @@ func indexesInCommon(mergedCC *schema.ColCollection, ours, theirs, anc schema.In
}
}
theirIdx, ok := theirs.GetIndexByTags(idxTags...)
if !ok {
// Check that there aren't multiple indexes covering the same columns on "theirs"
theirIdx, idxConflict := findIndexInCollectionByTags(ourIdx, theirs)
if theirIdx == nil && idxConflict == nil {
return false, nil
} else if idxConflict != nil {
conflicts = append(conflicts, *idxConflict)
return true, nil
}
// Check that there aren't multiple indexes covering the same columns on "ours"
_, idxConflict = findIndexInCollectionByTags(ourIdx, ours)
if idxConflict != nil {
conflicts = append(conflicts, *idxConflict)
return true, nil
}
if ourIdx.Equals(theirIdx) {
@@ -675,6 +741,35 @@ func indexesInCommon(mergedCC *schema.ColCollection, ours, theirs, anc schema.In
return common, conflicts
}
// findIndexInCollectionByTags searches for a single index in |idxColl| that covers the same tags |idx| covers.
// If exactly one matching index is found, it is returned with a nil IdxConflict. If no matching index exists,
// both return values are nil. If multiple indexes cover the same set of columns, a nil Index is returned
// along with an IdxConflict describing the conflict.
//
// Dolt allows multiple indexes covering the same set of columns, but in that situation we can't always
// accurately match up the indexes between ours/theirs/anc in a merge. The set of column tags an index covers
// used to serve as a unique ID for the index, but as our index support has grown and in order to match
// MySQL's behavior, this isn't guaranteed to be a unique identifier anymore.
func findIndexInCollectionByTags(idx schema.Index, idxColl schema.IndexCollection) (schema.Index, *IdxConflict) {
	matches := idxColl.GetIndexesByTags(idx.IndexedColumnTags()...)
	if len(matches) == 0 {
		return nil, nil
	}
	if len(matches) == 1 {
		return matches[0], nil
	}

	// Multiple indexes cover this tag set: sort by name so the reported
	// conflict pair is deterministic, and surface the first two.
	sort.Slice(matches, func(a, b int) bool {
		return matches[a].Name() < matches[b].Name()
	})
	return nil, &IdxConflict{
		Kind:   DuplicateIndexColumnSet,
		Ours:   matches[0],
		Theirs: matches[1],
	}
}
func indexCollSetDifference(left, right schema.IndexCollection, cc *schema.ColCollection) (d schema.IndexCollection) {
d = schema.NewIndexCollection(cc, nil)
_ = left.Iter(func(idx schema.Index) (stop bool, err error) {
@@ -40,7 +40,7 @@ type testCommand struct {
}
func (tc testCommand) exec(t *testing.T, ctx context.Context, dEnv *env.DoltEnv) int {
return tc.cmd.Exec(ctx, tc.cmd.Name(), tc.args, dEnv, nil)
return tc.cmd.Exec(ctx, tc.cmd.Name(), tc.args, dEnv, commands.BuildEmptyCliContext())
}
type args []string
@@ -173,7 +173,7 @@ func setupMigrationTest(t *testing.T, ctx context.Context, test migrationTest) *
cmd := commands.SqlCmd{}
for _, query := range test.setup {
code := cmd.Exec(ctx, cmd.Name(), []string{"-q", query}, dEnv, nil)
code := cmd.Exec(ctx, cmd.Name(), []string{"-q", query}, dEnv, commands.BuildEmptyCliContext())
require.Equal(t, 0, code)
}
return dEnv
@@ -55,6 +55,8 @@ type testAssertion struct {
rows []sql.Row
}
var cliCtx = cmd.BuildEmptyCliContext()
var setupCommon = []testCommand{
{cmd.SqlCmd{}, args{"-q",
`create table test (
@@ -203,7 +205,7 @@ func setupFilterBranchTests(t *testing.T) *env.DoltEnv {
ctx := context.Background()
dEnv := dtestutils.CreateTestEnv()
for _, c := range setupCommon {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cliCtx)
require.Equal(t, 0, exitCode)
}
@@ -215,13 +217,13 @@ func testFilterBranch(t *testing.T, test filterBranchTest) {
dEnv := setupFilterBranchTests(t)
defer dEnv.DoltDB.Close()
for _, c := range test.setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cliCtx)
require.Equal(t, 0, exitCode)
}
for _, a := range test.asserts {
for _, c := range a.setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cliCtx)
require.Equal(t, 0, exitCode)
}
@@ -46,7 +46,13 @@ type IndexCollection interface {
// GetIndexByColumnNames returns whether the collection contains an index that has this exact collection and ordering of columns.
GetIndexByColumnNames(cols ...string) (Index, bool)
// GetIndexByTags returns whether the collection contains an index that has this exact collection and ordering of columns.
// Note that if an index collection contains multiple indexes that cover the same column tags (e.g. different index
// types) then this method will return one of them, but it is not guaranteed which one and can easily result in a
// race condition.
GetIndexByTags(tags ...uint64) (Index, bool)
// GetIndexesByTags returns all indexes from this collection that cover the same columns identified by |tags|, in the
// same order specified. This method is preferred over GetIndexByTags.
GetIndexesByTags(tags ...uint64) []Index
// IndexesWithColumn returns all indexes that index the given column.
IndexesWithColumn(columnName string) []Index
// IndexesWithTag returns all indexes that index the given tag.
@@ -282,6 +288,30 @@ func (ixc *indexCollectionImpl) GetIndexByTags(tags ...uint64) (Index, bool) {
return idx, true
}
// GetIndexesByTags implements the schema.IndexCollection interface. It returns every
// index in the collection that covers exactly the column tags |tags|, in the same order.
func (ixc *indexCollectionImpl) GetIndexesByTags(tags ...uint64) []Index {
	var matching []Index
indexLoop:
	for _, candidate := range ixc.indexes {
		// An exact match requires the same number of tags...
		if len(candidate.tags) != len(tags) {
			continue
		}
		// ...and the same tags in the same positions.
		for i, tag := range candidate.tags {
			if tag != tags[i] {
				continue indexLoop
			}
		}
		matching = append(matching, candidate)
	}
	return matching
}
// hasIndexOnTags reports whether the collection contains at least one index covering
// exactly the column tags |tags|, in the same order.
func (ixc *indexCollectionImpl) hasIndexOnTags(tags ...uint64) bool {
	_, ok := ixc.GetIndexByTags(tags...)
	return ok
@@ -177,7 +177,7 @@ func runTestSql(t *testing.T, ctx context.Context, setup []string) (*doltdb.Dolt
dEnv := dtestutils.CreateTestEnv()
cmd := commands.SqlCmd{}
for _, query := range setup {
code := cmd.Exec(ctx, cmd.Name(), []string{"-q", query}, dEnv, nil)
code := cmd.Exec(ctx, cmd.Name(), []string{"-q", query}, dEnv, commands.BuildEmptyCliContext())
require.Equal(t, 0, code)
}
root, err := dEnv.WorkingRoot(ctx)
@@ -3721,6 +3721,8 @@ var DoltVerifyConstraintsTestScripts = []queries.ScriptTest{
var errTmplNoAutomaticMerge = "table %s can't be automatically merged.\nTo merge this table, make the schema on the source and target branch equal."
var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
// Data conflicts during a merge with schema changes
{
Name: "data conflict",
AncSetUpScript: []string{
@@ -3760,51 +3762,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
{
Name: "unique constraint violation",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk varchar(100) primary key, col1 int, col2 varchar(100), UNIQUE KEY unique1 (col2));",
"INSERT into t values ('0', 0, '');",
"alter table t add index idx1 (pk, col2);",
},
RightSetUpScript: []string{
"alter table t drop column col1;",
"INSERT into t (pk, col2) values ('10', 'same');",
},
LeftSetUpScript: []string{
"INSERT into t values ('1', 10, 'same');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint(2)}},
},
{
Query: "select violation_type, pk, col2, violation_info from dolt_constraint_violations_t;",
Expected: []sql.Row{
{uint(2), "1", "same", types.JSONDocument{Val: merge.UniqCVMeta{Columns: []string{"col2"}, Name: "unique1"}}},
{uint(2), "10", "same", types.JSONDocument{Val: merge.UniqCVMeta{Columns: []string{"col2"}, Name: "unique1"}}},
},
},
{
Query: "select pk, col2 from t;",
Expected: []sql.Row{
{"0", ""},
{"1", "same"},
{"10", "same"},
},
},
},
},
// Basic column changes adds/drops/renames/reorders
{
Name: "dropping columns",
AncSetUpScript: []string{
@@ -3877,8 +3836,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
"alter table t add index idx4 (col1, col2);",
"alter table t add index idx5 (col2, col1);",
"alter table t add index idx6 (col2, pk, col1);",
// TODO: This duplicate index causes a race condition in the merge code
//"alter table t add index idx7 (col2);",
},
RightSetUpScript: []string{
"alter table t rename column col1 to col11;",
@@ -4034,6 +3991,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
// Constraint changes
{
Name: "removing a not-null constraint",
AncSetUpScript: []string{
@@ -4160,6 +4119,51 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
{
Name: "unique constraint violation",
AncSetUpScript: []string{
"set autocommit = 0;",
"CREATE table t (pk varchar(100) primary key, col1 int, col2 varchar(100), UNIQUE KEY unique1 (col2));",
"INSERT into t values ('0', 0, '');",
"alter table t add index idx1 (pk, col2);",
},
RightSetUpScript: []string{
"alter table t drop column col1;",
"INSERT into t (pk, col2) values ('10', 'same');",
},
LeftSetUpScript: []string{
"INSERT into t values ('1', 10, 'same');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select * from dolt_conflicts;",
Expected: []sql.Row{},
},
{
Query: "select * from dolt_constraint_violations;",
Expected: []sql.Row{{"t", uint(2)}},
},
{
Query: "select violation_type, pk, col2, violation_info from dolt_constraint_violations_t;",
Expected: []sql.Row{
{uint(2), "1", "same", types.JSONDocument{Val: merge.UniqCVMeta{Columns: []string{"col2"}, Name: "unique1"}}},
{uint(2), "10", "same", types.JSONDocument{Val: merge.UniqCVMeta{Columns: []string{"col2"}, Name: "unique1"}}},
},
},
{
Query: "select pk, col2 from t;",
Expected: []sql.Row{
{"0", ""},
{"1", "same"},
{"10", "same"},
},
},
},
},
{
Name: "dropping a unique key",
AncSetUpScript: []string{
@@ -4187,7 +4191,117 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
// Schema conflict test cases
// Schema conflicts
{
// Type widening - these changes move from smaller types to bigger types, so they are guaranteed to be safe.
// TODO: We don't support automatically converting column types in merges yet, so currently these won't
// automatically merge and instead return schema conflicts.
Name: "type widening",
AncSetUpScript: []string{
"CREATE table t (pk int primary key, col1 enum('blue', 'green'), col2 float, col3 smallint, " +
"col4 decimal(4,2), col5 varchar(10), col6 set('a', 'b'), col7 bit(1));",
"INSERT into t values (1, 'blue', 1.0, 1, 0.1, 'one', 'a,b', 1);",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t modify column col1 enum('blue', 'green', 'red');",
"alter table t modify column col2 double;",
"alter table t modify column col3 bigint;",
"alter table t modify column col4 decimal(8,4);",
"alter table t modify column col5 varchar(20);",
"alter table t modify column col6 set('a', 'b', 'c');",
"alter table t modify column col7 bit(2);",
"INSERT into t values (3, 'red', 3.0, 420, 0.001, 'three', 'a,b,c', 3);",
},
LeftSetUpScript: []string{
"INSERT into t values (2, 'green', 2.0, 2, 0.2, 'two', 'a,b', 1);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green'),\n `col2` float,\n `col3` smallint,\n `col4` decimal(4,2),\n `col5` varchar(10),\n `col6` set('a','b'),\n `col7` bit(1),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green','red'),\n `col2` double,\n `col3` bigint,\n `col4` decimal(8,4),\n `col5` varchar(20),\n `col6` set('a','b','c'),\n `col7` bit(2),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green'),\n `col2` float,\n `col3` smallint,\n `col4` decimal(4,2),\n `col5` varchar(10),\n `col6` set('a','b'),\n `col7` bit(1),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
{
// Type shortening - these changes move from a larger type to a smaller type and are not always safe.
// For now, we automatically fail all of these with a schema conflict that the user must resolve, but in
// theory, we could try to apply these changes and see if the data in the tables is compatible or not, but
// that's an optimization left for the future. Until then, customers can manually alter their schema to
// get merges to work, based on the schema conflict information.
Name: "type shortening",
AncSetUpScript: []string{
"CREATE TABLE t (pk int primary key, col1 enum('blue','green','red'), col2 double, col3 bigint, col4 decimal(8,4), " +
"col5 varchar(20), col6 set('a','b','c'), col7 bit(2));",
"INSERT into t values (3, 'green', 3.0, 420, 0.001, 'three', 'a,b', 1);",
"alter table t add index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t modify column col1 enum('blue', 'green');",
"alter table t modify column col2 float;",
"alter table t modify column col3 smallint;",
"alter table t modify column col4 decimal(4,2);",
"alter table t modify column col5 varchar(10);",
"alter table t modify column col6 set('a', 'b');",
"alter table t modify column col7 bit(1);",
"INSERT into t values (1, 'blue', 1.0, 1, 0.1, 'one', 'a,b', 1);",
},
LeftSetUpScript: []string{
"INSERT into t values (2, 'green', 2.0, 2, 0.2, 'two', 'a,b', 1);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green','red'),\n `col2` double,\n `col3` bigint,\n `col4` decimal(8,4),\n `col5` varchar(20),\n `col6` set('a','b','c'),\n `col7` bit(2),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green'),\n `col2` float,\n `col3` smallint,\n `col4` decimal(4,2),\n `col5` varchar(10),\n `col6` set('a','b'),\n `col7` bit(1),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` enum('blue','green','red'),\n `col2` double,\n `col3` bigint,\n `col4` decimal(8,4),\n `col5` varchar(20),\n `col6` set('a','b','c'),\n `col7` bit(2),\n PRIMARY KEY (`pk`),\n KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
{
// Dolt indexes currently use the set of columns covered by the index, as a unique identifier for matching
// indexes on either side of a merge. As Dolt's index support has grown, this isn't guaranteed to be a unique
// id anymore, so instead of allowing a race condition in the merge logic, if we detect that multiple indexes
// cover the same set of columns, we return a schema conflict and let the user decide how to resolve it.
Name: "duplicate index tag set",
AncSetUpScript: []string{
"CREATE table t (pk int primary key, col1 varchar(100));",
"INSERT into t values (1, '100'), (2, '200');",
"alter table t add unique index idx1 (col1);",
},
RightSetUpScript: []string{
"alter table t add index idx2 (col1(10));",
},
LeftSetUpScript: []string{
"INSERT into t values (3, '300');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema, description from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(100),\n PRIMARY KEY (`pk`),\n UNIQUE KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(100),\n PRIMARY KEY (`pk`),\n UNIQUE KEY `idx1` (`col1`),\n KEY `idx2` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(100),\n PRIMARY KEY (`pk`),\n UNIQUE KEY `idx1` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"multiple indexes covering the same column set cannot be merged: 'idx1' and 'idx2'"}},
},
},
},
{
Name: "index conflicts: both sides add an index with the same name, same columns, but different type",
AncSetUpScript: []string{
@@ -4216,7 +4330,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
{
// https://github.com/dolthub/dolt/issues/2973
Name: "modifying a column on one side of a merge, and deleting it on the other",
@@ -4262,15 +4375,69 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
// Smarter merge conflict detection
{
// This merge test reports a conflict on pk=1, because the tuple value is different on the left side, right
Name: "changing the type of a column",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 10), (2, 20);",
},
RightSetUpScript: []string{
"alter table t modify column col1 varchar(100)",
"insert into t values (3, 'thirty'), (4, 'forty')",
},
LeftSetUpScript: []string{
"insert into t values (5, 50), (6, 60);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` int,\n PRIMARY KEY (`pk`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(100),\n PRIMARY KEY (`pk`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` int,\n PRIMARY KEY (`pk`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
{
Name: "changing the type of a column with an index",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int, INDEX col1_idx (col1));",
"insert into t values (1, 100), (2, 20);",
},
RightSetUpScript: []string{
"alter table t modify column col1 varchar(100);",
"insert into t values (3, 'thirty'), (4, 'forty')",
},
LeftSetUpScript: []string{
"insert into t values (5, 50), (6, 60);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` int,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(100),\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` int,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
// Unsupported automatic merge cases
{
// This merge test reports a conflict on pk=1, because the tuple value is different on the left side, right
// side, and base. The value is the base is (10, '100'), on the right is nil, and on the left is ('100'),
// because the data migration for the schema change happens before the diff iterator is invoked.
// This should NOT be a conflict for a user; Dolt should not conflate the schema merge data migration with
// a real data conflict created by a user. Allowing this is still better than completely blocking all schema
// merges though, so we can live with this while we continue iterating and fine tuning schema merge logic.
// merges though, so we can live with this while we continue iterating and fine-tuning schema merge logic.
Name: "schema change combined with drop row",
AncSetUpScript: []string{
"SET autocommit = 0",
@@ -4304,10 +4471,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
},
// Unsupported automatic merge cases
{
Name: "adding a non-null column to one side",
Name: "adding a non-null column with a default value to one side",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
@@ -4334,71 +4499,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
{
Name: "changing the type of a column",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 10), (2, 20);",
},
RightSetUpScript: []string{
"alter table t modify column col1 varchar(100)",
"insert into t values (3, 'thirty'), (4, 'forty')",
},
LeftSetUpScript: []string{
"insert into t values (5, 50), (6, 60);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
},
{
Query: "select pk, col1 from t;",
Expected: []sql.Row{
{1, "10"},
{2, "20"},
{3, "thirty"},
{4, "forty"},
{5, "50"},
{6, "60"},
},
Skip: true,
},
},
},
{
Name: "changing the type of a column with an index",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int, INDEX col1_idx (col1));",
"insert into t values (1, 100), (2, 20);",
},
RightSetUpScript: []string{
"alter table t modify column col1 varchar(100);",
"insert into t values (3, 'thirty'), (4, 'forty')",
},
LeftSetUpScript: []string{
"insert into t values (5, 50), (6, 60);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
},
{
Skip: true,
Query: "select pk, col1 from t order by col1;",
Expected: []sql.Row{
{1, "100"},
{2, "20"},
{3, "thirty"},
{4, "forty"},
{5, "50"},
{6, "60"},
},
},
},
},
{
Name: "adding a not-null constraint with default to a column",
Name: "adding a not-null constraint and default value to a column",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, null), (2, null);",
@@ -4451,9 +4552,9 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
},
{
// It would make sense if we table-scanned for check constraints during merge
// and flagged failing constraints as violations in `dolt_constraint_violations`.
Name: "adding a check-constraint should abort the merge.",
// TODO: We should scan for check constraints during merge and flag failing
// constraints as violations in `dolt_constraint_violations`.
Name: "adding a check-constraint",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
@@ -4467,14 +4568,18 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
Assertions: []queries.ScriptTestAssertion{
{
Skip: true,
Query: "call dolt_merge('right');",
ExpectedErrStr: "some schema error",
// TODO: Dolt currently merges this without an error, but it shouldn't;
// There is a constraint violation that should be reported.
Skip: true,
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
},
},
{
Name: "changing the collation of an indexed column is broken",
// TODO: Changing a column's collation requires rewriting the table and any indexes containing that column.
// For now, we just detect the schema incompatibility and return schema conflict metadata.
Name: "changing the collation of an indexed column",
AncSetUpScript: []string{
"create table t (pk int primary key, col1 varchar(32) character set utf8mb4 collate utf8mb4_bin, index col1_idx (col1));",
"insert into t values (1, 'ab'), (2, 'Ab');",
@@ -4486,10 +4591,16 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
"insert into t values (3, 'c');",
},
Assertions: []queries.ScriptTestAssertion{
// TODO: Fails secondary index validation. Changing the ordinal ordering of secondary indexes definitely breaks merge
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_bin,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_general_ci,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_bin,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
@@ -154,7 +154,7 @@ func TestDbRevision(t *testing.T) {
setup := append(setupCommon, test.setup...)
for _, c := range setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
require.Equal(t, 0, exitCode)
}
@@ -213,7 +213,7 @@ func setupHistoryTests(t *testing.T) *env.DoltEnv {
ctx := context.Background()
dEnv := dtestutils.CreateTestEnv()
for _, c := range setupCommon {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
require.Equal(t, 0, exitCode)
}
@@ -237,7 +237,7 @@ func setupHistoryTests(t *testing.T) *env.DoltEnv {
func testHistoryTable(t *testing.T, test historyTableTest, dEnv *env.DoltEnv) {
ctx := context.Background()
for _, c := range test.setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
require.Equal(t, 0, exitCode)
}
@@ -129,7 +129,7 @@ func testJsonValue(t *testing.T, test jsonValueTest, setupCommon []testCommand)
setup := append(setupCommon, test.setup...)
for _, c := range setup {
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, nil)
exitCode := c.cmd.Exec(ctx, c.cmd.Name(), c.args, dEnv, cmd.BuildEmptyCliContext())
require.Equal(t, 0, exitCode)
}
@@ -140,7 +140,7 @@ func populateRepo(dEnv *env.DoltEnv, insertData string) {
execSql := func(dEnv *env.DoltEnv, q string) int {
ctx := context.Background()
args := []string{"-r", "null", "-q", q}
return commands.SqlCmd{}.Exec(ctx, "sql", args, dEnv, nil)
return commands.SqlCmd{}.Exec(ctx, "sql", args, dEnv, commands.BuildEmptyCliContext())
}
execSql(dEnv, createTable)
execSql(dEnv, insertData)
+1 -1
View File
@@ -320,7 +320,7 @@ NOT_VALID_REPO_ERROR="The current directory is not a valid dolt repository."
@test "no-repo: don't panic if invalid HOME" {
DOLT_ROOT_PATH=
HOME=/this/is/garbage
run dolt
run dolt status
[ "$status" -eq 1 ]
[[ ! "$output" =~ "panic" ]]
[[ "$output" =~ "Failed to load the HOME directory" ]]
+32
View File
@@ -204,6 +204,38 @@ teardown() {
rm -rf db_dir
}
@test "sql: check --data-dir can be used as argument before subcommand" {
# remove config files
rm -rf .doltcfg
rm -rf db_dir
# create data dir
mkdir db_dir
cd db_dir
DATADIR=$(pwd)
# create databases
mkdir dba
cd dba
dolt init
cd ..
mkdir dbb
cd dbb
dolt init
# Ensure --data-dir flag is really used.
cd /tmp
# show databases, expect all
run dolt --data-dir="$DATADIR" sql -q "show databases;"
[ "$status" -eq 0 ]
[[ "$output" =~ "dba" ]] || false
[[ "$output" =~ "dbb" ]] || false
dolt --data-dir="$DATADIR" sql -q "use dba; create table tablea (id int);"
}
@test "sql: check configurations specify doltcfg directory" {
# remove any previous config directories
rm -rf .doltcfg