Merge remote-tracking branch 'origin/main' into andy/validate-not-null

This commit is contained in:
Andy Arthur
2023-05-11 13:49:25 -07:00
19 changed files with 323 additions and 61 deletions
+10
View File
@@ -112,6 +112,16 @@ privileges or aren't comfortable running a script with them, you can download th
for your platform from [the latest release](https://github.com/dolthub/dolt/releases), unzip it,
and put the binary somewhere on your `$PATH`.
### Linux
#### Arch Linux
Dolt is packaged in the official repositories for Arch Linux.
```
pacman -S dolt
```
### Mac
#### Homebrew
+8 -8
View File
@@ -176,16 +176,16 @@ func (cmd SqlCmd) RequiresRepo() bool {
func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, sqlDocs, ap))
apr, verr := cmd.handleLegacyArguments(ap, commandStr, args)
if verr != nil {
if verr == argparser.ErrHelp {
apr, err := cmd.handleLegacyArguments(ap, commandStr, args)
if err != nil {
if err == argparser.ErrHelp {
help()
return 0
}
return HandleVErrAndExitCode(verr, usage)
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
err := validateSqlArgs(apr)
err = validateSqlArgs(apr)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
@@ -286,7 +286,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
// handleLegacyArguments is a temporary function to parse args, and print an error and explanation when the old form is provided.
func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, errhand.VerboseError) {
func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, error) {
apr, err := ap.Parse(args)
@@ -312,12 +312,12 @@ func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr stri
if newErr != nil {
// Neither form of the arguments works. Print the usage and the error of the first parse.
return nil, errhand.VerboseErrorFromError(err)
return nil, err
}
// The legacy form worked, so print an error and exit.
err = fmt.Errorf("SQL arguments have changed. Move --data-dir, --doltcfg-dir to before the sql sub command.")
return nil, errhand.VerboseErrorFromError(err)
return nil, err
}
return apr, nil
+28
View File
@@ -27,6 +27,7 @@ import (
"github.com/dolthub/go-mysql-server/server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/vitess/go/mysql"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
@@ -40,6 +41,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)
@@ -87,6 +89,32 @@ func Serve(
}
logrus.SetFormatter(LogFormat{})
sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{
{
Name: dsess.DoltLogLevel,
Scope: sql.SystemVariableScope_Global,
Dynamic: true,
SetVarHintApplies: false,
Type: types.NewSystemEnumType(dsess.DoltLogLevel,
logrus.PanicLevel.String(),
logrus.FatalLevel.String(),
logrus.ErrorLevel.String(),
logrus.WarnLevel.String(),
logrus.InfoLevel.String(),
logrus.DebugLevel.String(),
logrus.TraceLevel.String(),
),
Default: logrus.GetLevel().String(),
NotifyChanged: func(scope sql.SystemVariableScope, v sql.SystemVarValue) {
if level, err := logrus.ParseLevel(v.Val.(string)); err == nil {
logrus.SetLevel(level)
} else {
logrus.Warnf("could not parse requested log level %s as a log level. dolt_log_level variable value and logging behavior will diverge.", v.Val.(string))
}
},
},
})
var mrEnv *env.MultiRepoEnv
var err error
fs := dEnv.FS
+5 -3
View File
@@ -91,13 +91,15 @@ func (cmd StashCmd) EventType() eventsapi.ClientEventType {
// Exec executes the command
func (cmd StashCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
ap := cmd.ArgParser()
help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if !dEnv.DoltDB.Format().UsesFlatbuffers() {
cli.PrintErrln(ErrStashNotSupportedForOldFormat.Error())
return 1
}
ap := cmd.ArgParser()
help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if dEnv.IsLocked() {
return commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
+1 -1
View File
@@ -62,7 +62,7 @@ import (
)
const (
Version = "1.0.0"
Version = "1.0.1"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+1 -1
View File
@@ -59,7 +59,7 @@ require (
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.15.1-0.20230509182237-d52f00655399
github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9
github.com/dolthub/swiss v0.1.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/jmoiron/sqlx v1.3.4
+2 -2
View File
@@ -166,8 +166,8 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.15.1-0.20230509182237-d52f00655399 h1:0m0huPD01Ax41IT9OmKX1YuZucHaWaHqgVm6gN4Q/Ak=
github.com/dolthub/go-mysql-server v0.15.1-0.20230509182237-d52f00655399/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4=
github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9 h1:+0W2FuuaoOtyFkw7vtklJxbibBxRv+tuWKaRo6OyMnU=
github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0=
+13 -4
View File
@@ -21,6 +21,8 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
"sync"
)
type hooksDatabase struct {
@@ -58,14 +60,21 @@ func (db hooksDatabase) PostCommitHooks() []CommitHook {
func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset, onlyWS bool) {
var err error
var wg sync.WaitGroup
for _, hook := range db.postCommitHooks {
if !onlyWS || hook.ExecuteForWorkingSets() {
err = hook.Execute(ctx, ds, db)
if err != nil {
hook.HandleError(ctx, err)
}
hook := hook
wg.Add(1)
go func() {
defer wg.Done()
err = hook.Execute(ctx, ds, db)
if err != nil {
hook.HandleError(ctx, err)
}
}()
}
}
wg.Wait()
}
func (db hooksDatabase) CommitWithWorkingSet(
+3 -1
View File
@@ -245,7 +245,9 @@ func ColCollsAreEqual(cc1, cc2 *ColCollection) bool {
}
// PK cols need to be in the same order and equivalent.
for i := 0; i < cc1.Size(); i++ {
if !cc1.cols[i].Equals(cc2.cols[i]) {
// Test that the columns are identical, but don't worry about tags matching, since
// different tags could be generated depending on how the schemas were created.
if !cc1.cols[i].EqualsWithoutTag(cc2.cols[i]) {
return false
}
}
+10
View File
@@ -145,6 +145,16 @@ func (c Column) Equals(other Column) bool {
ColConstraintsAreEqual(c.Constraints, other.Constraints)
}
// EqualsWithoutTag tests equality between two columns, but does not check the columns' tags.
func (c Column) EqualsWithoutTag(other Column) bool {
return c.Name == other.Name &&
c.Kind == other.Kind &&
c.IsPartOfPK == other.IsPartOfPK &&
c.TypeInfo.Equals(other.TypeInfo) &&
c.Default == other.Default &&
ColConstraintsAreEqual(c.Constraints, other.Constraints)
}
// Compatible tests compatibility between two columns. Compatible columns have the same tag and can store the same
// kinds of values at the storage layer, but may have different constraints or type parameters.
func (c Column) Compatible(other Column) bool {
@@ -95,8 +95,6 @@ type procedurestore interface {
}
const (
DoltClusterRoleVariable = "dolt_cluster_role"
DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch"
// Since we fetch the keys from the other replicas, we're going to use a fixed string here.
DoltClusterRemoteApiAudience = "dolt-cluster-remote-api.dolthub.com"
)
@@ -285,17 +283,17 @@ func (c *Controller) refreshSystemVars() {
role, epoch := string(c.role), c.epoch
vars := []sql.SystemVariable{
{
Name: DoltClusterRoleVariable,
Name: dsess.DoltClusterRoleVariable,
Dynamic: false,
Scope: sql.SystemVariableScope_Persist,
Type: gmstypes.NewSystemStringType(DoltClusterRoleVariable),
Type: gmstypes.NewSystemStringType(dsess.DoltClusterRoleVariable),
Default: role,
},
{
Name: DoltClusterRoleEpochVariable,
Name: dsess.DoltClusterRoleEpochVariable,
Dynamic: false,
Scope: sql.SystemVariableScope_Persist,
Type: gmstypes.NewSystemIntType(DoltClusterRoleEpochVariable, 0, 9223372036854775807, false),
Type: gmstypes.NewSystemIntType(dsess.DoltClusterRoleEpochVariable, 0, 9223372036854775807, false),
Default: epoch,
},
}
@@ -304,16 +302,16 @@ func (c *Controller) refreshSystemVars() {
func (c *Controller) persistVariables() error {
toset := make(map[string]string)
toset[DoltClusterRoleVariable] = string(c.role)
toset[DoltClusterRoleEpochVariable] = strconv.Itoa(c.epoch)
toset[dsess.DoltClusterRoleVariable] = string(c.role)
toset[dsess.DoltClusterRoleEpochVariable] = strconv.Itoa(c.epoch)
return c.persistentCfg.SetStrings(toset)
}
func applyBootstrapClusterConfig(lgr *logrus.Logger, cfg Config, pCfg config.ReadWriteConfig) (Role, int, error) {
toset := make(map[string]string)
persistentRole := pCfg.GetStringOrDefault(DoltClusterRoleVariable, "")
persistentRole := pCfg.GetStringOrDefault(dsess.DoltClusterRoleVariable, "")
var roleFromPersistentConfig bool
persistentEpoch := pCfg.GetStringOrDefault(DoltClusterRoleEpochVariable, "")
persistentEpoch := pCfg.GetStringOrDefault(dsess.DoltClusterRoleEpochVariable, "")
if persistentRole == "" {
if cfg.BootstrapRole() != "" {
lgr.Tracef("cluster/controller: persisted cluster role was empty, apply bootstrap_role %s", cfg.BootstrapRole())
@@ -322,7 +320,7 @@ func applyBootstrapClusterConfig(lgr *logrus.Logger, cfg Config, pCfg config.Rea
lgr.Trace("cluster/controller: persisted cluster role was empty, bootstrap_role was empty: defaulted to primary")
persistentRole = "primary"
}
toset[DoltClusterRoleVariable] = persistentRole
toset[dsess.DoltClusterRoleVariable] = persistentRole
} else {
roleFromPersistentConfig = true
lgr.Tracef("cluster/controller: persisted cluster role is %s", persistentRole)
@@ -330,19 +328,19 @@ func applyBootstrapClusterConfig(lgr *logrus.Logger, cfg Config, pCfg config.Rea
if persistentEpoch == "" {
persistentEpoch = strconv.Itoa(cfg.BootstrapEpoch())
lgr.Tracef("cluster/controller: persisted cluster role epoch is empty, took boostrap_epoch: %s", persistentEpoch)
toset[DoltClusterRoleEpochVariable] = persistentEpoch
toset[dsess.DoltClusterRoleEpochVariable] = persistentEpoch
} else {
lgr.Tracef("cluster/controller: persisted cluster role epoch is %s", persistentEpoch)
}
if persistentRole != string(RolePrimary) && persistentRole != string(RoleStandby) {
isallowed := persistentRole == string(RoleDetectedBrokenConfig) && roleFromPersistentConfig
if !isallowed {
return "", 0, fmt.Errorf("persisted role %s.%s = %s must be \"primary\" or \"secondary\"", PersistentConfigPrefix, DoltClusterRoleVariable, persistentRole)
return "", 0, fmt.Errorf("persisted role %s.%s = %s must be \"primary\" or \"secondary\"", PersistentConfigPrefix, dsess.DoltClusterRoleVariable, persistentRole)
}
}
epochi, err := strconv.Atoi(persistentEpoch)
if err != nil {
return "", 0, fmt.Errorf("persisted role epoch %s.%s = %s must be an integer", PersistentConfigPrefix, DoltClusterRoleEpochVariable, persistentEpoch)
return "", 0, fmt.Errorf("persisted role epoch %s.%s = %s must be an integer", PersistentConfigPrefix, dsess.DoltClusterRoleEpochVariable, persistentEpoch)
}
if len(toset) > 0 {
err := pCfg.SetStrings(toset)
@@ -86,10 +86,47 @@ func doDoltGC(ctx *sql.Context, args []string) (int, error) {
return cmdFailure, err
}
} else {
// Currently, if this server is involved in cluster
// replication, a full GC is only safe to run on the primary.
// We assert that we are the primary here before we begin, and
// we assert again that we are the primary at the same epoch as
// we establish the safepoint.
origepoch := -1
if _, role, ok := sql.SystemVariables.GetGlobal(dsess.DoltClusterRoleVariable); ok {
// TODO: magic constant...
if role.(string) != "primary" {
return cmdFailure, fmt.Errorf("cannot run a full dolt_gc() while cluster replication is enabled and role is %s; must be the primary", role.(string))
}
_, epoch, ok := sql.SystemVariables.GetGlobal(dsess.DoltClusterRoleEpochVariable)
if !ok {
return cmdFailure, fmt.Errorf("internal error: cannot run a full dolt_gc(); cluster replication is enabled but could not read %s", dsess.DoltClusterRoleEpochVariable)
}
origepoch = epoch.(int)
}
// TODO: If we got a callback at the beginning and an
// (allowed-to-block) callback at the end, we could more
// gracefully tear things down.
err = ddb.GC(ctx, func() error {
if origepoch != -1 {
// Here we need to sanity check role and epoch.
if _, role, ok := sql.SystemVariables.GetGlobal(dsess.DoltClusterRoleVariable); ok {
if role.(string) != "primary" {
return fmt.Errorf("dolt_gc failed: when we began we were a primary in a cluster, but now our role is %s", role.(string))
}
_, epoch, ok := sql.SystemVariables.GetGlobal(dsess.DoltClusterRoleEpochVariable)
if !ok {
return fmt.Errorf("dolt_gc failed: when we began we were a primary in a cluster, but we can no longer read the cluster role epoch.")
}
if origepoch != epoch.(int) {
return fmt.Errorf("dolt_gc failed: when we began we were primary in the cluster at epoch %d, but now we are at epoch %d. for gc to safely finalize, our role and epoch must not change throughout the gc.", origepoch, epoch.(int))
}
} else {
return fmt.Errorf("dolt_gc failed: when we began we were a primary in a cluster, but we can no longer read the cluster role.")
}
}
killed := make(map[uint32]struct{})
processes := ctx.ProcessList.Processes()
for _, p := range processes {
@@ -50,6 +50,10 @@ const (
AwsCredsProfile = "aws_credentials_profile"
AwsCredsRegion = "aws_credentials_region"
ShowBranchDatabases = "dolt_show_branch_databases"
DoltLogLevel = "dolt_log_level"
DoltClusterRoleVariable = "dolt_cluster_role"
DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch"
)
const URLTemplateDatabasePlaceholder = "{database}"
@@ -181,7 +181,7 @@ var ShowCreateTableScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "show create table tbl",
Expected: []sql.Row{sql.Row{"tbl", "CREATE TABLE `tbl` (\n" +
Expected: []sql.Row{{"tbl", "CREATE TABLE `tbl` (\n" +
" `a` int NOT NULL,\n" +
" `b` int NOT NULL DEFAULT '42',\n" + //
" `c` int NOT NULL DEFAULT (24),\n" + // Ensure these match setup above.
@@ -215,7 +215,7 @@ var ShowCreateTableScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "show create table tbl",
Expected: []sql.Row{sql.Row{"tbl", "CREATE TABLE `tbl` (\n" +
Expected: []sql.Row{{"tbl", "CREATE TABLE `tbl` (\n" +
" `a` int NOT NULL DEFAULT (NOW()),\n" + // MySql preserves now as lower case.
" `b` int NOT NULL DEFAULT '42',\n" + //
" `c` int NOT NULL DEFAULT (24),\n" + // Ensure these match setup above.
@@ -556,6 +556,40 @@ var DoltRevisionDbScripts = []queries.ScriptTest{
},
},
},
{
Name: "database revision specs: can checkout a table",
SetUpScript: []string{
"call dolt_checkout('main')",
"create table t01 (pk int primary key, c1 int)",
"call dolt_add('t01');",
"call dolt_commit('-am', 'creating table t01 on branch1');",
"insert into t01 values (1, 1), (2, 2);",
"call dolt_branch('new-branch')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "show databases;",
Expected: []sql.Row{{"mydb"}, {"information_schema"}, {"mysql"}},
},
{
Query: "use `mydb/main`;",
Expected: []sql.Row{},
},
{
Query: "select * from dolt_status",
Expected: []sql.Row{{"t01", false, "modified"}},
},
{
Query: "call dolt_checkout('t01')",
Expected: []sql.Row{{0}},
},
{
Query: "select * from dolt_status",
// Expected: []sql.Row{},
SkipResultsCheck: true, // TODO: https://github.com/dolthub/dolt/issues/5816
},
},
},
}
// DoltScripts are script tests specific to Dolt (not the engine in general), e.g. by involving Dolt functions. Break
+20 -2
View File
@@ -55,7 +55,7 @@ func Clone(ctx context.Context, srcCS, sinkCS chunks.ChunkStore, eventCh chan<-
return fmt.Errorf("%w: sink db is not a Table File Store", ErrCloneUnsupported)
}
return clone(ctx, srcTS, sinkTS, eventCh)
return clone(ctx, srcTS, sinkTS, sinkCS, eventCh)
}
type CloneTableFileEvent int
@@ -91,7 +91,7 @@ func mapTableFiles(tblFiles []chunks.TableFile) ([]string, map[string]chunks.Tab
const concurrentTableFileDownloads = 3
func clone(ctx context.Context, srcTS, sinkTS chunks.TableFileStore, eventCh chan<- TableFileEvent) error {
func clone(ctx context.Context, srcTS, sinkTS chunks.TableFileStore, sinkCS chunks.ChunkStore, eventCh chan<- TableFileEvent) error {
root, sourceFiles, appendixFiles, err := srcTS.Sources(ctx)
if err != nil {
return err
@@ -211,6 +211,24 @@ func clone(ctx context.Context, srcTS, sinkTS chunks.TableFileStore, eventCh cha
}
sinkTS.AddTableFilesToManifest(ctx, fileIDToNumChunks)
// AddTableFilesToManifest can set the root chunk if there is a chunk
// journal which we downloaded in the clone. If that happened, the
// chunk journal is actually more accurate on what the current root is
// than the result of |Sources| up above. We choose not to touch
// anything in that case.
err = sinkCS.Rebase(ctx)
if err != nil {
return err
}
sinkRoot, err := sinkCS.Root(ctx)
if err != nil {
return err
}
if !sinkRoot.IsEmpty() {
return nil
}
return sinkTS.SetRootChunk(ctx, root, hash.Hash{})
}
+1 -17
View File
@@ -325,7 +325,7 @@ DELIM
[[ $output =~ "col1 | 6" ]] || false
}
@test "column_tags: create table on two separate branches, merge them together by updating tags" {
@test "column_tags: create table on two separate branches, merge them together even though they have different tags" {
skip_nbf_not_dolt
dolt branch other
@@ -344,23 +344,7 @@ DELIM
dolt sql -q "ALTER TABLE target DROP COLUMN badCol;"
dolt commit -Am "fixup"
run dolt schema tags
[[ $output =~ "| target | col1 | 14690 |" ]] || false
dolt checkout main
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "| target | col1 | 14649 |" ]] || false
run dolt merge other
[ $status -ne 0 ]
[[ $output =~ "table with same name 'target' added in 2 commits can't be merged" ]] || false
dolt reset --hard
dolt schema update-tag target col1 14690
dolt commit -am "update tag of col1 of target"
run dolt merge other -m "merge other into main"
[ $status -eq 0 ]
[[ $output =~ "1 tables changed, 1 rows added(+)" ]] || false
+25 -5
View File
@@ -77,17 +77,16 @@ teardown() {
@test "no-repo: check all commands for valid help text" {
# pipe all commands to a file
# cut -s suppresses the line if it doesn't contain the delim
dolt | cut -f 1 -d " - " -s | sed "s/ //g" > all.txt
dolt | awk -F ' - ' '/ - / {print $1}' > all_raw.txt
sed "s/ //g" all_raw.txt > all.txt
# filter out commands without "-h"
cat all.txt \
| sed "s/creds//g" \
| sed "s/version//g" \
| sed "s/schema//g" \
| sed "s/table//g" \
| sed "s/conflicts//g" \
> commands.txt
touch subcommands.txt
cat commands.txt | while IFS= read -r cmd;
do
if [ -z "$cmd" ]; then
@@ -96,9 +95,30 @@ teardown() {
run dolt "$cmd" -h
[ "$status" -eq 0 ]
if [[ "$output" =~ "Valid commands for dolt $cmd are" ]]; then
echo "$output" | awk -F ' - ' "/ - / {print \"$cmd\", \$1}" >> subcommands.txt
continue
fi
[[ "$output" =~ "NAME" ]] || false
[[ "$output" =~ "DESCRIPTION" ]] || false
done
cat subcommands.txt | while IFS= read -r cmd;
do
if [ -z "$cmd" ]; then
continue
fi
run dolt $cmd -h
[ "$status" -eq 0 ]
[[ "$output" =~ "NAME" ]] || false
[[ "$output" =~ "DESCRIPTION" ]] || false
done
}
@test "no-repo: testing dolt version output" {
@@ -1022,4 +1022,79 @@ tests:
- on: server1
queries:
- exec: 'use repo1'
- exec: 'call dolt_checkout("new_branch_name")'
- exec: 'call dolt_checkout("new_branch_name")'
- name: call dolt gc
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:3852/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 3851
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:3851/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 3852
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: 'use repo1'
- exec: 'create table vals (id int primary key, val int)'
- exec: 'insert into vals values (1,1)'
- exec: 'insert into vals values (2,2)'
- exec: 'insert into vals values (3,3)'
- exec: 'insert into vals values (4,4)'
- exec: 'call dolt_gc()'
- exec: 'select * from vals'
error_match: "this connection can no longer be used"
- on: server1
queries:
- query: "select `database`, standby_remote, role, epoch, replication_lag_millis, current_error from dolt_cluster.dolt_cluster_status order by `database` asc"
result:
columns: ["database","standby_remote","role","epoch","replication_lag_millis","current_error"]
rows:
- ["repo1","standby","primary","1","0","NULL"]
retry_attempts: 100
- on: server2
queries:
- exec: 'use repo1'
- query: "select * from vals order by id asc"
result:
columns: ["id","val"]
rows:
- [1,1]
- [2,2]
- [3,3]
- [4,4]
- exec: 'call dolt_gc()'
error_match: "must be the primary"
- exec: 'call dolt_gc("--shallow")'
@@ -275,3 +275,34 @@ tests:
result:
columns: ["@@GLOBAL.max_connections"]
rows: [["555"]]
- name: "@@global.dolt_log_level behavior"
repos:
- name: repo1
server:
args: ["-l", "warning"]
log_matches:
- "Starting query"
connections:
- on: repo1
queries:
- query: "select @@GLOBAL.dolt_log_level"
result:
columns: ["@@GLOBAL.dolt_log_level"]
rows: [["warning"]]
- exec: "set @@GLOBAL.dolt_log_level = 'trace'"
- query: "select 2+2 from dual"
result:
columns: ["2+2"]
rows: [["4"]]
- query: "select @@GLOBAL.dolt_log_level"
result:
columns: ["@@GLOBAL.dolt_log_level"]
rows: [["trace"]]
restart_server:
args: ["-l", "info"]
- on: repo1
queries:
- query: "select @@GLOBAL.dolt_log_level"
result:
columns: ["@@GLOBAL.dolt_log_level"]
rows: [["info"]]