diff --git a/.github/scripts/import-benchmarking/run-benchmarks.sh b/.github/scripts/import-benchmarking/run-benchmarks.sh index 110011f036..34521bb52d 100755 --- a/.github/scripts/import-benchmarking/run-benchmarks.sh +++ b/.github/scripts/import-benchmarking/run-benchmarks.sh @@ -28,7 +28,7 @@ if [ -z "$MODE" ]; then fi nomsFormat="ldnbf" -if [ "$NOMS_BIN_FORMAT" == "__DOLT__"]; then +if [ "$NOMS_BIN_FORMAT" == "__DOLT__" ]; then nomsFormat="doltnbf" fi diff --git a/.github/scripts/sql-correctness/run-correctness.sh b/.github/scripts/sql-correctness/run-correctness.sh index 5b7d679342..4d4f02c3e9 100755 --- a/.github/scripts/sql-correctness/run-correctness.sh +++ b/.github/scripts/sql-correctness/run-correctness.sh @@ -38,6 +38,11 @@ if [ -z "$MODE" ]; then exit 1 fi +nomsFormat="ldnbf" +if [ "$NOMS_BIN_FORMAT" == "__DOLT__" ]; then + nomsFormat="doltnbf" +fi + # use first 8 characters of TO_VERSION to differentiate # jobs short=${TO_VERSION:0:8} @@ -49,7 +54,7 @@ sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s timesuffix=`date +%s%N` -jobname="$actorShort-$timesuffix" +jobname="$actorShort-$nomsFormat-$timesuffix" timeprefix=$(date +%Y/%m/%d) diff --git a/.github/workflows/ci-bats-unix.yaml b/.github/workflows/ci-bats-unix.yaml index b67b9b3af8..1a4f8dc929 100644 --- a/.github/workflows/ci-bats-unix.yaml +++ b/.github/workflows/ci-bats-unix.yaml @@ -22,10 +22,10 @@ jobs: fail-fast: true matrix: os: [ ubuntu-22.04, macos-latest ] - dolt_fmt: [ "__DOLT__", "__DOLT_DEV__", "__LD_1__" ] + dolt_fmt: [ "__DOLT__", "__LD_1__" ] exclude: - os: "macos-latest" - dolt_fmt: ["__DOLT_DEV__", "__LD_1__" ] + dolt_fmt: "__LD_1__" env: use_credentials: ${{ secrets.AWS_SECRET_ACCESS_KEY != '' && secrets.AWS_ACCESS_KEY_ID != '' }} steps: diff --git a/go/cmd/dolt/commands/diff.go b/go/cmd/dolt/commands/diff.go index 9a677158fb..5c2bf5a482 100644 --- a/go/cmd/dolt/commands/diff.go +++ b/go/cmd/dolt/commands/diff.go @@ -73,10 +73,10 @@ Show changes between the working and staged tables, changes 
between the working This form is to view the changes you made relative to the staging area for the next commit. In other words, the differences are what you could tell Dolt to further add but you still haven't. You can stage these changes by using dolt add. {{.EmphasisLeft}}dolt diff [--options] [--merge-base] [...]{{.EmphasisRight}} - This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}}. + This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...HEAD{{.EmphasisRight}}. {{.EmphasisLeft}}dolt diff [--options] [--merge-base] [...]{{.EmphasisRight}} - This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}}. 
+ This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}}. {{.EmphasisLeft}}dolt diff [--options] .. [...]{{.EmphasisRight}} This is synonymous to the above form (without the ..) to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. @@ -139,7 +139,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser { ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.") ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.") ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.") - ap.SupportsFlag(MergeBase, "", "Uses merge base of {{.LessThan}}from_commit{{.GreaterThan}} and {{.LessThan}}to_commit{{.GreaterThan}} (or HEAD if not supplied) as {{.LessThan}}from_commit{{.GreaterThan}}") + ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit") return ap } @@ -393,7 +393,7 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv, if len(refs[1]) > 0 { if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok { - return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1]) + return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1]) } dArgs.toRoot = toRoot dArgs.toRef = refs[1] diff --git a/go/libraries/doltcore/doltdb/foreign_key_coll.go b/go/libraries/doltcore/doltdb/foreign_key_coll.go index daff2dae1e..7eadf3b7e3 100644 --- a/go/libraries/doltcore/doltdb/foreign_key_coll.go +++ b/go/libraries/doltcore/doltdb/foreign_key_coll.go @@ -183,7 +183,7 @@ func (fk ForeignKey) 
ValidateReferencedTableSchema(sch schema.Schema) error { fk.Name, fk.ReferencedTableName) } } - if !sch.Indexes().Contains(fk.ReferencedTableIndex) { + if (fk.ReferencedTableIndex != "" && !sch.Indexes().Contains(fk.ReferencedTableIndex)) || (fk.ReferencedTableIndex == "" && sch.GetPKCols().Size() < len(fk.ReferencedTableColumns)) { return fmt.Errorf("foreign key `%s` has entered an invalid state, referenced table `%s` is missing the index `%s`", fk.Name, fk.ReferencedTableName, fk.ReferencedTableIndex) } @@ -203,7 +203,7 @@ func (fk ForeignKey) ValidateTableSchema(sch schema.Schema) error { return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` has unexpected schema", fk.Name, fk.TableName) } } - if !sch.Indexes().Contains(fk.TableIndex) { + if (fk.TableIndex != "" && !sch.Indexes().Contains(fk.TableIndex)) || (fk.TableIndex == "" && sch.GetPKCols().Size() < len(fk.TableColumns)) { return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` is missing the index `%s`", fk.Name, fk.TableName, fk.TableIndex) } diff --git a/go/libraries/doltcore/env/actions/infer_schema.go b/go/libraries/doltcore/env/actions/infer_schema.go index fc4af28ad3..74578ae5b9 100644 --- a/go/libraries/doltcore/env/actions/infer_schema.go +++ b/go/libraries/doltcore/env/actions/infer_schema.go @@ -16,6 +16,7 @@ package actions import ( "context" + "errors" "io" "math" "strconv" @@ -61,7 +62,6 @@ func InferColumnTypesFromTableReader(ctx context.Context, rd table.ReadCloser, a var curr, prev row.Row i := newInferrer(rd.GetSchema(), args) - OUTER: for j := 0; true; j++ { var err error @@ -130,10 +130,8 @@ func (inf *inferrer) inferColumnTypes() (*schema.ColCollection, error) { col.TypeInfo = inferredTypes[tag] col.Tag = schema.ReservedTagMin + tag - col.Constraints = []schema.ColConstraint{schema.NotNullConstraint{}} - if inf.nullable.Contains(tag) { - col.Constraints = []schema.ColConstraint(nil) - } + // for large imports, it is possible to miss all the 
null values, so we cannot accurately add not null constraint + col.Constraints = []schema.ColConstraint(nil) cols = append(cols, col) return false, nil @@ -218,32 +216,27 @@ func leastPermissiveNumericType(strVal string, floatThreshold float64) (ti typei return ti } - if strings.Contains(strVal, "-") { - i, err := strconv.ParseInt(strVal, 10, 64) - if err != nil { - return typeinfo.UnknownType - } - if i >= math.MinInt32 && i <= math.MaxInt32 { - return typeinfo.Int32Type - } else { - return typeinfo.Int64Type - } + // always parse as signed int + i, err := strconv.ParseInt(strVal, 10, 64) + + // use string for out of range + if errors.Is(err, strconv.ErrRange) { + return typeinfo.StringDefaultType + } + + if err != nil { + return typeinfo.UnknownType + } + + // handle leading zero case + if len(strVal) > 1 && strVal[0] == '0' { + return typeinfo.StringDefaultType + } + + if i >= math.MinInt32 && i <= math.MaxInt32 { + return typeinfo.Int32Type } else { - ui, err := strconv.ParseUint(strVal, 10, 64) - if err != nil { - return typeinfo.UnknownType - } - - // handle leading zero case - if len(strVal) > 1 && strVal[0] == '0' { - return typeinfo.StringDefaultType - } - - if ui <= math.MaxUint32 { - return typeinfo.Uint32Type - } else { - return typeinfo.Uint64Type - } + return typeinfo.Int64Type } } @@ -286,14 +279,13 @@ func chronoTypes() []typeinfo.TypeInfo { func numericTypes() []typeinfo.TypeInfo { // prefer: // ints over floats - // unsigned over signed // smaller over larger return []typeinfo.TypeInfo{ //typeinfo.Uint8Type, //typeinfo.Uint16Type, //typeinfo.Uint24Type, - typeinfo.Uint32Type, - typeinfo.Uint64Type, + //typeinfo.Uint32Type, + //typeinfo.Uint64Type, //typeinfo.Int8Type, //typeinfo.Int16Type, @@ -398,12 +390,6 @@ func findCommonNumericType(nums typeInfoSet) typeinfo.TypeInfo { typeinfo.Int24Type, typeinfo.Int16Type, typeinfo.Int8Type, - - typeinfo.Uint64Type, - typeinfo.Uint32Type, - typeinfo.Uint24Type, - typeinfo.Uint16Type, - typeinfo.Uint8Type, } 
for _, numType := range mostToLeast { if setHasType(nums, numType) { diff --git a/go/libraries/doltcore/env/actions/infer_schema_test.go b/go/libraries/doltcore/env/actions/infer_schema_test.go index 0f15d995d3..5e78a4e6a9 100644 --- a/go/libraries/doltcore/env/actions/infer_schema_test.go +++ b/go/libraries/doltcore/env/actions/infer_schema_test.go @@ -49,14 +49,14 @@ func TestLeastPermissiveType(t *testing.T) { {"lower bool", "true", 0.0, typeinfo.BoolType}, {"upper bool", "FALSE", 0.0, typeinfo.BoolType}, {"yes", "yes", 0.0, typeinfo.StringDefaultType}, - {"one", "1", 0.0, typeinfo.Uint32Type}, + {"one", "1", 0.0, typeinfo.Int32Type}, {"negative one", "-1", 0.0, typeinfo.Int32Type}, {"negative one point 0", "-1.0", 0.0, typeinfo.Float32Type}, {"negative one point 0 with FT of 0.1", "-1.0", 0.1, typeinfo.Int32Type}, {"negative one point one with FT of 0.1", "-1.1", 0.1, typeinfo.Float32Type}, {"negative one point 999 with FT of 1.0", "-1.999", 1.0, typeinfo.Int32Type}, {"zero point zero zero zero zero", "0.0000", 0.0, typeinfo.Float32Type}, - {"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Uint64Type}, + {"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Int64Type}, {"bigger than max int", strconv.FormatUint(math.MaxUint64, 10) + "0", 0.0, typeinfo.StringDefaultType}, } @@ -75,7 +75,7 @@ func TestLeastPermissiveNumericType(t *testing.T) { floatThreshold float64 expType typeinfo.TypeInfo }{ - {"zero", "0", 0.0, typeinfo.Uint32Type}, + {"zero", "0", 0.0, typeinfo.Int32Type}, {"zero float", "0.0", 0.0, typeinfo.Float32Type}, {"zero float with floatThreshold of 0.1", "0.0", 0.1, typeinfo.Int32Type}, {"negative float", "-1.3451234", 0.0, typeinfo.Float32Type}, @@ -85,8 +85,8 @@ func TestLeastPermissiveNumericType(t *testing.T) { {"all zeroes", "0000", 0.0, typeinfo.StringDefaultType}, {"leading zeroes", "01", 0.0, typeinfo.StringDefaultType}, {"negative int", "-1234", 0.0, typeinfo.Int32Type}, - {"fits in uint64 but not int64", 
strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.Uint64Type}, - {"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.UnknownType}, + {"fits in uint64 but not int64", strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType}, + {"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType}, {"math.MinInt64", strconv.FormatInt(math.MinInt64, 10), 0.0, typeinfo.Int64Type}, } @@ -142,14 +142,6 @@ func testFindCommonType(t *testing.T) { }, expType: typeinfo.Int64Type, }, - { - name: "all unsigned ints", - inferSet: typeInfoSet{ - typeinfo.Uint32Type: {}, - typeinfo.Uint64Type: {}, - }, - expType: typeinfo.Uint64Type, - }, { name: "all floats", inferSet: typeInfoSet{ @@ -159,35 +151,31 @@ func testFindCommonType(t *testing.T) { expType: typeinfo.Float64Type, }, { - name: "32 bit ints and uints", + name: "32 bit ints", inferSet: typeInfoSet{ - typeinfo.Int32Type: {}, - typeinfo.Uint32Type: {}, + typeinfo.Int32Type: {}, }, expType: typeinfo.Int32Type, }, { - name: "64 bit ints and uints", + name: "64 bit ints", inferSet: typeInfoSet{ - typeinfo.Int64Type: {}, - typeinfo.Uint64Type: {}, + typeinfo.Int64Type: {}, }, expType: typeinfo.Int64Type, }, { - name: "32 bit ints, uints, and floats", + name: "32 bit ints and floats", inferSet: typeInfoSet{ typeinfo.Int32Type: {}, - typeinfo.Uint32Type: {}, typeinfo.Float32Type: {}, }, expType: typeinfo.Float32Type, }, { - name: "64 bit ints, uints, and floats", + name: "64 bit ints and floats", inferSet: typeInfoSet{ typeinfo.Int64Type: {}, - typeinfo.Uint64Type: {}, typeinfo.Float64Type: {}, }, expType: typeinfo.Float64Type, @@ -228,11 +216,6 @@ func testFindCommonType(t *testing.T) { func testFindCommonTypeFromSingleType(t *testing.T) { allTypes := []typeinfo.TypeInfo{ - typeinfo.Uint8Type, - typeinfo.Uint16Type, - typeinfo.Uint24Type, - typeinfo.Uint32Type, - typeinfo.Uint64Type, typeinfo.Int8Type, 
typeinfo.Int16Type, typeinfo.Int24Type, @@ -388,7 +371,7 @@ func TestInferSchema(t *testing.T) { }, map[string]typeinfo.TypeInfo{ "int": typeinfo.Int32Type, - "uint": typeinfo.Uint64Type, + "uint": typeinfo.StringDefaultType, "uuid": typeinfo.UuidType, "float": typeinfo.Float32Type, "bool": typeinfo.BoolType, @@ -404,7 +387,7 @@ func TestInferSchema(t *testing.T) { floatThreshold: 0, }, map[string]typeinfo.TypeInfo{ - "mix": typeinfo.Uint64Type, + "mix": typeinfo.StringDefaultType, "uuid": typeinfo.UuidType, }, nil, @@ -500,7 +483,7 @@ func TestInferSchema(t *testing.T) { err = allCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) { idx := schema.IndexOfConstraint(col.Constraints, schema.NotNullConstraintType) - assert.True(t, idx == -1 == test.nullableCols.Contains(col.Name), "%s unexpected nullability", col.Name) + assert.True(t, idx == -1, "%s unexpected not null constraint", col.Name) return false, nil }) require.NoError(t, err) diff --git a/go/libraries/doltcore/merge/violations_fk.go b/go/libraries/doltcore/merge/violations_fk.go index 91a28edb97..74692c7de1 100644 --- a/go/libraries/doltcore/merge/violations_fk.go +++ b/go/libraries/doltcore/merge/violations_fk.go @@ -244,7 +244,7 @@ func nomsParentFkConstraintViolations( continue } - postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(postParent.Index, postParentRow) + postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(foreignKey.TableColumns, postParent.Index, postParentRow) if err != nil { return nil, false, err } @@ -362,8 +362,14 @@ func nomsChildFkConstraintViolations( preChildRowData types.Map, ) (*doltdb.Table, bool, error) { foundViolations := false - postParentIndexTags := postParent.Index.IndexedColumnTags() - postChildIndexTags := postChild.Index.IndexedColumnTags() + var postParentIndexTags, postChildIndexTags []uint64 + if postParent.Index.Name() == "" { + postParentIndexTags = foreignKey.ReferencedTableColumns + postChildIndexTags = 
foreignKey.TableColumns + } else { + postParentIndexTags = postParent.Index.IndexedColumnTags() + postChildIndexTags = postChild.Index.IndexedColumnTags() + } postChildCVMap, err := postChild.Table.GetConstraintViolations(ctx) if err != nil { return nil, false, err @@ -411,7 +417,7 @@ func nomsChildFkConstraintViolations( continue } - postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChild.Index, postChildRow) + postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChildIndexTags, postChild.Index, postChildRow) if err != nil { return nil, false, err } @@ -496,6 +502,28 @@ func newConstraintViolationsLoadedTable(ctx context.Context, tblName, idxName st if err != nil { return nil, false, err } + + // Create Primary Key Index + if idxName == "" { + pkCols := sch.GetPKCols() + pkIdxColl := schema.NewIndexCollection(pkCols, pkCols) + pkIdxProps := schema.IndexProperties{ + IsUnique: true, + IsUserDefined: false, + Comment: "", + } + pkIdx := schema.NewIndex("", pkCols.SortedTags, pkCols.SortedTags, pkIdxColl, pkIdxProps) + return &constraintViolationsLoadedTable{ + TableName: trueTblName, + Table: tbl, + Schema: sch, + RowData: rowData, + Index: pkIdx, + IndexSchema: pkIdx.Schema(), + IndexData: rowData, + }, true, nil + } + idx, ok := sch.Indexes().GetByNameCaseInsensitive(idxName) if !ok { return &constraintViolationsLoadedTable{ diff --git a/go/libraries/doltcore/merge/violations_fk_prolly.go b/go/libraries/doltcore/merge/violations_fk_prolly.go index 2dc29aea68..e213c13ce0 100644 --- a/go/libraries/doltcore/merge/violations_fk_prolly.go +++ b/go/libraries/doltcore/merge/violations_fk_prolly.go @@ -64,7 +64,7 @@ func prollyParentFkConstraintViolations( err = prolly.DiffMaps(ctx, preParentRowData, postParentRowData, func(ctx context.Context, diff tree.Diff) error { switch diff.Type { case tree.RemovedDiff, tree.ModifiedDiff: - partialKey, hadNulls := makePartialKey(partialKB, postParent.Index, postParent.Schema, val.Tuple(diff.Key), 
val.Tuple(diff.From), preParentRowData.Pool()) + partialKey, hadNulls := makePartialKey(partialKB, foreignKey.ReferencedTableColumns, postParent.Index, postParent.Schema, val.Tuple(diff.Key), val.Tuple(diff.From), preParentRowData.Pool()) if hadNulls { // row had some nulls previously, so it couldn't have been a parent return nil @@ -147,7 +147,7 @@ func prollyChildFkConstraintViolations( switch diff.Type { case tree.AddedDiff, tree.ModifiedDiff: k, v := val.Tuple(diff.Key), val.Tuple(diff.To) - partialKey, hasNulls := makePartialKey(partialKB, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool()) + partialKey, hasNulls := makePartialKey(partialKB, foreignKey.TableColumns, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool()) if hasNulls { return nil } @@ -289,8 +289,11 @@ func createCVsForPartialKeyMatches( return createdViolation, nil } -func makePartialKey(kb *val.TupleBuilder, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) { - for i, tag := range idxSch.IndexedColumnTags() { +func makePartialKey(kb *val.TupleBuilder, tags []uint64, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) { + if idxSch.Name() != "" { + tags = idxSch.IndexedColumnTags() + } + for i, tag := range tags { if j, ok := tblSch.GetPKCols().TagToIdx[tag]; ok { if k.FieldIsNull(j) { return nil, true diff --git a/go/libraries/doltcore/migrate/transform.go b/go/libraries/doltcore/migrate/transform.go index 902c540e4d..4a37d7a99d 100644 --- a/go/libraries/doltcore/migrate/transform.go +++ b/go/libraries/doltcore/migrate/transform.go @@ -27,6 +27,7 @@ import ( "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/schema" "github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo" + "github.com/dolthub/dolt/go/libraries/utils/set" "github.com/dolthub/dolt/go/store/chunks" "github.com/dolthub/dolt/go/store/datas" 
"github.com/dolthub/dolt/go/store/hash" @@ -277,6 +278,16 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root return nil, err } + removedTables, err := getRemovedTableNames(ctx, oldParent, oldRoot) + if err != nil { + return nil, err + } + + migrated, err = migrated.RemoveTables(ctx, true, false, removedTables...) + if err != nil { + return nil, err + } + err = oldRoot.IterTables(ctx, func(name string, oldTbl *doltdb.Table, sch schema.Schema) (bool, error) { ok, err := oldTbl.HasConflicts(ctx) if err != nil { @@ -345,6 +356,21 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root return migrated, nil } +// getRemovedTableNames returns the names of tables present in prev but absent from curr; renamed tables are also returned here. +func getRemovedTableNames(ctx context.Context, prev, curr *doltdb.RootValue) ([]string, error) { + prevNames, err := prev.GetTableNames(ctx) + if err != nil { + return nil, err + } + tblNameSet := set.NewStrSet(prevNames) + currNames, err := curr.GetTableNames(ctx) + if err != nil { + return nil, err + } + tblNameSet.Remove(currNames...) + return tblNameSet.AsSlice(), nil +} + func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTbl, newParentTbl *doltdb.Table) (*doltdb.Table, error) { idx, err := oldParentTbl.GetRowData(ctx) if err != nil { diff --git a/go/libraries/doltcore/row/row.go b/go/libraries/doltcore/row/row.go index 09bb8b4c6d..28766fc3de 100644 --- a/go/libraries/doltcore/row/row.go +++ b/go/libraries/doltcore/row/row.go @@ -163,15 +163,19 @@ func ReduceToIndexKeysFromTagMap(nbf *types.NomsBinFormat, idx schema.Index, tag} } // ReduceToIndexPartialKey creates an index record from a primary storage record. 
-func ReduceToIndexPartialKey(idx schema.Index, r Row) (types.Tuple, error) { +func ReduceToIndexPartialKey(tags []uint64, idx schema.Index, r Row) (types.Tuple, error) { var vals []types.Value - for _, tag := range idx.IndexedColumnTags() { + if idx.Name() != "" { + tags = idx.IndexedColumnTags() + } + for _, tag := range tags { val, ok := r.GetColVal(tag) if !ok { val = types.NullValue } vals = append(vals, types.Uint(tag), val) } + return types.NewTuple(r.Format(), vals...) } diff --git a/go/libraries/doltcore/schema/index.go b/go/libraries/doltcore/schema/index.go index 2a46a68fa7..68cf26fa4d 100644 --- a/go/libraries/doltcore/schema/index.go +++ b/go/libraries/doltcore/schema/index.go @@ -68,12 +68,17 @@ type indexImpl struct { comment string } -func NewIndex(name string, tags, allTags []uint64, indexColl *indexCollectionImpl, props IndexProperties) Index { +func NewIndex(name string, tags, allTags []uint64, indexColl IndexCollection, props IndexProperties) Index { + var indexCollImpl *indexCollectionImpl + if indexColl != nil { + indexCollImpl = indexColl.(*indexCollectionImpl) + } + return &indexImpl{ name: name, tags: tags, allTags: allTags, - indexColl: indexColl, + indexColl: indexCollImpl, isUnique: props.IsUnique, isUserDefined: props.IsUserDefined, comment: props.Comment, diff --git a/go/libraries/doltcore/sqle/alterschema_test.go b/go/libraries/doltcore/sqle/alterschema_test.go index df7a284c7e..a11ebaa284 100644 --- a/go/libraries/doltcore/sqle/alterschema_test.go +++ b/go/libraries/doltcore/sqle/alterschema_test.go @@ -462,7 +462,9 @@ func TestDropPks(t *testing.T) { fk, ok := foreignKeyCollection.GetByNameCaseInsensitive(childFkName) assert.True(t, ok) assert.Equal(t, childName, fk.TableName) - assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex) + if tt.fkIdxName != "" && fk.ReferencedTableIndex != "" { + assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex) + } parent, ok, err := root.GetTable(ctx, parentName) assert.NoError(t, err) diff --git 
a/go/libraries/doltcore/sqle/enginetest/dolt_transaction_queries.go b/go/libraries/doltcore/sqle/enginetest/dolt_transaction_queries.go index be7c54758c..7226f22e6e 100755 --- a/go/libraries/doltcore/sqle/enginetest/dolt_transaction_queries.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_transaction_queries.go @@ -1482,7 +1482,7 @@ var DoltConstraintViolationTransactionTests = []queries.TransactionTest{ }, { Query: "/* client b */ INSERT INTO child VALUES (1, 1);", - ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `nk01br56`, table: `child`, referenced table: `parent`, key: `[1]`", + ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `0050p5ek`, table: `child`, referenced table: `parent`, key: `[1]`", }, }, }, diff --git a/go/libraries/doltcore/sqle/tables.go b/go/libraries/doltcore/sqle/tables.go index fa78b1d834..d67aec68b4 100644 --- a/go/libraries/doltcore/sqle/tables.go +++ b/go/libraries/doltcore/sqle/tables.go @@ -1971,76 +1971,30 @@ func (t *AlterableDoltTable) AddForeignKey(ctx *sql.Context, sqlFk sql.ForeignKe refColTags[i] = refCol.Tag } + var tableIndexName, refTableIndexName string tableIndex, ok, err := findIndexWithPrefix(t.sch, sqlFk.Columns) if err != nil { return err } - if !ok { - // The engine matched on a primary key, and Dolt does not yet support using the primary key within the - // schema.Index interface (which is used internally to represent indexes across the codebase). In the - // meantime, we must generate a duplicate key over the primary key. 
- //TODO: use the primary key as-is - idxReturn, err := creation.CreateIndex(ctx, tbl, "", sqlFk.Columns, false, false, "", editor.Options{ - ForeignKeyChecksDisabled: true, - Deaf: t.opts.Deaf, - Tempdir: t.opts.Tempdir, - }) - if err != nil { - return err - } - tableIndex = idxReturn.NewIndex - tbl = idxReturn.NewTable - root, err = root.PutTable(ctx, t.tableName, idxReturn.NewTable) - if sqlFk.IsSelfReferential() { - refTbl = idxReturn.NewTable - } + // Use secondary index if found; otherwise it will use empty string, indicating primary key + if ok { + tableIndexName = tableIndex.Name() } - refTableIndex, ok, err := findIndexWithPrefix(refSch, sqlFk.ParentColumns) if err != nil { return err } - if !ok { - // The engine matched on a primary key, and Dolt does not yet support using the primary key within the - // schema.Index interface (which is used internally to represent indexes across the codebase). In the - // meantime, we must generate a duplicate key over the primary key. - //TODO: use the primary key as-is - var refPkTags []uint64 - for _, i := range refSch.GetPkOrdinals() { - refPkTags = append(refPkTags, refSch.GetAllCols().GetByIndex(i).Tag) - } - - var colNames []string - for _, t := range refColTags { - c, _ := refSch.GetAllCols().GetByTag(t) - colNames = append(colNames, c.Name) - } - - // Our duplicate index is only unique if it's the entire primary key (which is by definition unique) - unique := len(refPkTags) == len(refColTags) - idxReturn, err := creation.CreateIndex(ctx, refTbl, "", colNames, unique, false, "", editor.Options{ - ForeignKeyChecksDisabled: true, - Deaf: t.opts.Deaf, - Tempdir: t.opts.Tempdir, - }) - if err != nil { - return err - } - refTbl = idxReturn.NewTable - refTableIndex = idxReturn.NewIndex - root, err = root.PutTable(ctx, sqlFk.ParentTable, idxReturn.NewTable) - if err != nil { - return err - } + // Use secondary index if found; otherwise it will use empty string, indicating primary key + if ok { + refTableIndexName = 
refTableIndex.Name() } - doltFk = doltdb.ForeignKey{ Name: sqlFk.Name, TableName: sqlFk.Table, - TableIndex: tableIndex.Name(), + TableIndex: tableIndexName, TableColumns: colTags, ReferencedTableName: sqlFk.ParentTable, - ReferencedTableIndex: refTableIndex.Name(), + ReferencedTableIndex: refTableIndexName, ReferencedTableColumns: refColTags, OnUpdate: onUpdateRefAction, OnDelete: onDeleteRefAction, diff --git a/go/libraries/doltcore/table/editor/bulk_import_tea.go b/go/libraries/doltcore/table/editor/bulk_import_tea.go index 60c4fcc8ac..69c81287b9 100644 --- a/go/libraries/doltcore/table/editor/bulk_import_tea.go +++ b/go/libraries/doltcore/table/editor/bulk_import_tea.go @@ -105,6 +105,44 @@ func (tea *BulkImportTEA) Get(ctx context.Context, keyHash hash.Hash, key types. return &doltKVP{k: key, v: v}, true, nil } +func (tea *BulkImportTEA) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) { + var err error + var matches []hashedTuple + var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{ + {Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}}) + defer mapIter.Close(ctx) + var r row.Row + for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) { + tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx) + if err != nil { + return nil, err + } + key := tplKeyVal.(types.Tuple) + tplValVal, err := r.NomsMapValue(idxSch).Value(ctx) + if err != nil { + return nil, err + } + val := tplValVal.(types.Tuple) + keyHash, err := key.Hash(key.Format()) + if err != nil { + return nil, err + } + matches = append(matches, hashedTuple{key, val, keyHash}) + } + + if err != io.EOF { + return nil, err + } + + for i := len(matches) - 1; i >= 0; i-- { + if _, ok := tea.deletes[matches[i].hash]; ok { + matches[i] = matches[len(matches)-1] + matches = matches[:len(matches)-1] + } + } + return matches, nil +} + 
// Commit is the default behavior and does nothing func (tea *BulkImportTEA) Commit(ctx context.Context, nbf *types.NomsBinFormat) error { return nil diff --git a/go/libraries/doltcore/table/editor/pk_table_editor.go b/go/libraries/doltcore/table/editor/pk_table_editor.go index d39270544c..e8ee9999f9 100644 --- a/go/libraries/doltcore/table/editor/pk_table_editor.go +++ b/go/libraries/doltcore/table/editor/pk_table_editor.go @@ -280,18 +280,36 @@ func (te *pkTableEditor) GetIndexedRows(ctx context.Context, key types.Tuple, in if err != nil { return nil, err } - kvp, ok, err := te.tea.Get(ctx, keyHash, key) + + pkKeys, err := te.tea.HasPartial(ctx, te.tSch, keyHash, key) if err != nil { return nil, err } - if !ok { + if len(pkKeys) == 0 { return nil, nil } - dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v) - if err != nil { - return nil, err + + rows := make([]row.Row, len(pkKeys)) + for i, pkKey := range pkKeys { + pkKeyHash, err := pkKey.key.Hash(pkKey.key.Format()) + if err != nil { + return nil, err + } + kvp, ok, err := te.tea.Get(ctx, pkKeyHash, pkKey.key) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v) + if err != nil { + return nil, err + } + rows[i] = dRow } - return []row.Row{dRow}, nil + + return rows, nil } return nil, fmt.Errorf("an index editor for `%s` could not be found on table `%s`", indexName, te.name) diff --git a/go/libraries/doltcore/table/editor/table_edit_accumulator.go b/go/libraries/doltcore/table/editor/table_edit_accumulator.go index 43b48c4af6..1b9d13bb1b 100644 --- a/go/libraries/doltcore/table/editor/table_edit_accumulator.go +++ b/go/libraries/doltcore/table/editor/table_edit_accumulator.go @@ -16,7 +16,12 @@ package editor import ( "context" + "io" + "github.com/dolthub/dolt/go/libraries/doltcore/row" + "github.com/dolthub/dolt/go/libraries/doltcore/schema" + "github.com/dolthub/dolt/go/libraries/doltcore/table" + 
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms" "github.com/dolthub/dolt/go/libraries/utils/set" "github.com/dolthub/dolt/go/store/hash" "github.com/dolthub/dolt/go/store/types" @@ -43,6 +48,9 @@ type TableEditAccumulator interface { // This assumes that the given hash is for the given key. Get(ctx context.Context, keyHash hash.Hash, key types.Tuple) (*doltKVP, bool, error) + // HasPartial returns true if the current TableEditAccumulator contains the given partialKey + HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) + // Commit applies the in memory edits to the list of committed in memory edits Commit(ctx context.Context, nbf *types.NomsBinFormat) error @@ -174,6 +182,51 @@ func (tea *tableEditAccumulatorImpl) Get(ctx context.Context, keyHash hash.Hash, return &doltKVP{k: key, v: v}, true, err } +func (tea *tableEditAccumulatorImpl) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) { + var err error + var matches []hashedTuple + var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{ + {Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}}) + defer mapIter.Close(ctx) + var r row.Row + for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) { + tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx) + if err != nil { + return nil, err + } + key := tplKeyVal.(types.Tuple) + tplValVal, err := r.NomsMapValue(idxSch).Value(ctx) + if err != nil { + return nil, err + } + val := tplValVal.(types.Tuple) + keyHash, err := key.Hash(key.Format()) + if err != nil { + return nil, err + } + matches = append(matches, hashedTuple{key, val, keyHash}) + } + + if err != io.EOF { + return nil, err + } + + orderedMods := []*inMemModifications{tea.committed, tea.uncommitted} + for _, mods := range orderedMods { + for i 
:= len(matches) - 1; i >= 0; i-- { + if _, ok := mods.adds[matches[i].hash]; ok { + matches[i] = matches[len(matches)-1] + matches = matches[:len(matches)-1] + } + } + if added, ok := mods.adds[partialKeyHash]; ok { + matches = append(matches, hashedTuple{key: added.k, value: added.v}) + } + } + + return matches, nil +} + func (tea *tableEditAccumulatorImpl) flushUncommitted() { // if we are not already actively writing edits to the uncommittedEA then change the state and push all in mem edits // to a types.EditAccumulator diff --git a/go/store/nbs/block_store_test.go b/go/store/nbs/block_store_test.go index 697ce0e0c5..00934b4563 100644 --- a/go/store/nbs/block_store_test.go +++ b/go/store/nbs/block_store_test.go @@ -194,7 +194,7 @@ func (suite *BlockStoreSuite) TestChunkStorePutMoreThanMemTable() { if suite.putCountFn != nil { suite.Equal(2, suite.putCountFn()) } - specs, err := suite.store.tables.ToSpecs() + specs, err := suite.store.tables.toSpecs() suite.NoError(err) suite.Len(specs, 2) } @@ -415,22 +415,15 @@ func (suite *BlockStoreSuite) TestChunkStorePutWithRebase() { func TestBlockStoreConjoinOnCommit(t *testing.T) { stats := &Stats{} - assertContainAll := func(t *testing.T, store chunks.ChunkStore, srcs ...chunkSource) { - rdrs := make(chunkReaderGroup, len(srcs)) - for i, src := range srcs { - c, err := src.Clone() + assertContainAll := func(t *testing.T, store chunks.ChunkStore, sources ...chunkSource) { + ctx := context.Background() + for _, src := range sources { + err := extractAllChunks(ctx, src, func(rec extractRecord) { + ok, err := store.Has(context.Background(), hash.Hash(rec.a)) + require.NoError(t, err) + assert.True(t, ok) + }) require.NoError(t, err) - rdrs[i] = c - } - chunkChan := make(chan extractRecord, mustUint32(rdrs.count())) - err := rdrs.extract(context.Background(), chunkChan) - require.NoError(t, err) - close(chunkChan) - - for rec := range chunkChan { - ok, err := store.Has(context.Background(), hash.Hash(rec.a)) - 
require.NoError(t, err) - assert.True(t, ok) } } @@ -509,7 +502,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) { assert.True(t, ok) assertContainAll(t, smallTableStore, srcs...) for _, src := range srcs { - err := src.Close() + err := src.close() require.NoError(t, err) } }) @@ -546,7 +539,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) { assert.True(t, ok) assertContainAll(t, smallTableStore, srcs...) for _, src := range srcs { - err := src.Close() + err := src.close() require.NoError(t, err) } }) diff --git a/go/store/nbs/chunk_source_adapter.go b/go/store/nbs/chunk_source_adapter.go index 1f886e8c14..30608836f6 100644 --- a/go/store/nbs/chunk_source_adapter.go +++ b/go/store/nbs/chunk_source_adapter.go @@ -36,12 +36,12 @@ func newReaderFromIndexData(q MemoryQuotaProvider, idxData []byte, name addr, tr return &chunkSourceAdapter{tr, name}, nil } -func (csa chunkSourceAdapter) Close() error { - return csa.tableReader.Close() +func (csa chunkSourceAdapter) close() error { + return csa.tableReader.close() } -func (csa chunkSourceAdapter) Clone() (chunkSource, error) { - tr, err := csa.tableReader.Clone() +func (csa chunkSourceAdapter) clone() (chunkSource, error) { + tr, err := csa.tableReader.clone() if err != nil { return &chunkSourceAdapter{}, err } diff --git a/go/store/nbs/conjoiner_test.go b/go/store/nbs/conjoiner_test.go index 09bcad595c..e4d9cf0b97 100644 --- a/go/store/nbs/conjoiner_test.go +++ b/go/store/nbs/conjoiner_test.go @@ -64,7 +64,7 @@ func makeTestSrcs(t *testing.T, tableSizes []uint32, p tablePersister) (srcs chu } cs, err := p.Persist(context.Background(), mt, nil, &Stats{}) require.NoError(t, err) - c, err := cs.Clone() + c, err := cs.clone() require.NoError(t, err) srcs = append(srcs, c) } @@ -76,7 +76,7 @@ func TestConjoin(t *testing.T) { makeTestTableSpecs := func(tableSizes []uint32, p tablePersister) (specs []tableSpec) { for _, src := range makeTestSrcs(t, tableSizes, p) { specs = append(specs, tableSpec{mustAddr(src.hash()), 
mustUint32(src.count())}) - err := src.Close() + err := src.close() require.NoError(t, err) } return @@ -93,28 +93,34 @@ func TestConjoin(t *testing.T) { } assertContainAll := func(t *testing.T, p tablePersister, expect, actual []tableSpec) { - open := func(specs []tableSpec) (srcs chunkReaderGroup) { + open := func(specs []tableSpec) (sources chunkSources) { for _, sp := range specs { cs, err := p.Open(context.Background(), sp.name, sp.chunkCount, nil) - if err != nil { require.NoError(t, err) } - - srcs = append(srcs, cs) + sources = append(sources, cs) } return } - expectSrcs, actualSrcs := open(expect), open(actual) - chunkChan := make(chan extractRecord, mustUint32(expectSrcs.count())) - err := expectSrcs.extract(context.Background(), chunkChan) - require.NoError(t, err) - close(chunkChan) - for rec := range chunkChan { - has, err := actualSrcs.has(rec.a) + expectSrcs, actualSrcs := open(expect), open(actual) + + ctx := context.Background() + for _, src := range expectSrcs { + err := extractAllChunks(ctx, src, func(rec extractRecord) { + var ok bool + for _, src := range actualSrcs { + var err error + ok, err = src.has(rec.a) + require.NoError(t, err) + if ok { + break + } + } + assert.True(t, ok) + }) require.NoError(t, err) - assert.True(t, has) } } diff --git a/go/store/nbs/file_table_reader.go b/go/store/nbs/file_table_reader.go index 467b5fcb8f..6bfa6923ae 100644 --- a/go/store/nbs/file_table_reader.go +++ b/go/store/nbs/file_table_reader.go @@ -118,12 +118,12 @@ func (mmtr *fileTableReader) hash() (addr, error) { return mmtr.h, nil } -func (mmtr *fileTableReader) Close() error { - return mmtr.tableReader.Close() +func (mmtr *fileTableReader) close() error { + return mmtr.tableReader.close() } -func (mmtr *fileTableReader) Clone() (chunkSource, error) { - tr, err := mmtr.tableReader.Clone() +func (mmtr *fileTableReader) clone() (chunkSource, error) { + tr, err := mmtr.tableReader.clone() if err != nil { return &fileTableReader{}, err } diff --git 
a/go/store/nbs/mem_table.go b/go/store/nbs/mem_table.go index 99fe1fa9bc..36c309582a 100644 --- a/go/store/nbs/mem_table.go +++ b/go/store/nbs/mem_table.go @@ -218,6 +218,6 @@ func (mt *memTable) write(haver chunkReader, stats *Stats) (name addr, data []by return name, buff[:tableSize], count, nil } -func (mt *memTable) Close() error { +func (mt *memTable) close() error { return nil } diff --git a/go/store/nbs/mem_table_test.go b/go/store/nbs/mem_table_test.go index 4c81f2d070..87153fdc59 100644 --- a/go/store/nbs/mem_table_test.go +++ b/go/store/nbs/mem_table_test.go @@ -307,22 +307,10 @@ func (crg chunkReaderGroup) uncompressedLen() (data uint64, err error) { return } -func (crg chunkReaderGroup) extract(ctx context.Context, chunks chan<- extractRecord) error { - for _, haver := range crg { - err := haver.extract(ctx, chunks) - - if err != nil { - return err - } - } - - return nil -} - -func (crg chunkReaderGroup) Close() error { +func (crg chunkReaderGroup) close() error { var firstErr error for _, c := range crg { - err := c.Close() + err := c.close() if err != nil && firstErr == nil { firstErr = err } diff --git a/go/store/nbs/persisting_chunk_source.go b/go/store/nbs/persisting_chunk_source.go index 0c99c14eca..da78f21ef8 100644 --- a/go/store/nbs/persisting_chunk_source.go +++ b/go/store/nbs/persisting_chunk_source.go @@ -95,12 +95,12 @@ func (ccs *persistingChunkSource) getReader() chunkReader { return ccs.cs } -func (ccs *persistingChunkSource) Close() error { +func (ccs *persistingChunkSource) close() error { // persistingChunkSource does not own |cs| or |mt|. No need to close them. return nil } -func (ccs *persistingChunkSource) Clone() (chunkSource, error) { +func (ccs *persistingChunkSource) clone() (chunkSource, error) { // persistingChunkSource does not own |cs| or |mt|. No need to Clone. 
return ccs, nil } @@ -240,20 +240,6 @@ func (ccs *persistingChunkSource) size() (uint64, error) { return ccs.cs.size() } -func (ccs *persistingChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error { - err := ccs.wait() - - if err != nil { - return err - } - - if ccs.cs == nil { - return ErrNoChunkSource - } - - return ccs.cs.extract(ctx, chunks) -} - type emptyChunkSource struct{} func (ecs emptyChunkSource) has(h addr) (bool, error) { @@ -304,14 +290,10 @@ func (ecs emptyChunkSource) calcReads(reqs []getRecord, blockSize uint64) (reads return 0, true, nil } -func (ecs emptyChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error { +func (ecs emptyChunkSource) close() error { return nil } -func (ecs emptyChunkSource) Close() error { - return nil -} - -func (ecs emptyChunkSource) Clone() (chunkSource, error) { +func (ecs emptyChunkSource) clone() (chunkSource, error) { return ecs, nil } diff --git a/go/store/nbs/root_tracker_test.go b/go/store/nbs/root_tracker_test.go index e68e9cce66..7f6debe54d 100644 --- a/go/store/nbs/root_tracker_test.go +++ b/go/store/nbs/root_tracker_test.go @@ -579,18 +579,20 @@ func compactSourcesToBuffer(sources chunkSources) (name addr, data []byte, chunk tw := newTableWriter(buff, nil) errString := "" + ctx := context.Background() for _, src := range sources { - chunks := make(chan extractRecord) + ch := make(chan extractRecord) go func() { - defer close(chunks) - err := src.extract(context.Background(), chunks) - + defer close(ch) + err = extractAllChunks(ctx, src, func(rec extractRecord) { + ch <- rec + }) if err != nil { - chunks <- extractRecord{a: mustAddr(src.hash()), err: err} + ch <- extractRecord{a: mustAddr(src.hash()), err: err} } }() - for rec := range chunks { + for rec := range ch { if rec.err != nil { errString += fmt.Sprintf("Failed to extract %s:\n %v\n******\n\n", rec.a, rec.err) continue @@ -625,3 +627,25 @@ func (ftp fakeTablePersister) Open(ctx context.Context, name addr, 
chunkCount ui func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ manifestContents) error { return chunks.ErrUnsupportedOperation } + +func extractAllChunks(ctx context.Context, src chunkSource, cb func(rec extractRecord)) (err error) { + var index tableIndex + if index, err = src.index(); err != nil { + return err + } + + var a addr + for i := uint32(0); i < index.ChunkCount(); i++ { + _, err = index.IndexEntry(i, &a) + if err != nil { + return err + } + + data, err := src.get(ctx, a, nil) + if err != nil { + return err + } + cb(extractRecord{a: a, data: data}) + } + return +} diff --git a/go/store/nbs/store.go b/go/store/nbs/store.go index 2e798b7250..7fccfdfba7 100644 --- a/go/store/nbs/store.go +++ b/go/store/nbs/store.go @@ -294,7 +294,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash. } } - newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats) if err != nil { return manifestContents{}, err } @@ -302,7 +302,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash. 
nbs.upstream = updatedContents oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if err != nil { return manifestContents{}, err } @@ -371,7 +371,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat } } - newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats) if err != nil { return manifestContents{}, err } @@ -379,7 +379,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat nbs.upstream = updatedContents oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if err != nil { return manifestContents{}, err } @@ -587,7 +587,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager } if exists { - newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats) if err != nil { return nil, err @@ -596,7 +596,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager nbs.upstream = contents oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if err != nil { return nil, err } @@ -647,7 +647,7 @@ func (nbs *NomsBlockStore) addChunk(ctx context.Context, h addr, data []byte) bo nbs.mt = newMemTable(nbs.mtSize) } if !nbs.mt.addChunk(h, data) { - nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats) + nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats) nbs.mt = newMemTable(nbs.mtSize) return nbs.mt.addChunk(h, data) } @@ -922,7 +922,7 @@ func (nbs *NomsBlockStore) Rebase(ctx context.Context) error { return nil } - newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats) if err != nil { return err } @@ -930,7 +930,7 @@ func (nbs *NomsBlockStore) Rebase(ctx 
context.Context) error { nbs.upstream = contents oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if err != nil { return err } @@ -952,7 +952,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash) anyPossiblyNovelChunks := func() bool { nbs.mu.Lock() defer nbs.mu.Unlock() - return nbs.mt != nil || nbs.tables.Novel() > 0 + return nbs.mt != nil || len(nbs.tables.novel) > 0 } if !anyPossiblyNovelChunks() && current == last { @@ -984,7 +984,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash) } if cnt > preflushChunkCount { - nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats) + nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats) nbs.mt = nil } } @@ -1033,7 +1033,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has } handleOptimisticLockFailure := func(upstream manifestContents) error { - newTables, err := nbs.tables.Rebase(ctx, upstream.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, upstream.specs, nbs.stats) if err != nil { return err } @@ -1041,7 +1041,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has nbs.upstream = upstream oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if last != upstream.root { return errOptimisticLockFailedRoot @@ -1067,7 +1067,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has } if cnt > 0 { - nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats) + nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats) nbs.mt = nil } } @@ -1081,7 +1081,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has return err } - newTables, err := nbs.tables.Rebase(ctx, newUpstream.specs, nbs.stats) + newTables, err := nbs.tables.rebase(ctx, newUpstream.specs, nbs.stats) if err != nil { return err @@ -1090,7 +1090,7 @@ func (nbs *NomsBlockStore) 
updateManifest(ctx context.Context, current, last has nbs.upstream = newUpstream oldTables := nbs.tables nbs.tables = newTables - err = oldTables.Close() + err = oldTables.close() if err != nil { return err } @@ -1098,7 +1098,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has return errOptimisticLockFailedTables } - specs, err := nbs.tables.ToSpecs() + specs, err := nbs.tables.toSpecs() if err != nil { return err } @@ -1139,7 +1139,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has return handleOptimisticLockFailure(upstream) } - newTables, err := nbs.tables.Flatten(ctx) + newTables, err := nbs.tables.flatten(ctx) if err != nil { return nil @@ -1158,7 +1158,7 @@ func (nbs *NomsBlockStore) Version() string { } func (nbs *NomsBlockStore) Close() error { - return nbs.tables.Close() + return nbs.tables.close() } func (nbs *NomsBlockStore) Stats() interface{} { @@ -1574,7 +1574,7 @@ func (nbs *NomsBlockStore) gcTableSize() (uint64, error) { return 0, err } - avgTableSize := total / uint64(nbs.tables.Upstream()+nbs.tables.Novel()+1) + avgTableSize := total / uint64(nbs.tables.Size()+1) // max(avgTableSize, defaultMemTableSize) if avgTableSize > nbs.mtSize { @@ -1622,14 +1622,14 @@ func (nbs *NomsBlockStore) swapTables(ctx context.Context, specs []tableSpec) (e nbs.mt = newMemTable(nbs.mtSize) // clear nbs.tables.novel - nbs.tables, err = nbs.tables.Flatten(ctx) + nbs.tables, err = nbs.tables.flatten(ctx) if err != nil { return err } // replace nbs.tables.upstream with gc compacted tables nbs.upstream = upstream - nbs.tables, err = nbs.tables.Rebase(ctx, upstream.specs, nbs.stats) + nbs.tables, err = nbs.tables.rebase(ctx, upstream.specs, nbs.stats) if err != nil { return err } diff --git a/go/store/nbs/table.go b/go/store/nbs/table.go index 5f4d7209d3..231a49830f 100644 --- a/go/store/nbs/table.go +++ b/go/store/nbs/table.go @@ -230,12 +230,11 @@ type chunkReader interface { get(ctx context.Context, h 
addr, stats *Stats) ([]byte, error) getMany(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, *chunks.Chunk), stats *Stats) (bool, error) getManyCompressed(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, CompressedChunk), stats *Stats) (bool, error) - extract(ctx context.Context, chunks chan<- extractRecord) error count() (uint32, error) uncompressedLen() (uint64, error) - // Close releases resources retained by the |chunkReader|. - Close() error + // close releases resources retained by the |chunkReader|. + close() error } type chunkSource interface { @@ -253,12 +252,12 @@ type chunkSource interface { // index returns the tableIndex of this chunkSource. index() (tableIndex, error) - // Clone returns a |chunkSource| with the same contents as the + // clone returns a |chunkSource| with the same contents as the // original, but with independent |Close| behavior. A |chunkSource| // cannot be |Close|d more than once, so if a |chunkSource| is being // retained in two objects with independent life-cycle, it should be // |Clone|d first. 
- Clone() (chunkSource, error) + clone() (chunkSource, error) } type chunkSources []chunkSource diff --git a/go/store/nbs/table_reader.go b/go/store/nbs/table_reader.go index e68db87ac7..b342686842 100644 --- a/go/store/nbs/table_reader.go +++ b/go/store/nbs/table_reader.go @@ -654,11 +654,11 @@ func (tr tableReader) size() (uint64, error) { return i.TableFileSize(), nil } -func (tr tableReader) Close() error { +func (tr tableReader) close() error { return tr.tableIndex.Close() } -func (tr tableReader) Clone() (tableReader, error) { +func (tr tableReader) clone() (tableReader, error) { ti, err := tr.tableIndex.Clone() if err != nil { return tableReader{}, err diff --git a/go/store/nbs/table_set.go b/go/store/nbs/table_set.go index 7355768d47..a00da430af 100644 --- a/go/store/nbs/table_set.go +++ b/go/store/nbs/table_set.go @@ -252,7 +252,7 @@ func (ts tableSet) physicalLen() (uint64, error) { return lenNovel + lenUp, nil } -func (ts tableSet) Close() error { +func (ts tableSet) close() error { var firstErr error setErr := func(err error) { if err != nil && firstErr == nil { @@ -261,11 +261,11 @@ func (ts tableSet) Close() error { } for _, t := range ts.novel { - err := t.Close() + err := t.close() setErr(err) } for _, t := range ts.upstream { - err := t.Close() + err := t.close() setErr(err) } return firstErr @@ -276,20 +276,9 @@ func (ts tableSet) Size() int { return len(ts.novel) + len(ts.upstream) } -// Novel returns the number of tables containing novel chunks in this -// tableSet. -func (ts tableSet) Novel() int { - return len(ts.novel) -} - -// Upstream returns the number of known-persisted tables in this tableSet. -func (ts tableSet) Upstream() int { - return len(ts.upstream) -} - -// Prepend adds a memTable to an existing tableSet, compacting |mt| and +// prepend adds a memTable to an existing tableSet, compacting |mt| and // returning a new tableSet with newly compacted table added. 
-func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet { +func (ts tableSet) prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet { newTs := tableSet{ novel: make(chunkSources, len(ts.novel)+1), upstream: make(chunkSources, len(ts.upstream)), @@ -303,29 +292,9 @@ func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tabl return newTs } -func (ts tableSet) extract(ctx context.Context, chunks chan<- extractRecord) error { - // Since new tables are _prepended_ to a tableSet, extracting chunks in insertOrder requires iterating ts.upstream back to front, followed by ts.novel. - for i := len(ts.upstream) - 1; i >= 0; i-- { - err := ts.upstream[i].extract(ctx, chunks) - - if err != nil { - return err - } - } - for i := len(ts.novel) - 1; i >= 0; i-- { - err := ts.novel[i].extract(ctx, chunks) - - if err != nil { - return err - } - } - - return nil -} - -// Flatten returns a new tableSet with |upstream| set to the union of ts.novel +// flatten returns a new tableSet with |upstream| set to the union of ts.novel // and ts.upstream. -func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) { +func (ts tableSet) flatten(ctx context.Context) (tableSet, error) { flattened := tableSet{ upstream: make(chunkSources, 0, ts.Size()), p: ts.p, @@ -349,9 +318,9 @@ func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) { return flattened, nil } -// Rebase returns a new tableSet holding the novel tables managed by |ts| and +// rebase returns a new tableSet holding the novel tables managed by |ts| and // those specified by |specs|. 
-func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) { +func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) { merged := tableSet{ novel: make(chunkSources, 0, len(ts.novel)), p: ts.p, @@ -368,7 +337,7 @@ func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats) } if cnt > 0 { - t2, err := t.Clone() + t2, err := t.clone() if err != nil { return tableSet{}, err } @@ -404,7 +373,7 @@ OUTER: return tableSet{}, err } if spec.name == h { - c, err := existing.Clone() + c, err := existing.clone() if err != nil { return tableSet{}, err } @@ -454,7 +423,7 @@ OUTER: if err != nil { // Close any opened chunkSources for _, cs := range opened { - _ = cs.Close() + _ = cs.close() } if r := rp.Load(); r != nil { @@ -466,7 +435,7 @@ OUTER: return merged, nil } -func (ts tableSet) ToSpecs() ([]tableSpec, error) { +func (ts tableSet) toSpecs() ([]tableSpec, error) { tableSpecs := make([]tableSpec, 0, ts.Size()) for _, src := range ts.novel { cnt, err := src.count() diff --git a/go/store/nbs/table_set_test.go b/go/store/nbs/table_set_test.go index 3dff34f541..be9c8e5539 100644 --- a/go/store/nbs/table_set_test.go +++ b/go/store/nbs/table_set_test.go @@ -33,8 +33,8 @@ import ( var testChunks = [][]byte{[]byte("hello2"), []byte("goodbye2"), []byte("badbye2")} func TestTableSetPrependEmpty(t *testing.T) { - ts := newFakeTableSet(&noopQuotaProvider{}).Prepend(context.Background(), newMemTable(testMemTableSize), &Stats{}) - specs, err := ts.ToSpecs() + ts := newFakeTableSet(&noopQuotaProvider{}).prepend(context.Background(), newMemTable(testMemTableSize), &Stats{}) + specs, err := ts.toSpecs() require.NoError(t, err) assert.Empty(t, specs) } @@ -42,23 +42,23 @@ func TestTableSetPrependEmpty(t *testing.T) { func TestTableSetPrepend(t *testing.T) { assert := assert.New(t) ts := newFakeTableSet(&noopQuotaProvider{}) - specs, err := ts.ToSpecs() + specs, err := ts.toSpecs() 
require.NoError(t, err) assert.Empty(specs) mt := newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[0]), testChunks[0]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) - firstSpecs, err := ts.ToSpecs() + firstSpecs, err := ts.toSpecs() require.NoError(t, err) assert.Len(firstSpecs, 1) mt = newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[1]), testChunks[1]) mt.addChunk(computeAddr(testChunks[2]), testChunks[2]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) - secondSpecs, err := ts.ToSpecs() + secondSpecs, err := ts.toSpecs() require.NoError(t, err) assert.Len(secondSpecs, 2) assert.Equal(firstSpecs, secondSpecs[1:]) @@ -67,22 +67,22 @@ func TestTableSetPrepend(t *testing.T) { func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) { assert := assert.New(t) ts := newFakeTableSet(&noopQuotaProvider{}) - specs, err := ts.ToSpecs() + specs, err := ts.toSpecs() require.NoError(t, err) assert.Empty(specs) mt := newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[0]), testChunks[0]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) mt = newMemTable(testMemTableSize) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) mt = newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[1]), testChunks[1]) mt.addChunk(computeAddr(testChunks[2]), testChunks[2]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) - specs, err = ts.ToSpecs() + specs, err = ts.toSpecs() require.NoError(t, err) assert.Len(specs, 2) } @@ -90,61 +90,26 @@ func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) { func TestTableSetFlattenExcludesEmptyTable(t *testing.T) { assert := assert.New(t) ts := newFakeTableSet(&noopQuotaProvider{}) - specs, err := ts.ToSpecs() 
+ specs, err := ts.toSpecs() require.NoError(t, err) assert.Empty(specs) mt := newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[0]), testChunks[0]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) mt = newMemTable(testMemTableSize) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) mt = newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[1]), testChunks[1]) mt.addChunk(computeAddr(testChunks[2]), testChunks[2]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) - ts, err = ts.Flatten(context.Background()) + ts, err = ts.flatten(context.Background()) require.NoError(t, err) assert.EqualValues(ts.Size(), 2) } -func TestTableSetExtract(t *testing.T) { - assert := assert.New(t) - ts := newFakeTableSet(&noopQuotaProvider{}) - specs, err := ts.ToSpecs() - require.NoError(t, err) - assert.Empty(specs) - - // Put in one table - mt := newMemTable(testMemTableSize) - mt.addChunk(computeAddr(testChunks[0]), testChunks[0]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) - - // Put in a second - mt = newMemTable(testMemTableSize) - mt.addChunk(computeAddr(testChunks[1]), testChunks[1]) - mt.addChunk(computeAddr(testChunks[2]), testChunks[2]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) - - chunkChan := make(chan extractRecord) - go func() { - defer close(chunkChan) - err := ts.extract(context.Background(), chunkChan) - - require.NoError(t, err) - }() - i := 0 - for rec := range chunkChan { - a := computeAddr(testChunks[i]) - assert.NotNil(rec.data, "Nothing for", a) - assert.Equal(testChunks[i], rec.data, "Item %d: %s != %s", i, string(testChunks[i]), string(rec.data)) - assert.Equal(a, rec.a) - i++ - } -} - func persist(t *testing.T, p tablePersister, chunks ...[]byte) { for _, c := range chunks { mt := newMemTable(testMemTableSize) @@ -166,37 +131,37 @@ func 
TestTableSetRebase(t *testing.T) { for _, c := range chunks { mt := newMemTable(testMemTableSize) mt.addChunk(computeAddr(c), c) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) } return ts } fullTS := newTableSet(persister, q) defer func() { - require.NoError(t, fullTS.Close()) + require.NoError(t, fullTS.close()) }() - specs, err := fullTS.ToSpecs() + specs, err := fullTS.toSpecs() require.NoError(t, err) assert.Empty(specs) fullTS = insert(fullTS, testChunks...) - fullTS, err = fullTS.Flatten(context.Background()) + fullTS, err = fullTS.flatten(context.Background()) require.NoError(t, err) ts := newTableSet(persister, q) ts = insert(ts, testChunks[0]) assert.Equal(1, ts.Size()) - ts, err = ts.Flatten(context.Background()) + ts, err = ts.flatten(context.Background()) require.NoError(t, err) ts = insert(ts, []byte("novel")) - specs, err = fullTS.ToSpecs() + specs, err = fullTS.toSpecs() require.NoError(t, err) - ts2, err := ts.Rebase(context.Background(), specs, nil) + ts2, err := ts.rebase(context.Background(), specs, nil) require.NoError(t, err) defer func() { - require.NoError(t, ts2.Close()) + require.NoError(t, ts2.close()) }() - err = ts.Close() + err = ts.close() require.NoError(t, err) assert.Equal(4, ts2.Size()) } @@ -204,17 +169,17 @@ func TestTableSetRebase(t *testing.T) { func TestTableSetPhysicalLen(t *testing.T) { assert := assert.New(t) ts := newFakeTableSet(&noopQuotaProvider{}) - specs, err := ts.ToSpecs() + specs, err := ts.toSpecs() require.NoError(t, err) assert.Empty(specs) mt := newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[0]), testChunks[0]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = ts.prepend(context.Background(), mt, &Stats{}) mt = newMemTable(testMemTableSize) mt.addChunk(computeAddr(testChunks[1]), testChunks[1]) mt.addChunk(computeAddr(testChunks[2]), testChunks[2]) - ts = ts.Prepend(context.Background(), mt, &Stats{}) + ts = 
ts.prepend(context.Background(), mt, &Stats{}) assert.True(mustUint64(ts.physicalLen()) > indexSize(mustUint32(ts.count()))) } @@ -241,7 +206,7 @@ func TestTableSetClosesOpenedChunkSourcesOnErr(t *testing.T) { } ts := tableSet{p: p, q: q, rl: make(chan struct{}, 1)} - _, err := ts.Rebase(context.Background(), specs, &Stats{}) + _, err := ts.rebase(context.Background(), specs, &Stats{}) require.Error(t, err) for _ = range p.opened { diff --git a/integration-tests/bats/config.bats b/integration-tests/bats/config.bats index ee171f795f..5e98079829 100644 --- a/integration-tests/bats/config.bats +++ b/integration-tests/bats/config.bats @@ -190,10 +190,12 @@ teardown() { start_sql_server - server_query "" 1 dolt "" "create database testdb" "" - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntestdb" "" - server_query "testdb" 1 dolt "" "create table a(x int)" "" - server_query "testdb" 1 dolt "" "insert into a values (1), (2)" "" + dolt sql-client --use-db '' -u dolt -P $PORT -q "create database testdb" + run dolt sql-client --use-db '' -u dolt -P $PORT -r csv -q "show databases" + [ $status -eq 0 ] + [[ "$output" =~ "testdb" ]] || false + dolt sql-client --use-db testdb -u dolt -P $PORT -q "create table a(x int)" + dolt sql-client --use-db testdb -u dolt -P $PORT -q "insert into a values (1), (2)" [ -d "testdb" ] cd testdb diff --git a/integration-tests/bats/constraint-violations.bats b/integration-tests/bats/constraint-violations.bats index bd995f3781..0b54154c0d 100644 --- a/integration-tests/bats/constraint-violations.bats +++ b/integration-tests/bats/constraint-violations.bats @@ -2788,7 +2788,7 @@ SQL run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv log_status_eq "0" [[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false - [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", 
""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false + [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false [[ "${#lines[@]}" = "2" ]] || false run dolt sql -q "SELECT * FROM test" -r=csv log_status_eq "0" @@ -2825,7 +2825,7 @@ SQL run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv log_status_eq "0" [[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false - [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false + [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false [[ "${#lines[@]}" = "2" ]] || false run dolt sql -q "SELECT * FROM test" -r=csv log_status_eq "0" diff --git a/integration-tests/bats/deleted-branches.bats b/integration-tests/bats/deleted-branches.bats index a005fe41d5..500d8b2387 100644 --- a/integration-tests/bats/deleted-branches.bats +++ b/integration-tests/bats/deleted-branches.bats @@ -65,11 +65,14 @@ make_it() { start_sql_server "dolt_repo_$$" - server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" - server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' 
"" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep')" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_branch('-D', 'main');" - server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "id\n" "" + run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "describe test" + [ $status -eq 0 ] + [[ "$output" =~ "id" ]] || false } @test "deleted-branches: can SQL connect with existing branch revision specifier when checked out branch is deleted" { @@ -77,11 +80,12 @@ make_it() { start_sql_server "dolt_repo_$$" + # Can't string together multiple queries in dolt sql-client server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' "" - + # Against the default branch it fails - run server_query "dolt_repo_$$" 1 "" dolt "" "SELECT * FROM test" "id\n" "" - [ "$status" -eq 1 ] || fail "expected query against the default branch, which was deleted, to fail" + run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test" + [ $status -ne 0 ] # Against to_keep it succeeds server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test" "id\n" "" @@ -92,10 +96,11 @@ make_it() { start_sql_server "dolt_repo_$$" - server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" # Against the default branch it fails - server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "" 1 + run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test" "" + [ $status -ne 0 ] # Against main, which exists it succeeds server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test" "id\n" "" @@ -106,7 +111,7 @@ make_it() { start_sql_server "dolt_repo_$$" - server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 
'this_branch_does_not_exist'" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" # We are able to use a database branch revision in the connection string server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test;" @@ -141,11 +146,11 @@ make_it() { start_sql_server "dolt_repo_$$" - server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' "" - server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test" - server_query "dolt_repo_$$" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" "" + dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "CALL DOLT_CHECKOUT('to_checkout')" } diff --git a/integration-tests/bats/foreign-keys-invert-pk.bats b/integration-tests/bats/foreign-keys-invert-pk.bats index d54b7bbecd..6a4da26cbe 100644 --- a/integration-tests/bats/foreign-keys-invert-pk.bats +++ b/integration-tests/bats/foreign-keys-invert-pk.bats @@ -22,6 +22,11 @@ teardown() { dolt commit -am "cm" } +@test "foreign-keys-invert-pk: no secondary indexes made" { + run dolt index ls + [[ $output = "No indexes in the working set" ]] || false +} + @test "foreign-keys-invert-pk: check referential integrity on merge" { dolt commit -am "main" dolt checkout -b feat diff --git a/integration-tests/bats/foreign-keys.bats b/integration-tests/bats/foreign-keys.bats index 170d91926e..72e4ce7ff9 100644 --- a/integration-tests/bats/foreign-keys.bats +++ b/integration-tests/bats/foreign-keys.bats @@ -1971,7 +1971,7 @@ SQL # the prefix key should not be unique run dolt sql -q "show create table parent" [ $status -eq 0 ] - [[ $output =~ "KEY \`b\` (\`b\`)" 
]] || false + [[ ! $output =~ "KEY \`b\` (\`b\`)" ]] || false [[ ! $output =~ "UNIQUE" ]] || false run dolt sql -q "show create table child" diff --git a/integration-tests/bats/import-create-tables.bats b/integration-tests/bats/import-create-tables.bats index 3917525bc2..57297909ca 100755 --- a/integration-tests/bats/import-create-tables.bats +++ b/integration-tests/bats/import-create-tables.bats @@ -169,7 +169,7 @@ pk,c1,c2,c3,c4,c5 9,1,2,3,4,5 DELIM dolt table import -c --pk=pk test 1pk5col-ints.csv - run dolt sql -q "create table fktest(id int not null, tpk int unsigned, c2 int, primary key(id), foreign key (tpk) references test(pk))" + run dolt sql -q "create table fktest(id int not null, tpk int, c2 int, primary key(id), foreign key (tpk) references test(pk))" [ "$status" -eq 0 ] run dolt sql -q "insert into fktest values (1, 0, 1)" [ "$status" -eq 0 ] @@ -567,7 +567,7 @@ DELIM [[ "$output" =~ "CREATE TABLE \`test\`" ]] [[ "$output" =~ "\`pk\` int" ]] [[ "$output" =~ "\`str\` varchar(16383)" ]] - [[ "$output" =~ "\`int\` int unsigned" ]] + [[ "$output" =~ "\`int\` int" ]] [[ "$output" =~ "\`bool\` tinyint" ]] [[ "$output" =~ "\`float\` float" ]] [[ "$output" =~ "\`date\` date" ]] diff --git a/integration-tests/bats/migrate.bats b/integration-tests/bats/migrate.bats index 33f13fdd4f..562938cfe4 100644 --- a/integration-tests/bats/migrate.bats +++ b/integration-tests/bats/migrate.bats @@ -248,3 +248,21 @@ SQL run dolt schema show t [[ "$output" =~ "PRIMARY KEY (\`pk1\`,\`pk2\`)" ]] || false } + +@test "migrate: removed tables stay removed" { + dolt sql -q "create table alpha (pk int primary key);" + dolt sql -q "create table beta (pk int primary key);" + dolt commit -Am "create tables" + + dolt sql -q "alter table alpha rename to zulu;" + dolt sql -q "drop table beta" + dolt commit -Am "rename table alpha to zeta, drop table beta" + + dolt migrate + + run dolt ls + [ $status -eq 0 ] + [[ "$output" =~ "zulu" ]] || false + [[ ! 
"$output" =~ "alpha" ]] || false + [[ ! "$output" =~ "beta" ]] || false +} diff --git a/integration-tests/bats/migration-integration.bats b/integration-tests/bats/migration-integration.bats index 9cbbde20f2..a3733b171a 100644 --- a/integration-tests/bats/migration-integration.bats +++ b/integration-tests/bats/migration-integration.bats @@ -14,8 +14,8 @@ teardown() { } @test "migration-integration: first-hour-db" { - dolt clone dolthub/first-hour-db - cd first-hour-db + dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int + cd first-hour-db-migration-int dolt tag -v run dolt tag -v @@ -39,8 +39,8 @@ teardown() { } @test "migration-integration: first-hour-db after garbage collection" { - dolt clone dolthub/first-hour-db - cd first-hour-db + dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int + cd first-hour-db-migration-int dolt gc dolt tag -v @@ -65,8 +65,8 @@ teardown() { } @test "migration-integration: us-jails" { - dolt clone dolthub/us-jails - cd us-jails + dolt clone https://doltremoteapi.dolthub.com/dolthub/us-jails-migration-integration + cd us-jails-migration-integration dolt tag -v run dolt tag -v diff --git a/integration-tests/bats/multidb.bats b/integration-tests/bats/multidb.bats index bd135e9628..9db78b7220 100644 --- a/integration-tests/bats/multidb.bats +++ b/integration-tests/bats/multidb.bats @@ -31,7 +31,7 @@ teardown() { cd dbs1 start_multi_db_server repo1 server_query repo1 1 dolt "" "create database new; use new; call dcheckout('-b', 'feat'); create table t (x int); call dolt_add('.'); call dcommit('-am', 'cm'); set @@global.new_default_branch='feat'" - server_query repo1 1 dolt "" "use repo1" + dolt sql-client -u dolt --use-db '' -P $PORT -q "use repo1" } @test "multidb: incompatible BIN FORMATs" { diff --git a/integration-tests/bats/remotes-sql-server.bats b/integration-tests/bats/remotes-sql-server.bats index 2355c1baed..79750c1a5b 100644 --- 
a/integration-tests/bats/remotes-sql-server.bats +++ b/integration-tests/bats/remotes-sql-server.bats @@ -45,13 +45,14 @@ teardown() { dolt checkout -b other start_sql_server repo1 - run server_query repo1 1 dolt "" "call dolt_push()" "" "" 1 + run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()" + [ $status -ne 0 ] [[ "$output" =~ "the current branch has no upstream branch" ]] || false - - server_query repo1 1 dolt "" "call dolt_push('--set-upstream', 'origin', 'other') " "" + + dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push('--set-upstream', 'origin', 'other')" skip "In-memory branch doesn't track upstream" - server_query repo1 1 dolt "" "call dolt_push()" "" + dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()" } @test "remotes-sql-server: push on sql-session commit" { @@ -61,7 +62,7 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_to_remote remote1 start_sql_server repo1 - server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');" + dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');" cd ../repo2 dolt pull remote1 @@ -81,7 +82,7 @@ teardown() { dolt config --local --add sqlserver.global.dolt_async_replication 1 start_sql_server repo1 - server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');" + dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');" # wait for the process to exit after we stop it stop_sql_server 1 @@ -108,7 +109,10 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" -r csv + [ $status -eq 0 ] + [[ "$output" =~ "Tables_in_repo2" ]] || false + [[ "$output" =~ "test" ]] || false } @test "remotes-sql-server: pull remote not found error" { @@ -133,7 +137,9 @@ teardown() { dolt config --local 
--add sqlserver.global.dolt_replicate_heads main start_sql_server repo1 - run server_query repo1 1 dolt "" "show tables" "Table\n" + run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [[ "$output" =~ "Table" ]] || false } @test "remotes-sql-server: push remote not found error" { @@ -156,7 +162,10 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_to_remote unknown start_sql_server repo1 - server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\ntest" + run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [[ "$output" =~ "Tables_in_repo1" ]] || false + [[ "$output" =~ "test" ]] || false } @test "remotes-sql-server: pull multiple heads" { @@ -172,8 +181,16 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main,new_feature start_sql_server repo2 - server_query repo2 1 dolt "" "select dolt_checkout('new_feature') as b" "b\n0" - server_query repo2 1 dolt "" "select name from dolt_branches order by name" "name\nmain\nnew_feature" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select dolt_checkout('new_feature') as b" + [ $status -eq 0 ] + [[ "$output" =~ "b" ]] || false + [[ "$output" =~ "0" ]] || false + + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select name from dolt_branches order by name" + [ $status -eq 0 ] + [[ "$output" =~ "name" ]] || false + [[ "$output" =~ "main" ]] || false + [[ "$output" =~ "new_feature" ]] || false } @test "remotes-sql-server: connect to remote head" { @@ -194,13 +211,17 @@ teardown() { start_sql_server repo2 # No data on main - server_query repo2 1 dolt "" "show tables" "" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [ "$output" = "" ] + + # Can't use dolt sql-client to connect to branches # Connecting to heads that exist only on the remote should work fine (they get fetched) server_query "repo2/new_feature" 1 dolt "" "show tables" 
"Tables_in_repo2/new_feature\ntest" server_query repo2 1 dolt "" 'use `repo2/new_feature2`' "" server_query repo2 1 dolt "" 'select * from `repo2/new_feature2`.test' "pk\n0\n1\n2" - + # Connecting to heads that don't exist should error out run server_query "repo2/notexist" 1 dolt "" 'use `repo2/new_feature2`' "" 1 [[ $output =~ "database not found" ]] || false @@ -228,7 +249,10 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [[ $output =~ "Tables_in_repo2" ]] || false + [[ $output =~ "test" ]] || false } @test "remotes-sql-server: pull invalid head" { @@ -240,7 +264,8 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads unknown start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "" 1 + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -ne 0 ] [[ "$output" =~ "remote not found: 'unknown'" ]] || false } @@ -253,7 +278,8 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "" 1 + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -ne 0 ] [[ "$output" =~ "remote not found: 'unknown'" ]] || false } @@ -270,7 +296,9 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "Table\n" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables" + [ $status -eq 0 ] + [ "$output" = "" ] } @test "remotes-sql-server: connect to missing branch pulls remote" { @@ -286,7 +314,11 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "SHOW tables" "" # no tables on main + run 
dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables" + [ $status -eq 0 ] + [ "$output" = "" ] + + # Can't connect to a specific branch with dolt sql-client server_query "repo2/feature-branch" 1 dolt "" "SHOW Tables" "Tables_in_repo2/feature-branch\ntest" } @@ -303,8 +335,14 @@ teardown() { dolt config --local --add sqlserver.global.dolt_replicate_heads main start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest" - server_query repo2 1 dolt "" "use \`repo2/$head_hash\`" "" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [[ $output =~ "Tables_in_repo2" ]] || false + [[ $output =~ "test" ]] || false + + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/$head_hash\`" + [ $status -eq 0 ] + [ "$output" = "" ] } @test "remotes-sql-server: connect to tag works" { @@ -321,8 +359,14 @@ teardown() { dolt tag v1 start_sql_server repo2 - server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest" - server_query repo2 1 dolt "" "use \`repo2/v1\`" "" + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" + [ $status -eq 0 ] + [[ $output =~ "Tables_in_repo2" ]] || false + [[ $output =~ "test" ]] || false + + run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/v1\`" + [ $status -eq 0 ] + [ "$output" = "" ] } get_head_commit() { diff --git a/integration-tests/bats/replication-multidb.bats b/integration-tests/bats/replication-multidb.bats index fc588029af..9f2c6cb75e 100644 --- a/integration-tests/bats/replication-multidb.bats +++ b/integration-tests/bats/replication-multidb.bats @@ -290,15 +290,15 @@ SQL start_multi_db_server repo1 cd .. 
- server_query repo1 1 dolt "" "create table t1 (a int primary key)" - server_query repo1 1 dolt "" "call dolt_add('.')" - server_query repo1 1 dolt "" "call dolt_commit('-am', 'cm')" - server_query repo2 1 dolt "" "create table t2 (a int primary key)" - server_query repo2 1 dolt "" "call dolt_add('.')" - server_query repo2 1 dolt "" "call dolt_commit('-am', 'cm')" - server_query repo3 1 dolt "" "create table t3 (a int primary key)" - server_query repo3 1 dolt "" "call dolt_add('.')" - server_query repo3 1 dolt "" "call dolt_commit('-am', 'cm')" + dolt sql-client --use-db repo1 -u dolt -P $PORT -q "create table t1 (a int primary key)" + dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_add('.')" + dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')" + dolt sql-client --use-db repo2 -u dolt -P $PORT -q "create table t2 (a int primary key)" + dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_add('.')" + dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')" + dolt sql-client --use-db repo3 -u dolt -P $PORT -q "create table t3 (a int primary key)" + dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_add('.')" + dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')" clone_helper $TMPDIRS @@ -344,7 +344,18 @@ SQL cd dbs1 start_multi_db_server repo1 - server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\nt1" - server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\nt2" - server_query repo3 1 dolt "" "show tables" "Tables_in_repo3\nt3" + run dolt sql-client --use-db repo1 -u dolt -P $PORT -q "show tables" + [ $status -eq 0 ] + [[ "$output" =~ Tables_in_repo1 ]] || false + [[ "$output" =~ t1 ]] || false + + run dolt sql-client --use-db repo2 -u dolt -P $PORT -q "show tables" + [ $status -eq 0 ] + [[ "$output" =~ Tables_in_repo2 ]] || false + [[ "$output" =~ t2 ]] || false + + run dolt sql-client --use-db repo3 -u dolt -P $PORT -q "show 
tables" + [ $status -eq 0 ] + [[ "$output" =~ Tables_in_repo3 ]] || false + [[ "$output" =~ t3 ]] || false } diff --git a/integration-tests/bats/schema-import.bats b/integration-tests/bats/schema-import.bats index 0fd97086da..83ffb3e5ae 100755 --- a/integration-tests/bats/schema-import.bats +++ b/integration-tests/bats/schema-import.bats @@ -82,7 +82,7 @@ teardown() { [[ "$output" =~ "\`string\` varchar(16383)" ]] || false [[ "$output" =~ "\`boolean\` tinyint" ]] || false [[ "$output" =~ "\`float\` float" ]] || false - [[ "$output" =~ "\`uint\` int unsigned" ]] || false + [[ "$output" =~ "\`uint\` int" ]] || false [[ "$output" =~ "\`uuid\` char(36) CHARACTER SET ascii COLLATE ascii_bin" ]] || false } @@ -259,9 +259,9 @@ DELIM run dolt diff --schema [ "$status" -eq 0 ] - [[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false - [[ "$output" =~ '+ `y` float NOT NULL,' ]] || false - [[ "$output" =~ '+ `z` int NOT NULL,' ]] || false + [[ "$output" =~ '+ `x` varchar(16383),' ]] || false + [[ "$output" =~ '+ `y` float,' ]] || false + [[ "$output" =~ '+ `z` int,' ]] || false # assert no columns were deleted/replaced [[ ! "$output" = "- \`" ]] || false @@ -282,9 +282,9 @@ DELIM run dolt diff --schema [ "$status" -eq 0 ] - [[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false - [[ "$output" =~ '+ `y` float NOT NULL,' ]] || false - [[ "$output" =~ '+ `z` int NOT NULL,' ]] || false + [[ "$output" =~ '+ `x` varchar(16383),' ]] || false + [[ "$output" =~ '+ `y` float,' ]] || false + [[ "$output" =~ '+ `z` int,' ]] || false # assert no columns were deleted/replaced [[ ! 
"$output" = "- \`" ]] || false @@ -308,9 +308,9 @@ DELIM run dolt diff --schema [ "$status" -eq 0 ] - [[ "$output" =~ '- `a` varchar(16383) NOT NULL,' ]] || false - [[ "$output" =~ '- `b` float NOT NULL,' ]] || false - [[ "$output" =~ '- `c` tinyint NOT NULL,' ]] || false + [[ "$output" =~ '- `a` varchar(16383),' ]] || false + [[ "$output" =~ '- `b` float,' ]] || false + [[ "$output" =~ '- `c` tinyint,' ]] || false # assert no columns were added [[ ! "$output" = "+ \`" ]] || false } diff --git a/integration-tests/bats/sql-charsets-collations.bats b/integration-tests/bats/sql-charsets-collations.bats index ee14a38714..c3dbcd668d 100644 --- a/integration-tests/bats/sql-charsets-collations.bats +++ b/integration-tests/bats/sql-charsets-collations.bats @@ -39,10 +39,10 @@ teardown() { @test "sql-charsets-collations: define charset and collation on a database" { start_sql_server - server_query "" 1 dolt "" "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;" + dolt sql-client -u dolt --use-db '' -P $PORT -q "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;" skip "Defining charsets and collations on a database not supported" - server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1" - server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci" + dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1" + dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci" } @test "sql-charsets-collations: define and use a collation and charset" { diff --git a/integration-tests/bats/sql-privs.bats b/integration-tests/bats/sql-privs.bats index 5dccaea311..e8be4dfb52 100644 --- a/integration-tests/bats/sql-privs.bats +++ b/integration-tests/bats/sql-privs.bats @@ -60,9 +60,15 @@ 
teardown() { SERVER_PID=$! # will get killed by teardown_common sleep 5 # not using python wait so this works on windows - server_query test_db 1 root "" "select user from mysql.user order by user" "User\nroot" - server_query test_db 1 root "" "create user new_user" "" - server_query test_db 1 root "" "select user from mysql.user order by user" "User\nnew_user\nroot" + run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user" + [ $status -eq 0 ] + [[ $output =~ "root" ]] || false + + dolt sql-client -P $PORT -u root --use-db test_db -q "create user new_user" + run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user" + [ $status -eq 0 ] + [[ $output =~ "root" ]] || false + [[ $output =~ "new_user" ]] || false stop_sql_server rm -f .dolt/sql-server.lock @@ -73,7 +79,8 @@ teardown() { SERVER_PID=$! # will get killed by teardown_common sleep 5 # not using python wait so this works on windows - server_query test_db 1 root "" "select user from mysql.user order by user" "" 1 + run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user" + [ $status -ne 0 ] } @test "sql-privs: starting server with empty config works" { @@ -82,10 +89,16 @@ teardown() { start_sql_server_with_config test_db server.yaml - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" - + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user" + [ $status -eq 0 ] + [[ $output =~ "dolt" ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user" + [ $status -eq 0 ] + [[ $output =~ "dolt" ]] || false + [[ $output 
=~ "new_user" ]] || false + run ls -a [[ "$output" =~ ".doltcfg" ]] || false @@ -111,8 +124,12 @@ behavior: dolt sql-server --port=$PORT --config server.yaml --user cmddolt & SERVER_PID=$! + sleep 5 - server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt" + + run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ "cmddolt" ]] || false } @test "sql-privs: yaml with user is also replaced with command line user" { @@ -135,8 +152,11 @@ behavior: dolt sql-server --port=$PORT --config server.yaml --user cmddolt & SERVER_PID=$! + sleep 5 - server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt" + run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ "cmddolt" ]] || false } @test "sql-privs: yaml specifies doltcfg dir" { @@ -146,9 +166,16 @@ behavior: start_sql_server_with_config test_db server.yaml - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! 
[[ "$output" =~ ".doltcfg" ]] || false @@ -165,10 +192,17 @@ behavior: start_sql_server_with_config test_db server.yaml - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false + run ls -a [[ "$output" =~ ".doltcfg" ]] || false [[ "$output" =~ "privs.db" ]] || false @@ -184,9 +218,18 @@ behavior: start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nprivs_user" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ privs_user ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false + [[ $output =~ privs_user ]] || false # Test that privs.json file is not in json format run cat privs.json @@ -196,7 +239,12 @@ behavior: rm -f ./.dolt/sql-server.lock stop_sql_server start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json - server_query test_db 1 dolt "" "select 
user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false + [[ $output =~ privs_user ]] || false } @test "sql-privs: errors instead of panic when reading badly formatted privilege file" { @@ -217,9 +265,16 @@ behavior: run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a [[ "$output" =~ ".doltcfg" ]] || false @@ -232,7 +287,9 @@ behavior: make_test_repo start_sql_server_with_args --host 127.0.0.1 --user=dolt - server_query test_db 1 dolt "" "select user, host from mysql.user order by user" "User,Host\ndolt,%" + run dolt sql-client -P $PORT -u dolt --use-db test_db --result-format csv -q "select user, host from mysql.user order by user" + [ $status -eq 0 ] + [[ "$output" =~ "dolt,%" ]] || false } @test "sql-privs: multiple doltcfg directories causes error" { @@ -267,10 +324,24 @@ behavior: ! [[ "$output" =~ ".doltcfg" ]] || false ! 
[[ "$output" =~ "privileges.db" ]] || false - server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query db1 1 dolt "" "create user new_user" "" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ db1 ]] || false + [[ $output =~ db2 ]] || false + [[ $output =~ db3 ]] || false + [[ $output =~ information_schema ]] || false + [[ $output =~ mysql ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false @@ -293,10 +364,17 @@ behavior: ! [[ "$output" =~ ".doltcfg" ]] || false ! [[ "$output" =~ "doltcfgdir" ]] || false - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false + run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false [[ "$output" =~ "doltcfgdir" ]] || false @@ -314,9 +392,16 @@ behavior: ! 
[[ "$output" =~ ".doltcfg" ]] || false ! [[ "$output" =~ "privs.db" ]] || false - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a [[ "$output" =~ ".doltcfg" ]] || false @@ -337,10 +422,24 @@ behavior: ! [[ "$output" =~ ".doltcfg" ]] || false ! [[ "$output" =~ "privileges.db" ]] || false - server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query db1 1 dolt "" "create user new_user" "" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ db1 ]] || false + [[ $output =~ db2 ]] || false + [[ $output =~ db3 ]] || false + [[ $output =~ information_schema ]] || false + [[ $output =~ mysql ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false @@ -368,10 +467,24 @@ behavior: ! 
[[ "$output" =~ ".doltcfg" ]] || false ! [[ "$output" =~ "privs.db" ]] || false - server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query db1 1 dolt "" "create user new_user" "" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ db1 ]] || false + [[ $output =~ db2 ]] || false + [[ $output =~ db3 ]] || false + [[ $output =~ information_schema ]] || false + [[ $output =~ mysql ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false @@ -395,9 +508,16 @@ behavior: ! [[ "$output" =~ "doltcfgdir" ]] || false ! [[ "$output" =~ "privs.db" ]] || false - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query test_db 1 dolt "" "create user new_user" "" - server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! [[ "$output" =~ ".doltcfg" ]] || false @@ -420,10 +540,24 @@ behavior: ! 
[[ "$output" =~ "privileges.db" ]] || false ! [[ "$output" =~ "privs.db" ]] || false - server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt" - server_query db1 1 dolt "" "create user new_user" "" - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ db1 ]] || false + [[ $output =~ db2 ]] || false + [[ $output =~ db3 ]] || false + [[ $output =~ information_schema ]] || false + [[ $output =~ mysql ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + + dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user" + + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false run ls -a ! 
[[ "$output" =~ ".doltcfg" ]] || false @@ -447,7 +581,7 @@ behavior: dolt init start_sql_server_with_args --host 0.0.0.0 --user=dolt - server_query test_db 1 dolt "" "create user new_user" "" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user" stop_sql_server sleep 1 run ls -a @@ -457,65 +591,91 @@ behavior: cd db_dir start_sql_server_with_args --host 0.0.0.0 --user=dolt - server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user" + run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ new_user ]] || false } @test "sql-privs: basic lack of privileges tests" { make_test_repo start_sql_server - server_query test_db 1 dolt "" "create table t1(c1 int)" - server_query test_db 1 dolt "" "create user test" - server_query test_db 1 dolt "" "grant select on test_db.* to test" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test" # Should only see test_db database - server_query "" 1 test "" "show databases" "Database\ntest_db" - server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ test_db ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db test_db -q "show tables" + [ $status -eq 0 ] + [[ $output =~ t1 ]] || false # Revoke works as expected - server_query test_db 1 dolt "" "revoke select on test_db.* from test" - server_query test_db 1 test "" "show tables" "" 1 + dolt sql-client -P $PORT -u dolt --use-db test_db -q "revoke select on test_db.* from test" + run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables" + [ $status -ne 0 ] # Host in privileges is respected - server_query 
test_db 1 dolt "" "drop user test" - server_query test_db 1 dolt "" "create user test@'127.0.0.1'" - server_query test_db 1 dolt "" "grant select on test_db.* to test@'127.0.0.1'" - server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1" - server_query test_db 1 dolt "" "drop user test@'127.0.0.1'" - server_query test_db 1 dolt "" "create user test@'10.10.10.10'" - server_query test_db 1 dolt "" "grant select on test_db.* to test@'10.10.10.10'" - server_query test_db 1 test "" "show tables" "" 1 + dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'127.0.0.1'" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'127.0.0.1'" + run dolt sql-client -P $PORT -u test -H 127.0.0.1 --use-db test_db -q "show tables" + [ $status -eq 0 ] + [[ $output =~ t1 ]] || false + + + dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test@'127.0.0.1'" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'10.10.10.10'" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'10.10.10.10'" + run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables" + [ $status -ne 0 ] } @test "sql-privs: creating user identified by password" { make_test_repo start_sql_server - server_query test_db 1 dolt "" "create user test identified by 'test'" "" - server_query test_db 1 dolt "" "grant select on mysql.user to test" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test identified by 'test'" + dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on mysql.user to test" # Should not be able to connect to test_db - server_query test_db 1 test test "select user from mysql.user order by user" "" 1 + run dolt sql-client -P $PORT -u test -p test --use-db test_db -q "select user from mysql.user order by user" + [ $status -ne 0 ] - server_query "" 1 test test "select 
user from mysql.user order by user" "User\ndolt\ntest" + run dolt sql-client -P $PORT -u test -p test --use-db '' -q "select user from mysql.user" + [ $status -eq 0 ] + [[ $output =~ dolt ]] || false + [[ $output =~ test ]] || false # Bad password can't connect - server_query "" 1 test bad "select user from mysql.user order by user" "" 1 + run dolt sql-client -P $PORT -u test -p bad --use-db '' -q "select user from mysql.user order by user" + [ $status -ne 0 ] # Should only see mysql database - server_query "" 1 test test "show databases" "Database\nmysql" + run dolt sql-client -P $PORT -u test -p test --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ mysql ]] || false + ! [[ $output =~ test_db ]] || false } @test "sql-privs: deleting user prevents access by that user" { make_test_repo start_sql_server - server_query test_db 1 dolt "" "create user test" - server_query test_db 1 dolt "" "grant select on test_db.* to test" "" + dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test" + dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on test_db.* to test" + run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables" + [ $status -eq 0 ] + echo $output + [[ $output =~ t1 ]] || false - server_query test_db 1 test "" "show tables" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "drop user test" - server_query test_db 1 dolt "" "drop user test" - - server_query test_db 1 test "" "show tables" "" 1 + run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables" + [ $status -ne 0 ] } diff --git a/integration-tests/bats/sql-server.bats b/integration-tests/bats/sql-server.bats index 547ca77afd..9d83222fbc 100644 --- a/integration-tests/bats/sql-server.bats +++ b/integration-tests/bats/sql-server.bats @@ -22,6 +22,9 @@ setup() { teardown() { stop_sql_server + # Added this sleep because it was leaving garbage without it. 
+ sleep 1 + rm -rf $BATS_TMPDIR/sql-server-test$$ teardown_common } @@ -41,12 +44,14 @@ teardown() { # start the server and ensure there are no databases yet cd $tempDir/empty_server start_sql_server - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ information_schema ]] || false + [[ $output =~ mysql ]] || false # verify that dolt_clone works - # TODO: Once dolt_clone can be called without a selected database, this can be removed - server_query "" 1 dolt "" "create database test01;" "" - server_query "test01" 1 dolt "" "call dolt_clone('file:///$tempDir/remote');" "status\n0" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test01" + dolt sql-client -P $PORT -u dolt --use-db 'test01' -q "call dolt_clone('file:///$tempDir/remote')" } @test "sql-server: server assumes existing user" { @@ -70,27 +75,40 @@ teardown() { dolt branch other start_sql_server - server_query repo1 1 dolt "" "SET PERSIST repo1_default_branch = 'dev';" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "SET PERSIST repo1_default_branch = 'dev'" stop_sql_server start_sql_server - server_query repo1 1 dolt "" "SELECT @@repo1_default_branch;" "@@SESSION.repo1_default_branch\ndev" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT @@repo1_default_branch;" + [ $status -eq 0 ] + [[ $output =~ "@@SESSION.repo1_default_branch" ]] || false + [[ $output =~ "dev" ]] || false stop_sql_server # system variable is lost when starting sql-server outside of the folder # because global config is used. cd ..
start_sql_server - server_query repo1 1 dolt "" "SELECT LENGTH(@@repo1_default_branch);" "LENGTH(@@repo1_default_branch)\n0" - server_query repo1 1 dolt "" "SET PERSIST repo1_default_branch = 'other';" "" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT LENGTH(@@repo1_default_branch);" + [ $status -eq 0 ] + [[ $output =~ "LENGTH(@@repo1_default_branch)" ]] || false + [[ $output =~ " 0 " ]] || false + + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SET PERSIST repo1_default_branch = 'other'" stop_sql_server start_sql_server - server_query repo1 1 dolt "" "SELECT @@repo1_default_branch;" "@@SESSION.repo1_default_branch\nother" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT @@repo1_default_branch" + [ $status -eq 0 ] + [[ $output =~ "@@SESSION.repo1_default_branch" ]] || false + [[ $output =~ "other" ]] || false stop_sql_server # ensure we didn't blow away local setting cd repo1 start_sql_server_with_args --user dolt --doltcfg-dir './' - server_query repo1 1 dolt "" "SELECT @@repo1_default_branch;" "@@SESSION.repo1_default_branch\ndev" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT @@repo1_default_branch" + [ $status -eq 0 ] + [[ $output =~ "@@SESSION.repo1_default_branch" ]] || false + [[ $output =~ "dev" ]] || false } @test "sql-server: user session variables from config" { @@ -117,25 +135,21 @@ user_session_vars: run dolt sql-client --host=127.0.0.1 --port=$PORT --user=user0 --password=pass0< import.csv echo '2,2,2' >> import.csv run dolt table import -u one_pk import.csv [ "$status" -eq 1 ] - server_query repo1 1 dolt "" "SELECT * FROM one_pk ORDER by pk" "" + + run dolt sql-client -P $PORT -u dolt -q "SELECT * FROM one_pk" + [ $status -eq 0 ] + ! 
[[ $output =~ " 2 " ]] || false } @test "sql-server: test dolt sql interface works properly with autocommit" { @@ -188,11 +207,11 @@ SQL # create table with autocommit off and verify there are still no tables server_query repo1 0 dolt "" "CREATE TABLE one_pk ( - pk BIGINT NOT NULL COMMENT 'tag:0', - c1 BIGINT COMMENT 'tag:1', - c2 BIGINT COMMENT 'tag:2', - PRIMARY KEY (pk) - )" "" + pk BIGINT NOT NULL, + c1 BIGINT, + c2 BIGINT, + PRIMARY KEY (pk))" + run dolt ls [ "$status" -eq 0 ] [[ "$output" =~ "No tables in working set" ]] || false @@ -206,12 +225,12 @@ SQL [[ "$output" =~ "No tables in working set" ]] || false # create table with autocommit on and verify table creation - server_query repo1 1 dolt "" "CREATE TABLE one_pk ( + dolt sql-client -P $PORT -u dolt -q "CREATE TABLE one_pk ( pk BIGINT NOT NULL COMMENT 'tag:0', c1 BIGINT COMMENT 'tag:1', c2 BIGINT COMMENT 'tag:2', PRIMARY KEY (pk) - )" "" + )" run dolt ls [ "$status" -eq 0 ] [[ "$output" =~ "one_pk" ]] || false @@ -253,12 +272,12 @@ SQL start_sql_server repo1 # add some working changes - server_query repo1 1 dolt "" "INSERT INTO test VALUES (7,7);" + dolt sql-client -P $PORT -u dolt -q "INSERT INTO test VALUES (7,7);" run dolt status [ "$status" -eq 0 ] [[ "$output" =~ "test" ]] || false - server_query repo1 1 dolt "" "SELECT DOLT_RESET('--hard');" + dolt sql-client -P $PORT -u dolt -q "CALL DOLT_RESET('--hard');" run dolt status [ "$status" -eq 0 ] @@ -269,7 +288,7 @@ SQL server_query repo1 1 dolt "" " INSERT INTO test VALUES (8,8); - SELECT DOLT_RESET('--hard');" + CALL DOLT_RESET('--hard');" run dolt status [ "$status" -eq 0 ] @@ -285,12 +304,11 @@ SQL start_multi_db_server repo1 # create a table in repo1 - server_query repo1 1 dolt "" "CREATE TABLE r1_one_pk ( + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE r1_one_pk ( pk BIGINT NOT NULL COMMENT 'tag:0', c1 BIGINT COMMENT 'tag:1', c2 BIGINT COMMENT 'tag:2', - PRIMARY KEY (pk) - )" "" + PRIMARY KEY (pk))" # create a table in repo2 
server_query repo1 1 dolt "" "USE repo2; CREATE TABLE r2_one_pk ( @@ -298,11 +316,13 @@ SQL c3 BIGINT COMMENT 'tag:1', c4 BIGINT COMMENT 'tag:2', PRIMARY KEY (pk) - )" ";" + )" # validate tables in repos - server_query repo1 1 dolt "" "SHOW tables" "Tables_in_repo1\nr1_one_pk" - server_query repo1 1 dolt "" "USE repo2;SHOW tables" ";Tables_in_repo2\nr2_one_pk" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW tables" + [ $status -eq 0 ] + [[ $output =~ "r1_one_pk" ]] || false + server_query repo1 1 dolt "" "USE repo2; SHOW tables" ";Tables_in_repo2\nr2_one_pk" # put data in both server_query repo1 1 dolt "" " @@ -314,78 +334,147 @@ SQL INSERT INTO r2_one_pk (pk,c3) VALUES (1,1); INSERT INTO r2_one_pk (pk,c3,c4) VALUES (2,2,2),(3,3,3)" - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk ORDER BY pk" "pk,c1,c2\n0,None,None\n1,1,None\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk ORDER BY pk" "pk,c3,c4\n0,None,None\n1,1,None\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db repo2 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false server_query repo1 1 dolt "" " DELETE FROM r1_one_pk where pk=0; USE repo2; DELETE FROM r2_one_pk where pk=0" - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk ORDER BY pk" "pk,c1,c2\n1,1,None\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk ORDER BY pk" "pk,c3,c4\n1,1,None\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM 
repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo2 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + server_query repo1 1 dolt "" " UPDATE r1_one_pk SET c2=1 WHERE pk=1; USE repo2; UPDATE r2_one_pk SET c4=1 where pk=1" - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk ORDER BY pk" "pk,c1,c2\n1,1,1\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk ORDER BY pk" "pk,c3,c4\n1,1,1\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + echo $output + ! [[ $output =~ "0,," ]] || false + ! [[ $output =~ "1,1, " ]] || false + [[ $output =~ "1,1,1" ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db repo2 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! [[ $output =~ "0,," ]] || false + ! 
[[ $output =~ "1,1, " ]] || false + [[ $output =~ "1,1,1" ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false } @test "sql-server: test multi db without use statements" { - skip "autocommit fails when the current db is not the one being written" start_multi_db_server repo1 # create a table in repo1 - server_query repo1 1 dolt "" "CREATE TABLE repo1.r1_one_pk ( + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE repo1.r1_one_pk ( pk BIGINT NOT NULL COMMENT 'tag:0', c1 BIGINT COMMENT 'tag:1', c2 BIGINT COMMENT 'tag:2', - PRIMARY KEY (pk) - )" "" + PRIMARY KEY (pk))" # create a table in repo2 - server_query repo1 1 dolt "" "USE repo2; CREATE TABLE repo2.r2_one_pk ( + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE repo2.r2_one_pk ( pk BIGINT NOT NULL COMMENT 'tag:0', c3 BIGINT COMMENT 'tag:1', c4 BIGINT COMMENT 'tag:2', PRIMARY KEY (pk) - )" ";" + )" # validate tables in repos - server_query repo1 1 dolt "" "SHOW tables" "Table\nr1_one_pk" - server_query repo1 1 dolt "" "USE repo2;SHOW tables" ";Table\nr2_one_pk" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW tables" + [ $status -eq 0 ] + [[ $output =~ "r1_one_pk" ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "SHOW tables" + [ $status -eq 0 ] + [[ $output =~ "r2_one_pk" ]] || false - # put data in both - server_query repo1 1 dolt "" " - INSERT INTO repo1.r1_one_pk (pk) VALUES (0); - INSERT INTO repo1.r1_one_pk (pk,c1) VALUES (1,1); - INSERT INTO repo1.r1_one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3); - USE repo2; - INSERT INTO repo2.r2_one_pk (pk) VALUES (0); - INSERT INTO repo2.r2_one_pk (pk,c3) VALUES (1,1); - INSERT INTO repo2.r2_one_pk (pk,c3,c4) VALUES (2,2,2),(3,3,3)" + # put data in both using database scoped inserts + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk) VALUES (0)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk,c1) VALUES 
(1,1)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo1.r1_one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3)" + + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk) VALUES (0)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk,c3) VALUES (1,1)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO repo2.r2_one_pk (pk,c3,c4) VALUES (2,2,2),(3,3,3)" - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk" "pk,c1,c2\n0,None,None\n1,1,None\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk" "pk,c3,c4\n0,None,None\n1,1,None\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false - server_query repo1 1 dolt "" " - DELETE FROM repo1.r1_one_pk where pk=0; - USE repo2; - DELETE FROM repo2.r2_one_pk where pk=0" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "DELETE FROM repo1.r1_one_pk where pk=0" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "DELETE FROM repo2.r2_one_pk where pk=0" + + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! 
[[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk" "pk,c1,c2\n1,1,None\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk" "pk,c3,c4\n1,1,None\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! [[ $output =~ "0,," ]] || false + [[ $output =~ "1,1," ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false - server_query repo1 1 dolt "" " - UPDATE repo1.r1_one_pk SET c2=1 WHERE pk=1; - USE repo2; - UPDATE repo2.r2_one_pk SET c4=1 where pk=1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "UPDATE repo1.r1_one_pk SET c2=1 WHERE pk=1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "UPDATE repo2.r2_one_pk SET c4=1 where pk=1" - server_query repo1 1 dolt "" "SELECT * FROM repo1.r1_one_pk" "pk,c1,c2\n1,1,1\n2,2,2\n3,3,3" - server_query repo1 1 dolt "" "SELECT * FROM repo2.r2_one_pk" "pk,c3,c4\n1,1,1\n2,2,2\n3,3,3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo1.r1_one_pk ORDER BY pk" + [ $status -eq 0 ] + echo $output + ! [[ $output =~ "0,," ]] || false + ! [[ $output =~ "1,1, " ]] || false + [[ $output =~ "1,1,1" ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM repo2.r2_one_pk ORDER BY pk" + [ $status -eq 0 ] + ! [[ $output =~ "0,," ]] || false + ! 
[[ $output =~ "1,1, " ]] || false + [[ $output =~ "1,1,1" ]] || false + [[ $output =~ "2,2,2" ]] || false + [[ $output =~ "3,3,3" ]] || false } @test "sql-server: DOLT_ADD, DOLT_COMMIT, DOLT_CHECKOUT, DOLT_MERGE work together in server mode" { @@ -394,20 +483,22 @@ SQL cd repo1 start_sql_server repo1 - server_query repo1 1 dolt "" " - CREATE TABLE test ( + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE test ( pk int primary key - ); - INSERT INTO test VALUES (0),(1),(2); - SELECT DOLT_ADD('.'); - SELECT DOLT_COMMIT('-a', '-m', 'Step 1'); - SELECT DOLT_CHECKOUT('-b', 'feature-branch'); - " + )" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO test VALUES (0),(1),(2)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CALL DOLT_ADD('test')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CALL DOLT_COMMIT('-a', '-m', 'Step 1')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CALL DOLT_CHECKOUT('-b', 'feature-branch')" - server_query repo1 1 dolt "" "SELECT * FROM test order by pk" "pk\n0\n1\n2" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT * FROM test" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false server_query repo1 1 dolt "" " - SELECT DOLT_CHECKOUT('feature-branch'); + CALL DOLT_CHECKOUT('feature-branch'); INSERT INTO test VALUES (3); INSERT INTO test VALUES (4); INSERT INTO test VALUES (21232); @@ -415,11 +506,17 @@ SQL UPDATE test SET pk=21 WHERE pk=21232; " - server_query repo1 1 dolt "" "SELECT * FROM test" "pk\n0\n1\n2" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT * FROM test" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false + ! [[ $output =~ " 3 " ]] || false + ! 
[[ $output =~ " 21 " ]] || false server_query repo1 1 dolt "" " - SELECT DOLT_CHECKOUT('feature-branch'); - SELECT DOLT_COMMIT('-a', '-m', 'Insert 3'); + CALL DOLT_CHECKOUT('feature-branch'); + CALL DOLT_COMMIT('-a', '-m', 'Insert 3'); " server_query repo1 1 dolt "" " @@ -427,12 +524,19 @@ SQL INSERT INTO test VALUES (500001); DELETE FROM test WHERE pk=500001; UPDATE test SET pk=60 WHERE pk=500000; - SELECT DOLT_ADD('.'); - SELECT DOLT_COMMIT('-m', 'Insert 60'); - SELECT DOLT_MERGE('feature-branch','-m','merge feature-branch'); + CALL DOLT_ADD('.'); + CALL DOLT_COMMIT('-m', 'Insert 60'); + CALL DOLT_MERGE('feature-branch','-m','merge feature-branch'); " - server_query repo1 1 dolt "" "SELECT * FROM test order by pk" "pk\n0\n1\n2\n3\n21\n60" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT * FROM test" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false + [[ $output =~ " 3 " ]] || false + [[ $output =~ " 21 " ]] || false + [[ $output =~ " 60 " ]] || false run dolt status [ $status -eq 0 ] @@ -460,9 +564,18 @@ SQL SELECT DOLT_MERGE('feature-branch'); " - server_query repo1 1 dolt "" "SELECT * FROM test ORDER BY pk" "pk\n1\n2\n3\n1000" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT * FROM test" + [ $status -eq 0 ] + echo $output + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false + [[ $output =~ " 3 " ]] || false + [[ $output =~ " 1000 " ]] || false + ! 
[[ $output =~ " 0 " ]] || false - server_query repo1 1 dolt "" "SELECT COUNT(*) FROM dolt_log" "COUNT(*)\n3" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SELECT COUNT(*) FROM dolt_log" + [ $status -eq 0 ] + [[ $output =~ " 3 " ]] || false } @test "sql-server: Run queries on database without ever selecting it" { @@ -471,42 +584,70 @@ SQL start_multi_db_server repo1 # create table with autocommit on and verify table creation - server_query "" 1 dolt "" "CREATE TABLE repo2.one_pk ( + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE TABLE repo2.one_pk ( pk int, - PRIMARY KEY (pk) - )" + PRIMARY KEY (pk))" - server_query "" 1 dolt "" "INSERT INTO repo2.one_pk VALUES (0), (1), (2)" - server_query "" 1 dolt "" "SELECT * FROM repo2.one_pk" "pk\n0\n1\n2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "INSERT INTO repo2.one_pk VALUES (0), (1), (2)" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM repo2.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false - server_query "" 1 dolt "" "UPDATE repo2.one_pk SET pk=3 WHERE pk=2" - server_query "" 1 dolt "" "SELECT * FROM repo2.one_pk" "pk\n0\n1\n3" + dolt sql-client -P $PORT -u dolt --use-db '' -q "UPDATE repo2.one_pk SET pk=3 WHERE pk=2" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM repo2.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 3 " ]] || false + ! [[ $output =~ " 2 " ]] || false - server_query "" 1 dolt "" "DELETE FROM repo2.one_pk WHERE pk=3" - server_query "" 1 dolt "" "SELECT * FROM repo2.one_pk" "pk\n0\n1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "DELETE FROM repo2.one_pk WHERE pk=3" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM repo2.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + ! 
[[ $output =~ " 3 " ]] || false # Empty commit statements should not error - server_query "" 1 dolt "" "commit" + dolt sql-client -P $PORT -u dolt --use-db '' -q "commit" # create a new database and table and rerun - server_query "" 1 dolt "" "CREATE DATABASE testdb" "" - server_query "" 1 dolt "" "CREATE TABLE testdb.one_pk ( + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE DATABASE testdb" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE TABLE testdb.one_pk ( pk int, - PRIMARY KEY (pk) - )" "" + PRIMARY KEY (pk))" - server_query "" 1 dolt "" "INSERT INTO testdb.one_pk VALUES (0), (1), (2)" - server_query "" 1 dolt "" "SELECT * FROM testdb.one_pk" "pk\n0\n1\n2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "INSERT INTO testdb.one_pk VALUES (0), (1), (2)" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM testdb.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 2 " ]] || false - server_query "" 1 dolt "" "UPDATE testdb.one_pk SET pk=3 WHERE pk=2" - server_query "" 1 dolt "" "SELECT * FROM testdb.one_pk" "pk\n0\n1\n3" + dolt sql-client -P $PORT -u dolt --use-db '' -q "UPDATE testdb.one_pk SET pk=3 WHERE pk=2" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM testdb.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 3 " ]] || false + ! [[ $output =~ " 2 " ]] || false - server_query "" 1 dolt "" "DELETE FROM testdb.one_pk WHERE pk=3" - server_query "" 1 dolt "" "SELECT * FROM testdb.one_pk" "pk\n0\n1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "DELETE FROM testdb.one_pk WHERE pk=3" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM testdb.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + ! [[ $output =~ " 3 " ]] || false # one last query on insert db. 
- server_query "" 1 dolt "" "INSERT INTO repo2.one_pk VALUES (4)" - server_query "" 1 dolt "" "SELECT * FROM repo2.one_pk" "pk\n0\n1\n4" + dolt sql-client -P $PORT -u dolt --use-db '' -q "INSERT INTO repo2.one_pk VALUES (4)" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SELECT * FROM repo2.one_pk" + [ $status -eq 0 ] + [[ $output =~ " 0 " ]] || false + [[ $output =~ " 1 " ]] || false + [[ $output =~ " 4 " ]] || false # verify changes outside the session cd repo2 @@ -516,9 +657,9 @@ SQL run dolt sql --user=dolt -q "select * from one_pk" [ "$status" -eq 0 ] - [[ "$output" =~ "0" ]] || false - [[ "$output" =~ "1" ]] || false - [[ "$output" =~ "4" ]] || false + [[ "$output" =~ " 0 " ]] || false + [[ "$output" =~ " 1 " ]] || false + [[ "$output" =~ " 4 " ]] || false } @test "sql-server: create database without USE" { @@ -526,8 +667,8 @@ SQL start_multi_db_server repo1 - server_query "" 1 dolt "" "CREATE DATABASE newdb" "" - server_query "" 1 dolt "" "CREATE TABLE newdb.test (a int primary key)" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE DATABASE newdb" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE TABLE newdb.test (a int primary key)" "" # verify changes outside the session cd newdb @@ -543,7 +684,9 @@ SQL start_sql_server repo1 # check no tables on main - server_query repo1 1 dolt "" "SHOW Tables" "" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] # make some changes to main and commit to branch test_branch server_query repo1 1 dolt "" " @@ -557,8 +700,7 @@ SQL INSERT INTO one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3); CALL DOLT_ADD('.'); CALL dolt_commit('-am', 'test commit message', '--author', 'John Doe ');" - - server_query repo1 1 dolt "" "call dolt_add('.')" "status\n0" + run dolt ls [ "$status" -eq 0 ] [[ "$output" =~ "one_pk" ]] || false @@ -566,15 +708,15 @@ SQL run dolt sql --user=dolt -q "drop table one_pk" [ "$status" -eq 1 ] - server_query repo1 1 
dolt "" "drop table one_pk" "" - server_query repo1 1 dolt "" "call dolt_commit('-am', 'Dropped table one_pk')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "drop table one_pk" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_commit('-am', 'Dropped table one_pk')" run dolt ls [ "$status" -eq 0 ] ! [[ "$output" =~ "one_pk" ]] || false } -# TODO: Need to update testing logic allow queries for a multiple session. @test "sql-server: Create a temporary table and validate that it doesn't persist after a session closes" { skiponwindows "Missing dependencies" @@ -582,17 +724,21 @@ SQL start_sql_server repo1 # check no tables on main - server_query repo1 1 dolt "" "SHOW Tables" "" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] # Create a temporary table with some indexes - server_query repo1 1 dolt "" "CREATE TEMPORARY TABLE one_pk ( + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TEMPORARY TABLE one_pk ( pk int, c1 int, c2 int, PRIMARY KEY (pk), - INDEX idx_v1 (c1, c2) COMMENT 'hello there' - )" "" - server_query repo1 1 dolt "" "SHOW tables" "" # validate that it does have show tables + INDEX idx_v1 (c1, c2) COMMENT 'hello there')" + + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] } @test "sql-server: connect to another branch with connection string" { @@ -609,7 +755,9 @@ SQL PRIMARY KEY (pk) )" "" - server_query repo1 1 dolt "" "SHOW tables" "" # no tables on main + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] server_query "repo1/feature-branch" 1 dolt "" "SHOW Tables" "Tables_in_repo1/feature-branch\ntest" } @@ -649,15 +797,24 @@ SQL start_sql_server repo1 server_query repo1 1 dolt "" ' - select dolt_checkout("new"); + CALL dolt_checkout("new"); CREATE 
TABLE t (a int primary key, b int); INSERT INTO t VALUES (2,2),(3,3);' "" - server_query repo1 1 dolt "" "SHOW tables" "" # no tables on main - server_query repo1 1 dolt "" "set GLOBAL repo1_default_branch = 'refs/heads/new';" "" - server_query repo1 1 dolt "" "select @@GLOBAL.repo1_default_branch;" "@@GLOBAL.repo1_default_branch\nrefs/heads/new" - server_query repo1 1 dolt "" "select active_branch()" "active_branch()\nnew" - server_query repo1 1 dolt "" "SHOW tables" "Tables_in_repo1\nt" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] + + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "set GLOBAL repo1_default_branch = 'refs/heads/new'" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "select @@GLOBAL.repo1_default_branch;" + [ $status -eq 0 ] + [[ $output =~ "refs/heads/new" ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "select active_branch()" + [ $status -eq 0 ] + [[ $output =~ "new" ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [[ $output =~ " t " ]] || false } @test "sql-server: SET GLOBAL default branch as branch name" { @@ -673,11 +830,20 @@ SQL CREATE TABLE t (a int primary key, b int); INSERT INTO t VALUES (2,2),(3,3);' "" - server_query repo1 1 dolt "" "SHOW tables" "" # no tables on main - server_query repo1 1 dolt "" "set GLOBAL repo1_default_branch = 'new';" "" - server_query repo1 1 dolt "" "select @@GLOBAL.repo1_default_branch;" "@@GLOBAL.repo1_default_branch\nnew" - server_query repo1 1 dolt "" "select active_branch()" "active_branch()\nnew" - server_query repo1 1 dolt "" "SHOW tables" "Tables_in_repo1\nt" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [ "${#lines[@]}" -eq 0 ] + + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "set GLOBAL repo1_default_branch = 'new'" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "select 
@@GLOBAL.repo1_default_branch;" + [ $status -eq 0 ] + [[ $output =~ " new " ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "select active_branch()" + [ $status -eq 0 ] + [[ $output =~ " new " ]] || false + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "SHOW Tables" + [ $status -eq 0 ] + [[ $output =~ " t " ]] || false } @test "sql-server: disable_client_multi_statements makes create trigger work" { @@ -697,7 +863,10 @@ listener: EOF dolt sql-server --config ./config.yml --socket "dolt.$PORT.sock" & SERVER_PID=$! - # We do things manually here because we need to control CLIENT_MULTI_STATEMENTS. + sleep 1 + + # We do things manually here because we need to control + # CLIENT_MULTI_STATEMENTS. python3 -c ' import mysql.connector import sys @@ -742,7 +911,8 @@ listener: EOF dolt sql-server --config ./config.yml --socket "dolt.$PORT.sock" & SERVER_PID=$! - # We do things manually here because we need to control CLIENT_MULTI_STATEMENTS. + # We do things manually here because we need to control + # CLIENT_MULTI_STATEMENTS. 
python3 -c ' import mysql.connector import sys @@ -778,28 +948,42 @@ END""") cd repo1 start_sql_server repo1 - server_query repo1 1 dolt "" "CREATE TABLE t1(pk bigint primary key auto_increment, val int)" "" - server_query repo1 1 dolt "" "INSERT INTO t1 (val) VALUES (1)" - server_query repo1 1 dolt "" "SELECT * FROM t1" "pk,val\n1,1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE t1(pk bigint primary key auto_increment, val int)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO t1 (val) VALUES (1)" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM t1" + [ $status -eq 0 ] + [[ $output =~ "1,1" ]] || false - server_query repo1 1 dolt "" "INSERT INTO t1 (val) VALUES (2)" - server_query repo1 1 dolt "" "SELECT * FROM t1" "pk,val\n1,1\n2,2" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO t1 (val) VALUES (2)" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM t1" + [ $status -eq 0 ] + [[ $output =~ "1,1" ]] || false + [[ $output =~ "2,2" ]] || false - run server_query repo1 1 dolt "" "call dolt_add('.')" - run server_query repo1 1 dolt "" "call dolt_commit('-am', 'table with two values')" - run server_query repo1 1 dolt "" "call dolt_branch('new_branch')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_commit('-am', 'table with two values')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_branch('new_branch')" server_query repo1/new_branch 1 dolt "" "INSERT INTO t1 (val) VALUES (3)" server_query repo1/new_branch 1 dolt "" "SELECT * FROM t1" "pk,val\n1,1\n2,2\n3,3" - server_query repo1 1 dolt "" "INSERT INTO t1 (val) VALUES (4)" - server_query repo1 1 dolt "" "SELECT * FROM t1" "pk,val\n1,1\n2,2\n4,4" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO t1 (val) VALUES (4)" + run dolt sql-client -P $PORT -u dolt 
--use-db repo1 --result-format csv -q "SELECT * FROM t1" + [ $status -eq 0 ] + [[ $output =~ "1,1" ]] || false + [[ $output =~ "2,2" ]] || false + [[ $output =~ "4,4" ]] || false + ! [[ $output =~ "3,3" ]] || false # drop the table on main, should keep counting from 4 - server_query repo1 1 dolt "" "drop table t1;" - server_query repo1 1 dolt "" "CREATE TABLE t1(pk bigint primary key auto_increment, val int)" "" - server_query repo1 1 dolt "" "INSERT INTO t1 (val) VALUES (4)" - server_query repo1 1 dolt "" "SELECT * FROM t1" "pk,val\n4,4" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "drop table t1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE t1(pk bigint primary key auto_increment, val int)" "" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO t1 (val) VALUES (4)" + run dolt sql-client -P $PORT -u dolt --use-db repo1 --result-format csv -q "SELECT * FROM t1" + [[ $output =~ "4,4" ]] || false + ! [[ $output =~ "1,1" ]] || false + ! [[ $output =~ "2,2" ]] || false + ! 
[[ $output =~ "3,3" ]] || false } @test "sql-server: sql-push --set-remote within session" { @@ -811,13 +995,13 @@ END""") start_sql_server repo1 dolt push origin main - run server_query repo1 1 dolt "" "select dolt_push() as p" "p\n0" 1 + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_push()" + [ $status -ne 0 ] [[ "$output" =~ "the current branch has no upstream branch" ]] || false - server_query repo1 1 dolt "" "select dolt_push('--set-upstream', 'origin', 'main') as p" "p\n1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_push('--set-upstream', 'origin', 'main')" - skip "In-memory branch doesn't track upstream correctly" - server_query repo1 1 dolt "" "select dolt_push() as p" "p\n1" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_push()" } @test "sql-server: replicate to backup after sql-session commit" { @@ -829,43 +1013,45 @@ END""") dolt config --local --add sqlserver.global.DOLT_REPLICATE_TO_REMOTE backup1 start_sql_server repo1 - server_query repo1 1 dolt "" " - CREATE TABLE test ( - pk int primary key - ); - INSERT INTO test VALUES (0),(1),(2); - SELECT DOLT_ADD('.'); - SELECT DOLT_COMMIT('-m', 'Step 1');" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CREATE TABLE test (pk int primary key);" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "INSERT INTO test VALUES (0),(1),(2)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CALL DOLT_ADD('.')" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "CALL DOLT_COMMIT('-m', 'Step 1');" cd .. 
dolt clone file://./bac1 repo3 cd repo3 run dolt sql -q "select * from test" -r csv [ "$status" -eq 0 ] - [[ "${lines[0]}" =~ "pk" ]] - [[ "${lines[1]}" =~ "0" ]] - [[ "${lines[2]}" =~ "1" ]] - [[ "${lines[3]}" =~ "2" ]] + [ "${lines[0]}" = "pk" ] + [ "${lines[1]}" = "0" ] + [ "${lines[2]}" = "1" ] + [ "${lines[3]}" = "2" ] } -@test "sql-server: create database with no starting repo" { +@test "sql-server: create multiple databases with no starting repo" { skiponwindows "Missing dependencies" mkdir no_dolt && cd no_dolt start_sql_server - server_query "" 1 dolt "" "create database test1" - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntest1" - server_query "test1" 1 dolt "" "create table a(x int)" - server_query "test1" 1 dolt "" "select dolt_add('.')" - server_query "test1" 1 dolt "" "insert into a values (1), (2)" - server_query "test1" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false - server_query "" 1 dolt "" "create database test2" - server_query "test2" 1 dolt "" "create table b(x int)" - server_query "test2" 1 dolt "" "select dolt_add('.')" - server_query "test2" 1 dolt "" "insert into b values (1), (2)" - server_query "test2" 1 dolt "" "select dolt_commit('-a', '-m', 'new table b')" + dolt sql-client -P $PORT -u dolt --use-db 'test1' -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db 'test1' -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db 'test1' -q "insert into a values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db 'test1' -q "call dolt_commit('-a', '-m', 'new table a')" + + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test2" + dolt sql-client -P $PORT -u dolt 
--use-db 'test2' -q "create table b(x int)" + dolt sql-client -P $PORT -u dolt --use-db 'test2' -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db 'test2' -q "insert into b values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db 'test2' -q "call dolt_commit('-a', '-m', 'new table b')" cd test1 run dolt log @@ -887,13 +1073,13 @@ END""") cd .. - server_query "" 1 dolt "" "create database test3" - server_query "test3" 1 dolt "" "create table c(x int)" - server_query "test3" 1 dolt "" "select dolt_add('.')" - server_query "test3" 1 dolt "" "insert into c values (1), (2)" - run server_query "test3" 1 dolt "" "select dolt_commit('-a', '-m', 'new table c')" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test3" + dolt sql-client -P $PORT -u dolt --use-db 'test3' -q "create table c(x int)" + dolt sql-client -P $PORT -u dolt --use-db 'test3' -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db 'test3' -q "insert into c values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db 'test3' -q "call dolt_commit('-a', '-m', 'new table c')" - server_query "" 1 dolt "" "drop database test2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "drop database test2" [ -d test3 ] [ ! -d test2 ] @@ -901,43 +1087,57 @@ END""") # make sure the databases exist on restart stop_sql_server start_sql_server - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntest1\ntest3" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false + [[ $output =~ "test3" ]] || false + ! 
[[ $output =~ "test2" ]] || false } -@test "sql-server: drop database with active connections" { +@test "sql-server: can't drop branch qualified database names" { skiponwindows "Missing dependencies" - skip_nbf_dolt "json ordering of keys differs" mkdir no_dolt && cd no_dolt start_sql_server - server_query "" 1 dolt "" "create database test1" - server_query "" 1 dolt "" "create database test2" - server_query "" 1 dolt "" "create database test3" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test3" - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntest1\ntest2\ntest3" - server_query "test1" 1 dolt "" "create table a(x int)" - server_query "test1" 1 dolt "" "select dolt_add('.')" - server_query "test1" 1 dolt "" "insert into a values (1), (2)" - run server_query "test1" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false + [[ $output =~ "test2" ]] || false + [[ $output =~ "test3" ]] || false - server_query "test2" 1 dolt "" "create table a(x int)" - server_query "test2" 1 dolt "" "select dolt_add('.')" - server_query "test2" 1 dolt "" "insert into a values (3), (4)" - server_query "test2" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "insert into a values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_commit('-a', '-m', 'new table a')" - server_query "test3" 1 dolt "" "create table a(x int)" - server_query "test3" 1 dolt 
"" "select dolt_add('.')" - server_query "test3" 1 dolt "" "insert into a values (5), (6)" - server_query "test3" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "insert into a values (3), (4)" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "call dolt_commit('-a', '-m', 'new table a')" - server_query "test1" 1 dolt "" "call dolt_checkout('-b', 'newbranch')" - server_query "test1/newbranch" 1 dolt "" "select * from a" "x\n1\n2" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "insert into a values (5), (6)" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "call dolt_commit('-a', '-m', 'new table a')" - server_query "test2" 1 dolt "" "call dolt_checkout('-b', 'newbranch')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_branch('newbranch')" + # Something weird is going on here. 
This should not need an order by + server_query "test1/newbranch" 1 dolt "" "select * from a order by x" "x\n1\n2" + + dolt sql-client -P $PORT -u dolt --use-db test2 -q "call dolt_branch('newbranch')" server_query "test2/newbranch" 1 dolt "" "select * from a" "x\n3\n4" - server_query "" 1 dolt "" "drop database TEST1" + # uppercase to ensure db names are treated case insensitive + dolt sql-client -P $PORT -u dolt --use-db '' -q "drop database TEST1" run server_query "test1/newbranch" 1 dolt "" "select * from a" "" 1 [[ "$output" =~ "database not found" ]] || false @@ -946,13 +1146,15 @@ END""") run server_query "" 1 dolt "" "drop database \`test2/newbranch\`" "" 1 [[ "$output" =~ "unable to drop revision database: test2/newbranch" ]] || false - - server_query "" 1 dolt "" "drop database TEST2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "drop database TEST2" run server_query "test2/newbranch" 1 dolt "" "select * from a" "" 1 [[ "$output" =~ "database not found" ]] || false - server_query "test3" 1 dolt "" "select * from a" "x\n5\n6" + run dolt sql-client -P $PORT -u dolt --use-db test3 -q "select * from a" + [ $status -eq 0 ] + [[ $output =~ " 5 " ]] || false + [[ $output =~ " 6 " ]] || false } @test "sql-server: connect to databases case insensitive" { @@ -961,16 +1163,22 @@ END""") mkdir no_dolt && cd no_dolt start_sql_server - server_query "" 1 dolt "" "create database Test1" - - server_query "" 1 dolt "" "show databases" "Database\nTest1\ninformation_schema\nmysql" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database Test1" + + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "Test1" ]] || false server_query "" 1 dolt "" "use test1; create table a(x int);" server_query "" 1 dolt "" "use TEST1; insert into a values (1), (2);" - run server_query "" 1 dolt "" "use test1; select dolt_add('.'); select 
dolt_commit('-a', '-m', 'new table a');" + server_query "" 1 dolt "" "use test1; call dolt_add('.'); call dolt_commit('-a', '-m', 'new table a');" server_query "" 1 dolt "" "use test1; call dolt_checkout('-b', 'newbranch');" server_query "" 1 dolt "" "use \`TEST1/newbranch\`; select * from a order by x" ";x\n1\n2" server_query "" 1 dolt "" "use \`test1/newbranch\`; select * from a order by x" ";x\n1\n2" - server_query "" 1 dolt "" "use \`TEST1/NEWBRANCH\`" "" "database not found: TEST1/NEWBRANCH" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "use \`TEST1/NEWBRANCH\`" + [ $status -ne 0 ] + [[ $output =~ "database not found: TEST1/NEWBRANCH" ]] || false server_query "" 1 dolt "" "create database test2; use test2; select database();" ";;database()\ntest2" server_query "" 1 dolt "" "use test2; drop database TEST2; select database();" ";;database()\nNone" @@ -982,14 +1190,19 @@ END""") mkdir no_dolt && cd no_dolt mkdir db_dir start_sql_server_with_args --host 0.0.0.0 --user dolt --data-dir=db_dir + + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false - server_query "" 1 dolt "" "create database test1" - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntest1" - server_query "test1" 1 dolt "" "create table a(x int)" - server_query "test1" 1 dolt "" "select dolt_add('.')" - server_query "test1" 1 dolt "" "insert into a values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "insert into a values (1), (2)" - server_query "test1" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call 
dolt_commit('-a', '-m', 'new table a')" [ -d db_dir/test1 ] @@ -1000,13 +1213,13 @@ END""") cd ../.. - server_query "" 1 dolt "" "create database test3" - server_query "test3" 1 dolt "" "create table c(x int)" - server_query "test3" 1 dolt "" "select dolt_add('.')" - server_query "test3" 1 dolt "" "insert into c values (1), (2)" - server_query "test3" 1 dolt "" "call dolt_commit('-a', '-m', 'new table c')" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test3" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "create table c(x int)" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "insert into c values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db test3 -q "call dolt_commit('-a', '-m', 'new table c')" - server_query "" 1 dolt "" "drop database test1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "drop database test1" [ -d db_dir/test3 ] [ ! -d db_dir/test1 ] @@ -1014,7 +1227,11 @@ END""") # make sure the databases exist on restart stop_sql_server start_sql_server_with_args --host 0.0.0.0 --user dolt --data-dir=db_dir - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntest3" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test3" ]] || false } @test "sql-server: create database errors" { @@ -1025,14 +1242,21 @@ END""") touch file_exists start_sql_server - server_query "" 1 dolt "" "create database test1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1" # Error on creation, already exists - server_query "" 1 dolt "" "create database test1" "" "exists" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "create database test1" + [ $status -ne 0 ] + [[ $output =~ exists ]] || false # Files / dirs in the way - server_query "" 1 dolt "" "create database 
dir_exists" "" "exists" - server_query "" 1 dolt "" "create database file_exists" "" "exists" + run dolt sql-client -P $PORT -u dolt --use-db '' -q "create database dir_exists" + [ $status -ne 0 ] + [[ $output =~ exists ]] || false + + run dolt sql-client -P $PORT -u dolt --use-db '' -q "create database file_exists" + [ $status -ne 0 ] + [[ $output =~ exists ]] || false } @test "sql-server: create database with existing repo" { @@ -1041,21 +1265,25 @@ END""") cd repo1 start_sql_server - server_query "" 1 dolt "" "create database test1" - server_query "repo1" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\nrepo1\ntest1" - server_query "test1" 1 dolt "" "create table a(x int)" - server_query "test1" 1 dolt "" "select dolt_add('.')" - server_query "test1" 1 dolt "" "insert into a values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "create database test1" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false + [[ $output =~ "repo1" ]] || false - # not bothering to check the results of the commit here - server_query "test1" 1 dolt "" "call dolt_commit('-a', '-m', 'new table a')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "create table a(x int)" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "insert into a values (1), (2)" - server_query "" 1 dolt "" "create database test2" - server_query "test2" 1 dolt "" "create table b(x int)" - server_query "test2" 1 dolt "" "select dolt_add('.')" - server_query "test2" 1 dolt "" "insert into b values (1), (2)" - # not bothering to check the results of the commit here - server_query "test2" 1 dolt "" "call dolt_commit('-a', '-m', 'new table b')" + dolt sql-client -P $PORT -u dolt --use-db test1 -q "call dolt_commit('-a', '-m', 'new table a')" + + dolt 
sql-client -P $PORT -u dolt --use-db repo1 -q "create database test2" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "create table b(x int)" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "call dolt_add('.')" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "insert into b values (1), (2)" + dolt sql-client -P $PORT -u dolt --use-db test2 -q "call dolt_commit('-a', '-m', 'new table b')" cd test1 run dolt log @@ -1079,7 +1307,13 @@ END""") # make sure the databases exist on restart stop_sql_server start_sql_server - server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\nrepo1\ntest1\ntest2" + run dolt sql-client -P $PORT -u dolt --use-db repo1 -q "show databases" + [ $status -eq 0 ] + [[ $output =~ "mysql" ]] || false + [[ $output =~ "information_schema" ]] || false + [[ $output =~ "test1" ]] || false + [[ $output =~ "repo1" ]] || false + [[ $output =~ "test2" ]] || false } @test "sql-server: fetch uses database tempdir from different working directory" { @@ -1114,7 +1348,7 @@ databases: start_sql_server_with_config repo1 server.yaml - server_query repo1 1 dolt "" "call dolt_fetch()" "" + dolt sql-client -P $PORT -u dolt --use-db repo1 -q "call dolt_fetch()" } @test "sql-server: run mysql from shell" { @@ -1181,7 +1415,7 @@ databases: @test "sql-server: sql-server lock for new databases" { cd repo1 start_sql_server - server_query repo1 1 dolt "" "create database newdb" "" + dolt sql-client -P $PORT -u dolt --use-db '' -q "create database newdb" cd newdb PORT=$( definePORT ) run dolt sql-server -P $PORT --socket "dolt.$PORT.sock" @@ -1210,7 +1444,11 @@ databases: SERVER_PID=$! 
wait_for_connection $PORT 5000 - server_query repo2 1 dolt "" "select 1 as col1" "col1\n1" + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "select 1 as col1" + [ $status -eq 0 ] + [[ $output =~ col1 ]] || false + [[ $output =~ " 1 " ]] || false + run grep '\"/tmp/mysql.sock\"' log.txt [ "$status" -eq 0 ] [ "${#lines[@]}" -eq 1 ] @@ -1236,7 +1474,10 @@ databases: SERVER_PID=$! wait_for_connection $PORT 5000 - server_query repo2 1 dolt "" "select 1 as col1" "col1\n1" + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "select 1 as col1" + [ $status -eq 0 ] + [[ $output =~ col1 ]] || false + [[ $output =~ " 1 " ]] || false run grep '\"/tmp/mysql.sock\"' log.txt [ "$status" -eq 0 ] @@ -1289,7 +1530,10 @@ behavior: SERVER_PID=$! wait_for_connection $PORT 5000 - server_query repo2 1 dolt "" "select 1 as col1" "col1\n1" + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "select 1 as col1" + [ $status -eq 0 ] + [[ $output =~ col1 ]] || false + [[ $output =~ " 1 " ]] || false run grep "dolt.$PORT.sock" log.txt [ "$status" -eq 0 ] @@ -1344,8 +1588,6 @@ s.close() } @test "sql-server: sigterm running server and restarting works correctly" { - skip "Skipping while we debug why this test hangs for hours in CI" - start_sql_server run ls repo1/.dolt [[ "$output" =~ "sql-server.lock" ]] || false @@ -1361,8 +1603,12 @@ s.close() run ls repo2/.dolt [[ "$output" =~ "sql-server.lock" ]] || false + skip "this now fails because of the socket file not being cleaned up" start_sql_server - server_query repo1 1 dolt "" "SELECT 1" "1\n1" + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "select 1 as col1" + [ $status -eq 0 ] + [[ $output =~ col1 ]] || false + [[ $output =~ " 1 " ]] || false stop_sql_server # Try adding fake pid numbers. 
Could happen via debugger or something @@ -1370,7 +1616,10 @@ s.close() echo "4123423" > repo2/.dolt/sql-server.lock start_sql_server - server_query repo1 1 dolt "" "SELECT 1" "1\n1" + run dolt sql-client -P $PORT -u dolt --use-db repo2 -q "select 1 as col1" + [ $status -eq 0 ] + [[ $output =~ col1 ]] || false + [[ $output =~ " 1 " ]] || false stop_sql_server # Add malicious text to lockfile and expect to fail @@ -1379,6 +1628,14 @@ s.close() run start_sql_server [[ "$output" =~ "database locked by another sql-server; either clone the database to run a second server" ]] || false [ "$status" -eq 1 ] + + rm repo1/.dolt/sql-server.lock + + # this test was hanging as the server is stopped from the above error + # but stop_sql_server in teardown tries to kill process that is not + # running anymore, so start the server again, and it will be stopped in + # teardown + start_sql_server } @test "sql-server: create a database when no current database is set" { @@ -1409,15 +1666,16 @@ s.close() cd nodb start_sql_server >> server_log.txt 2>&1 - server_query "" 1 dolt "" "CREATE DATABASE mydb1" - server_query "" 1 dolt "" "CREATE DATABASE mydb2" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE DATABASE mydb1" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE DATABASE mydb2" [ -d mydb1 ] [ -d mydb2 ] rm -rf mydb2 - server_query "" 1 dolt "" "SHOW DATABASES" "" 1 + run dolt sql-client -P $PORT -u dolt --use-db '' -q "SHOW DATABASES" + [ $status -ne 0 ] run grep "panic" server_log.txt [ "${#lines[@]}" -eq 0 ] @@ -1426,8 +1684,9 @@ s.close() [ "${#lines[@]}" -eq 1 ] # this tests fails sometimes as the server is stopped from the above error - # but stop_sql_server in teardown tries to kill process that is not running anymore, - # so start the server again, and it will be stopped in teardown + # but stop_sql_server in teardown tries to kill process that is not + # running anymore, so start the server again, and it will be stopped in + # teardown start_sql_server } @@ 
-1441,12 +1700,12 @@ s.close() start_sql_server >> server_log.txt 2>&1 # 'doltdb' will be nested database inside 'mydb' - server_query "" 1 dolt "" "CREATE DATABASE doltdb" + dolt sql-client -P $PORT -u dolt --use-db '' -q "CREATE DATABASE doltdb" run dolt sql -q "SHOW DATABASES" [[ "$output" =~ "mydb" ]] || false [[ "$output" =~ "doltdb" ]] || false - server_query "" 1 dolt "" "DROP DATABASE mydb" + dolt sql-client -P $PORT -u dolt --use-db '' -q "DROP DATABASE mydb" run grep "database not found: mydb" server_log.txt [ "${#lines[@]}" -eq 0 ] @@ -1469,7 +1728,7 @@ s.close() [[ "$output" =~ "mydb" ]] || false start_sql_server >> server_log.txt 2>&1 - server_query "mydb" 1 dolt "" "DROP DATABASE mydb;" + dolt sql-client -P $PORT -u dolt --use-db '' -q "DROP DATABASE mydb;" run grep "database not found: mydb" server_log.txt [ "${#lines[@]}" -eq 0 ] @@ -1489,7 +1748,7 @@ s.close() cd .. start_sql_server >> server_log.txt 2>&1 - server_query "" 1 dolt "" "DROP DATABASE my_db;" + dolt sql-client -P $PORT -u dolt --use-db '' -q "DROP DATABASE my_db;" run grep "database not found: my_db" server_log.txt [ "${#lines[@]}" -eq 0 ]