mirror of https://github.com/dolthub/dolt.git
synced 2026-01-07 08:50:34 -06:00

Merge remote-tracking branch 'origin/main' into taylor/diff-dot-sql

This commit is contained in:
@@ -28,7 +28,7 @@ if [ -z "$MODE" ]; then
fi

nomsFormat="ldnbf"
if [ "$NOMS_BIN_FORMAT" == "__DOLT__"]; then
if [ "$NOMS_BIN_FORMAT" == "__DOLT__" ]; then
nomsFormat="doltnbf"
fi

@@ -38,6 +38,11 @@ if [ -z "$MODE" ]; then
exit 1
fi

nomsFormat="ldnbf"
if [ "$NOMS_BIN_FORMAT" == "__DOLT__" ]; then
nomsFormat="doltnbf"
fi

# use first 8 characters of TO_VERSION to differentiate
# jobs
short=${TO_VERSION:0:8}

@@ -49,7 +54,7 @@ sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s

timesuffix=`date +%s%N`

jobname="$actorShort-$timesuffix"
jobname="$actorShort-$nomsFormat-$timesuffix"

timeprefix=$(date +%Y/%m/%d)
.github/workflows/ci-bats-unix.yaml (vendored)
@@ -22,10 +22,10 @@ jobs:
fail-fast: true
matrix:
os: [ ubuntu-22.04, macos-latest ]
dolt_fmt: [ "__DOLT__", "__DOLT_DEV__", "__LD_1__" ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
exclude:
- os: "macos-latest"
dolt_fmt: ["__DOLT_DEV__", "__LD_1__" ]
dolt_fmt: "__LD_1__"
env:
use_credentials: ${{ secrets.AWS_SECRET_ACCESS_KEY != '' && secrets.AWS_ACCESS_KEY_ID != '' }}
steps:
@@ -73,10 +73,10 @@ Show changes between the working and staged tables, changes between the working
This form is to view the changes you made relative to the staging area for the next commit. In other words, the differences are what you could tell Dolt to further add but you still haven't. You can stage these changes by using dolt add.

{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}}.
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...HEAD{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}}.
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] <commit>..<commit> [<tables>...]{{.EmphasisRight}}
This is synonymous to the above form (without the ..) to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.
@@ -139,7 +139,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
ap.SupportsFlag(CachedFlag, "c", "Show only the staged data changes.")
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
ap.SupportsFlag(MergeBase, "", "Uses merge base of {{.LessThan}}from_commit{{.GreaterThan}} and {{.LessThan}}to_commit{{.GreaterThan}} (or HEAD if not supplied) as {{.LessThan}}from_commit{{.GreaterThan}}")
ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
return ap
}
@@ -393,7 +393,7 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv,

if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1])
return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
dArgs.toRef = refs[1]
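The three-dot fix above pairs with the documented equivalence: dolt diff A...B diffs merge-base(A, B) against B, while A..B diffs A against B directly. A minimal standalone sketch of the split (hypothetical helper, not dolt's actual parser; assumes only the standard strings package):

    // splitDots separates "A..B" and "A...B" revision strings.
    func splitDots(rev string) (from, to string, threeDot bool) {
        if i := strings.Index(rev, "..."); i >= 0 {
            return rev[:i], rev[i+3:], true // three-dot: diff merge-base(from, to) vs to
        }
        if i := strings.Index(rev, ".."); i >= 0 {
            return rev[:i], rev[i+2:], false // two-dot: diff from vs to
        }
        return rev, "", false // no dots: single-revision form
    }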
@@ -183,7 +183,7 @@ func (fk ForeignKey) ValidateReferencedTableSchema(sch schema.Schema) error {
fk.Name, fk.ReferencedTableName)
}
}
if !sch.Indexes().Contains(fk.ReferencedTableIndex) {
if (fk.ReferencedTableIndex != "" && !sch.Indexes().Contains(fk.ReferencedTableIndex)) || (fk.ReferencedTableIndex == "" && sch.GetPKCols().Size() < len(fk.ReferencedTableColumns)) {
return fmt.Errorf("foreign key `%s` has entered an invalid state, referenced table `%s` is missing the index `%s`",
fk.Name, fk.ReferencedTableName, fk.ReferencedTableIndex)
}

@@ -203,7 +203,7 @@ func (fk ForeignKey) ValidateTableSchema(sch schema.Schema) error {
return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` has unexpected schema", fk.Name, fk.TableName)
}
}
if !sch.Indexes().Contains(fk.TableIndex) {
if (fk.TableIndex != "" && !sch.Indexes().Contains(fk.TableIndex)) || (fk.TableIndex == "" && sch.GetPKCols().Size() < len(fk.TableColumns)) {
return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` is missing the index `%s`",
fk.Name, fk.TableName, fk.TableIndex)
}
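Both rewritten conditions encode the same new rule: an empty index name now stands for the primary key, which is a valid backing index as long as the PK covers at least as many columns as the foreign key. A hedged distillation (hypothetical helper, names not from the codebase):

    // fkIndexValid mirrors the negation of the error conditions above.
    func fkIndexValid(indexName string, schemaHasIndex bool, pkCols, fkCols int) bool {
        if indexName == "" {
            return pkCols >= fkCols // PK-backed: the PK must cover the FK columns
        }
        return schemaHasIndex // a named index must still exist in the schema
    }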
@@ -16,6 +16,7 @@ package actions

import (
"context"
"errors"
"io"
"math"
"strconv"

@@ -61,7 +62,6 @@ func InferColumnTypesFromTableReader(ctx context.Context, rd table.ReadCloser, a

var curr, prev row.Row
i := newInferrer(rd.GetSchema(), args)

OUTER:
for j := 0; true; j++ {
var err error
@@ -130,10 +130,8 @@ func (inf *inferrer) inferColumnTypes() (*schema.ColCollection, error) {
col.TypeInfo = inferredTypes[tag]
col.Tag = schema.ReservedTagMin + tag

col.Constraints = []schema.ColConstraint{schema.NotNullConstraint{}}
if inf.nullable.Contains(tag) {
col.Constraints = []schema.ColConstraint(nil)
}
// for large imports, it is possible to miss all the null values, so we cannot accurately add not null constraint
col.Constraints = []schema.ColConstraint(nil)

cols = append(cols, col)
return false, nil
@@ -218,32 +216,27 @@ func leastPermissiveNumericType(strVal string, floatThreshold float64) (ti typei
return ti
}

if strings.Contains(strVal, "-") {
i, err := strconv.ParseInt(strVal, 10, 64)
if err != nil {
return typeinfo.UnknownType
}
if i >= math.MinInt32 && i <= math.MaxInt32 {
return typeinfo.Int32Type
} else {
return typeinfo.Int64Type
}
// always parse as signed int
i, err := strconv.ParseInt(strVal, 10, 64)

// use string for out of range
if errors.Is(err, strconv.ErrRange) {
return typeinfo.StringDefaultType
}

if err != nil {
return typeinfo.UnknownType
}

// handle leading zero case
if len(strVal) > 1 && strVal[0] == '0' {
return typeinfo.StringDefaultType
}

if i >= math.MinInt32 && i <= math.MaxInt32 {
return typeinfo.Int32Type
} else {
ui, err := strconv.ParseUint(strVal, 10, 64)
if err != nil {
return typeinfo.UnknownType
}

// handle leading zero case
if len(strVal) > 1 && strVal[0] == '0' {
return typeinfo.StringDefaultType
}

if ui <= math.MaxUint32 {
return typeinfo.Uint32Type
} else {
return typeinfo.Uint64Type
}
return typeinfo.Int64Type
}
}
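The rewrite drops the old signed/unsigned fork: every value is parsed as a signed 64-bit integer, with out-of-range and leading-zero inputs demoted to strings instead of being guessed as unsigned types. A self-contained sketch of the resulting decision table (hypothetical function name; mirrors the branches above):

    // inferNumericKind condenses the new integer-inference rules.
    func inferNumericKind(s string) string {
        i, err := strconv.ParseInt(s, 10, 64) // always parse as signed int
        if errors.Is(err, strconv.ErrRange) {
            return "string" // out of int64 range, e.g. math.MaxUint64
        }
        if err != nil {
            return "unknown"
        }
        if len(s) > 1 && s[0] == '0' {
            return "string" // leading zero, e.g. "01"
        }
        if i >= math.MinInt32 && i <= math.MaxInt32 {
            return "int32"
        }
        return "int64"
    }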
@@ -286,14 +279,13 @@ func chronoTypes() []typeinfo.TypeInfo {
func numericTypes() []typeinfo.TypeInfo {
// prefer:
// ints over floats
// unsigned over signed
// smaller over larger
return []typeinfo.TypeInfo{
//typeinfo.Uint8Type,
//typeinfo.Uint16Type,
//typeinfo.Uint24Type,
typeinfo.Uint32Type,
typeinfo.Uint64Type,
//typeinfo.Uint32Type,
//typeinfo.Uint64Type,

//typeinfo.Int8Type,
//typeinfo.Int16Type,
@@ -398,12 +390,6 @@ func findCommonNumericType(nums typeInfoSet) typeinfo.TypeInfo {
typeinfo.Int24Type,
typeinfo.Int16Type,
typeinfo.Int8Type,

typeinfo.Uint64Type,
typeinfo.Uint32Type,
typeinfo.Uint24Type,
typeinfo.Uint16Type,
typeinfo.Uint8Type,
}
for _, numType := range mostToLeast {
if setHasType(nums, numType) {
@@ -49,14 +49,14 @@ func TestLeastPermissiveType(t *testing.T) {
{"lower bool", "true", 0.0, typeinfo.BoolType},
{"upper bool", "FALSE", 0.0, typeinfo.BoolType},
{"yes", "yes", 0.0, typeinfo.StringDefaultType},
{"one", "1", 0.0, typeinfo.Uint32Type},
{"one", "1", 0.0, typeinfo.Int32Type},
{"negative one", "-1", 0.0, typeinfo.Int32Type},
{"negative one point 0", "-1.0", 0.0, typeinfo.Float32Type},
{"negative one point 0 with FT of 0.1", "-1.0", 0.1, typeinfo.Int32Type},
{"negative one point one with FT of 0.1", "-1.1", 0.1, typeinfo.Float32Type},
{"negative one point 999 with FT of 1.0", "-1.999", 1.0, typeinfo.Int32Type},
{"zero point zero zero zero zero", "0.0000", 0.0, typeinfo.Float32Type},
{"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Uint64Type},
{"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Int64Type},
{"bigger than max int", strconv.FormatUint(math.MaxUint64, 10) + "0", 0.0, typeinfo.StringDefaultType},
}
@@ -75,7 +75,7 @@ func TestLeastPermissiveNumericType(t *testing.T) {
floatThreshold float64
expType typeinfo.TypeInfo
}{
{"zero", "0", 0.0, typeinfo.Uint32Type},
{"zero", "0", 0.0, typeinfo.Int32Type},
{"zero float", "0.0", 0.0, typeinfo.Float32Type},
{"zero float with floatThreshold of 0.1", "0.0", 0.1, typeinfo.Int32Type},
{"negative float", "-1.3451234", 0.0, typeinfo.Float32Type},

@@ -85,8 +85,8 @@ func TestLeastPermissiveNumericType(t *testing.T) {
{"all zeroes", "0000", 0.0, typeinfo.StringDefaultType},
{"leading zeroes", "01", 0.0, typeinfo.StringDefaultType},
{"negative int", "-1234", 0.0, typeinfo.Int32Type},
{"fits in uint64 but not int64", strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.Uint64Type},
{"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.UnknownType},
{"fits in uint64 but not int64", strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType},
{"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType},
{"math.MinInt64", strconv.FormatInt(math.MinInt64, 10), 0.0, typeinfo.Int64Type},
}
@@ -142,14 +134,6 @@ func testFindCommonType(t *testing.T) {
},
expType: typeinfo.Int64Type,
},
{
name: "all unsigned ints",
inferSet: typeInfoSet{
typeinfo.Uint32Type: {},
typeinfo.Uint64Type: {},
},
expType: typeinfo.Uint64Type,
},
{
name: "all floats",
inferSet: typeInfoSet{
@@ -159,35 +151,31 @@ func testFindCommonType(t *testing.T) {
expType: typeinfo.Float64Type,
},
{
name: "32 bit ints and uints",
name: "32 bit ints",
inferSet: typeInfoSet{
typeinfo.Int32Type: {},
typeinfo.Uint32Type: {},
typeinfo.Int32Type: {},
},
expType: typeinfo.Int32Type,
},
{
name: "64 bit ints and uints",
name: "64 bit ints",
inferSet: typeInfoSet{
typeinfo.Int64Type: {},
typeinfo.Uint64Type: {},
typeinfo.Int64Type: {},
},
expType: typeinfo.Int64Type,
},
{
name: "32 bit ints, uints, and floats",
name: "32 bit ints and floats",
inferSet: typeInfoSet{
typeinfo.Int32Type: {},
typeinfo.Uint32Type: {},
typeinfo.Float32Type: {},
},
expType: typeinfo.Float32Type,
},
{
name: "64 bit ints, uints, and floats",
name: "64 bit ints and floats",
inferSet: typeInfoSet{
typeinfo.Int64Type: {},
typeinfo.Uint64Type: {},
typeinfo.Float64Type: {},
},
expType: typeinfo.Float64Type,
@@ -228,11 +216,6 @@ func testFindCommonType(t *testing.T) {

func testFindCommonTypeFromSingleType(t *testing.T) {
allTypes := []typeinfo.TypeInfo{
typeinfo.Uint8Type,
typeinfo.Uint16Type,
typeinfo.Uint24Type,
typeinfo.Uint32Type,
typeinfo.Uint64Type,
typeinfo.Int8Type,
typeinfo.Int16Type,
typeinfo.Int24Type,
@@ -388,7 +371,7 @@ func TestInferSchema(t *testing.T) {
},
map[string]typeinfo.TypeInfo{
"int": typeinfo.Int32Type,
"uint": typeinfo.Uint64Type,
"uint": typeinfo.StringDefaultType,
"uuid": typeinfo.UuidType,
"float": typeinfo.Float32Type,
"bool": typeinfo.BoolType,

@@ -404,7 +387,7 @@ func TestInferSchema(t *testing.T) {
floatThreshold: 0,
},
map[string]typeinfo.TypeInfo{
"mix": typeinfo.Uint64Type,
"mix": typeinfo.StringDefaultType,
"uuid": typeinfo.UuidType,
},
nil,

@@ -500,7 +483,7 @@ func TestInferSchema(t *testing.T) {

err = allCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
idx := schema.IndexOfConstraint(col.Constraints, schema.NotNullConstraintType)
assert.True(t, idx == -1 == test.nullableCols.Contains(col.Name), "%s unexpected nullability", col.Name)
assert.True(t, idx == -1, "%s unexpected not null constraint", col.Name)
return false, nil
})
require.NoError(t, err)
@@ -244,7 +244,7 @@ func nomsParentFkConstraintViolations(
continue
}

postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(postParent.Index, postParentRow)
postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(foreignKey.TableColumns, postParent.Index, postParentRow)
if err != nil {
return nil, false, err
}
@@ -362,8 +362,14 @@ func nomsChildFkConstraintViolations(
preChildRowData types.Map,
) (*doltdb.Table, bool, error) {
foundViolations := false
postParentIndexTags := postParent.Index.IndexedColumnTags()
postChildIndexTags := postChild.Index.IndexedColumnTags()
var postParentIndexTags, postChildIndexTags []uint64
if postParent.Index.Name() == "" {
postParentIndexTags = foreignKey.ReferencedTableColumns
postChildIndexTags = foreignKey.TableColumns
} else {
postParentIndexTags = postParent.Index.IndexedColumnTags()
postChildIndexTags = postChild.Index.IndexedColumnTags()
}
postChildCVMap, err := postChild.Table.GetConstraintViolations(ctx)
if err != nil {
return nil, false, err
@@ -411,7 +417,7 @@ func nomsChildFkConstraintViolations(
continue
}

postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChild.Index, postChildRow)
postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChildIndexTags, postChild.Index, postChildRow)
if err != nil {
return nil, false, err
}
@@ -496,6 +502,28 @@ func newConstraintViolationsLoadedTable(ctx context.Context, tblName, idxName st
if err != nil {
return nil, false, err
}

// Create Primary Key Index
if idxName == "" {
pkCols := sch.GetPKCols()
pkIdxColl := schema.NewIndexCollection(pkCols, pkCols)
pkIdxProps := schema.IndexProperties{
IsUnique: true,
IsUserDefined: false,
Comment: "",
}
pkIdx := schema.NewIndex("", pkCols.SortedTags, pkCols.SortedTags, pkIdxColl, pkIdxProps)
return &constraintViolationsLoadedTable{
TableName: trueTblName,
Table: tbl,
Schema: sch,
RowData: rowData,
Index: pkIdx,
IndexSchema: pkIdx.Schema(),
IndexData: rowData,
}, true, nil
}

idx, ok := sch.Indexes().GetByNameCaseInsensitive(idxName)
if !ok {
return &constraintViolationsLoadedTable{
@@ -64,7 +64,7 @@ func prollyParentFkConstraintViolations(
err = prolly.DiffMaps(ctx, preParentRowData, postParentRowData, func(ctx context.Context, diff tree.Diff) error {
switch diff.Type {
case tree.RemovedDiff, tree.ModifiedDiff:
partialKey, hadNulls := makePartialKey(partialKB, postParent.Index, postParent.Schema, val.Tuple(diff.Key), val.Tuple(diff.From), preParentRowData.Pool())
partialKey, hadNulls := makePartialKey(partialKB, foreignKey.ReferencedTableColumns, postParent.Index, postParent.Schema, val.Tuple(diff.Key), val.Tuple(diff.From), preParentRowData.Pool())
if hadNulls {
// row had some nulls previously, so it couldn't have been a parent
return nil

@@ -147,7 +147,7 @@ func prollyChildFkConstraintViolations(
switch diff.Type {
case tree.AddedDiff, tree.ModifiedDiff:
k, v := val.Tuple(diff.Key), val.Tuple(diff.To)
partialKey, hasNulls := makePartialKey(partialKB, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool())
partialKey, hasNulls := makePartialKey(partialKB, foreignKey.TableColumns, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool())
if hasNulls {
return nil
}

@@ -289,8 +289,11 @@ func createCVsForPartialKeyMatches(
return createdViolation, nil
}

func makePartialKey(kb *val.TupleBuilder, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) {
for i, tag := range idxSch.IndexedColumnTags() {
func makePartialKey(kb *val.TupleBuilder, tags []uint64, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) {
if idxSch.Name() != "" {
tags = idxSch.IndexedColumnTags()
}
for i, tag := range tags {
if j, ok := tblSch.GetPKCols().TagToIdx[tag]; ok {
if k.FieldIsNull(j) {
return nil, true
@@ -27,6 +27,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
@@ -277,6 +278,16 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
return nil, err
}

removedTables, err := getRemovedTableNames(ctx, oldParent, oldRoot)
if err != nil {
return nil, err
}

migrated, err = migrated.RemoveTables(ctx, true, false, removedTables...)
if err != nil {
return nil, err
}

err = oldRoot.IterTables(ctx, func(name string, oldTbl *doltdb.Table, sch schema.Schema) (bool, error) {
ok, err := oldTbl.HasConflicts(ctx)
if err != nil {
@@ -345,6 +356,21 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
return migrated, nil
}

// renames also get returned here
func getRemovedTableNames(ctx context.Context, prev, curr *doltdb.RootValue) ([]string, error) {
prevNames, err := prev.GetTableNames(ctx)
if err != nil {
return nil, err
}
tblNameSet := set.NewStrSet(prevNames)
currNames, err := curr.GetTableNames(ctx)
if err != nil {
return nil, err
}
tblNameSet.Remove(currNames...)
return tblNameSet.AsSlice(), nil
}

func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTbl, newParentTbl *doltdb.Table) (*doltdb.Table, error) {
idx, err := oldParentTbl.GetRowData(ctx)
if err != nil {
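Because getRemovedTableNames is a plain set difference over names, a rename surfaces as a removal of the old name, which is what the comment above calls out. An illustrative use of the same set API (hypothetical values):

    prev := []string{"people", "orders"} // "orders" is later renamed to "sales"
    curr := []string{"people", "sales"}
    s := set.NewStrSet(prev)
    s.Remove(curr...)
    removed := s.AsSlice() // ["orders"]: the rename counts as a removal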
@@ -163,15 +163,19 @@ func ReduceToIndexKeysFromTagMap(nbf *types.NomsBinFormat, idx schema.Index, tag
}

// ReduceToIndexPartialKey creates an index record from a primary storage record.
func ReduceToIndexPartialKey(idx schema.Index, r Row) (types.Tuple, error) {
func ReduceToIndexPartialKey(tags []uint64, idx schema.Index, r Row) (types.Tuple, error) {
var vals []types.Value
for _, tag := range idx.IndexedColumnTags() {
if idx.Name() != "" {
tags = idx.IndexedColumnTags()
}
for _, tag := range tags {
val, ok := r.GetColVal(tag)
if !ok {
val = types.NullValue
}
vals = append(vals, types.Uint(tag), val)
}

return types.NewTuple(r.Format(), vals...)
}
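The threaded tags parameter is the fallback for primary-key-backed indexes: an unnamed index defers to the caller's foreign key column tags, while a named index still supplies its own. A condensed sketch of that rule (hypothetical helper, not in the codebase):

    // effectiveTags isolates the fallback ReduceToIndexPartialKey now applies.
    func effectiveTags(fkTags []uint64, idx schema.Index) []uint64 {
        if idx.Name() != "" {
            return idx.IndexedColumnTags() // a real named index wins
        }
        return fkTags // unnamed PK stand-in: use the FK's own column tags
    }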
@@ -68,12 +68,17 @@ type indexImpl struct {
comment string
}

func NewIndex(name string, tags, allTags []uint64, indexColl *indexCollectionImpl, props IndexProperties) Index {
func NewIndex(name string, tags, allTags []uint64, indexColl IndexCollection, props IndexProperties) Index {
var indexCollImpl *indexCollectionImpl
if indexColl != nil {
indexCollImpl = indexColl.(*indexCollectionImpl)
}

return &indexImpl{
name: name,
tags: tags,
allTags: allTags,
indexColl: indexColl,
indexColl: indexCollImpl,
isUnique: props.IsUnique,
isUserDefined: props.IsUserDefined,
comment: props.Comment,
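Widening the parameter to the IndexCollection interface lets code outside this file, such as the primary-key index construction earlier in the commit, pass a collection in, while the struct still stores the concrete type; the nil check keeps a nil interface from feeding a type assertion. A minimal sketch of that guard (hypothetical helper):

    // asImpl reproduces the nil-tolerant downcast NewIndex performs.
    func asImpl(c IndexCollection) *indexCollectionImpl {
        if c == nil {
            return nil // nil interface: keep a nil pointer, skip the assertion
        }
        return c.(*indexCollectionImpl) // panics only for a foreign implementation
    }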
@@ -462,7 +462,9 @@ func TestDropPks(t *testing.T) {
fk, ok := foreignKeyCollection.GetByNameCaseInsensitive(childFkName)
assert.True(t, ok)
assert.Equal(t, childName, fk.TableName)
assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex)
if tt.fkIdxName != "" && fk.ReferencedTableIndex != "" {
assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex)
}

parent, ok, err := root.GetTable(ctx, parentName)
assert.NoError(t, err)
@@ -1482,7 +1482,7 @@ var DoltConstraintViolationTransactionTests = []queries.TransactionTest{
},
{
Query: "/* client b */ INSERT INTO child VALUES (1, 1);",
ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `nk01br56`, table: `child`, referenced table: `parent`, key: `[1]`",
ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `0050p5ek`, table: `child`, referenced table: `parent`, key: `[1]`",
},
},
},
@@ -1971,76 +1971,30 @@ func (t *AlterableDoltTable) AddForeignKey(ctx *sql.Context, sqlFk sql.ForeignKe
refColTags[i] = refCol.Tag
}

var tableIndexName, refTableIndexName string
tableIndex, ok, err := findIndexWithPrefix(t.sch, sqlFk.Columns)
if err != nil {
return err
}
if !ok {
// The engine matched on a primary key, and Dolt does not yet support using the primary key within the
// schema.Index interface (which is used internally to represent indexes across the codebase). In the
// meantime, we must generate a duplicate key over the primary key.
//TODO: use the primary key as-is
idxReturn, err := creation.CreateIndex(ctx, tbl, "", sqlFk.Columns, false, false, "", editor.Options{
ForeignKeyChecksDisabled: true,
Deaf: t.opts.Deaf,
Tempdir: t.opts.Tempdir,
})
if err != nil {
return err
}
tableIndex = idxReturn.NewIndex
tbl = idxReturn.NewTable
root, err = root.PutTable(ctx, t.tableName, idxReturn.NewTable)
if sqlFk.IsSelfReferential() {
refTbl = idxReturn.NewTable
}
// Use secondary index if found; otherwise it will use empty string, indicating primary key
if ok {
tableIndexName = tableIndex.Name()
}

refTableIndex, ok, err := findIndexWithPrefix(refSch, sqlFk.ParentColumns)
if err != nil {
return err
}
if !ok {
// The engine matched on a primary key, and Dolt does not yet support using the primary key within the
// schema.Index interface (which is used internally to represent indexes across the codebase). In the
// meantime, we must generate a duplicate key over the primary key.
//TODO: use the primary key as-is
var refPkTags []uint64
for _, i := range refSch.GetPkOrdinals() {
refPkTags = append(refPkTags, refSch.GetAllCols().GetByIndex(i).Tag)
}

var colNames []string
for _, t := range refColTags {
c, _ := refSch.GetAllCols().GetByTag(t)
colNames = append(colNames, c.Name)
}

// Our duplicate index is only unique if it's the entire primary key (which is by definition unique)
unique := len(refPkTags) == len(refColTags)
idxReturn, err := creation.CreateIndex(ctx, refTbl, "", colNames, unique, false, "", editor.Options{
ForeignKeyChecksDisabled: true,
Deaf: t.opts.Deaf,
Tempdir: t.opts.Tempdir,
})
if err != nil {
return err
}
refTbl = idxReturn.NewTable
refTableIndex = idxReturn.NewIndex
root, err = root.PutTable(ctx, sqlFk.ParentTable, idxReturn.NewTable)
if err != nil {
return err
}
// Use secondary index if found; otherwise it will use empty string, indicating primary key
if ok {
refTableIndexName = refTableIndex.Name()
}

doltFk = doltdb.ForeignKey{
Name: sqlFk.Name,
TableName: sqlFk.Table,
TableIndex: tableIndex.Name(),
TableIndex: tableIndexName,
TableColumns: colTags,
ReferencedTableName: sqlFk.ParentTable,
ReferencedTableIndex: refTableIndex.Name(),
ReferencedTableIndex: refTableIndexName,
ReferencedTableColumns: refColTags,
OnUpdate: onUpdateRefAction,
OnDelete: onDeleteRefAction,
@@ -105,6 +105,44 @@ func (tea *BulkImportTEA) Get(ctx context.Context, keyHash hash.Hash, key types.
return &doltKVP{k: key, v: v}, true, nil
}

func (tea *BulkImportTEA) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) {
tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx)
if err != nil {
return nil, err
}
key := tplKeyVal.(types.Tuple)
tplValVal, err := r.NomsMapValue(idxSch).Value(ctx)
if err != nil {
return nil, err
}
val := tplValVal.(types.Tuple)
keyHash, err := key.Hash(key.Format())
if err != nil {
return nil, err
}
matches = append(matches, hashedTuple{key, val, keyHash})
}

if err != io.EOF {
return nil, err
}

for i := len(matches) - 1; i >= 0; i-- {
if _, ok := tea.deletes[matches[i].hash]; ok {
matches[i] = matches[len(matches)-1]
matches = matches[:len(matches)-1]
}
}
return matches, nil
}

// Commit is the default behavior and does nothing
func (tea *BulkImportTEA) Commit(ctx context.Context, nbf *types.NomsBinFormat) error {
return nil
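The delete-filtering loop at the end of HasPartial is the backwards swap-remove idiom: walk from the end, overwrite a dead element with the current last one, and shrink the slice; iterating backwards guarantees the element swapped in was already vetted. A standalone sketch (hedged; the delete-set value type is an assumption, not the method itself):

    // filterDeletes mirrors the loop above over a generic delete set.
    func filterDeletes(matches []hashedTuple, deletes map[hash.Hash]struct{}) []hashedTuple {
        for i := len(matches) - 1; i >= 0; i-- {
            if _, ok := deletes[matches[i].hash]; ok {
                matches[i] = matches[len(matches)-1] // order is not preserved
                matches = matches[:len(matches)-1]
            }
        }
        return matches
    }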
@@ -280,18 +280,36 @@ func (te *pkTableEditor) GetIndexedRows(ctx context.Context, key types.Tuple, in
if err != nil {
return nil, err
}
kvp, ok, err := te.tea.Get(ctx, keyHash, key)

pkKeys, err := te.tea.HasPartial(ctx, te.tSch, keyHash, key)
if err != nil {
return nil, err
}
if !ok {
if len(pkKeys) == 0 {
return nil, nil
}
dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v)
if err != nil {
return nil, err

rows := make([]row.Row, len(pkKeys))
for i, pkKey := range pkKeys {
pkKeyHash, err := pkKey.key.Hash(pkKey.key.Format())
if err != nil {
return nil, err
}
kvp, ok, err := te.tea.Get(ctx, pkKeyHash, pkKey.key)
if err != nil {
return nil, err
}
if !ok {
return nil, nil
}
dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v)
if err != nil {
return nil, err
}
rows[i] = dRow
}
return []row.Row{dRow}, nil

return rows, nil
}

return nil, fmt.Errorf("an index editor for `%s` could not be found on table `%s`", indexName, te.name)
@@ -16,7 +16,12 @@ package editor

import (
"context"
"io"

"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"

@@ -43,6 +48,9 @@ type TableEditAccumulator interface {
// This assumes that the given hash is for the given key.
Get(ctx context.Context, keyHash hash.Hash, key types.Tuple) (*doltKVP, bool, error)

// HasPartial returns the key/value pairs in the current TableEditAccumulator that match the given partialKey
HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error)

// Commit applies the in memory edits to the list of committed in memory edits
Commit(ctx context.Context, nbf *types.NomsBinFormat) error
@@ -174,6 +182,51 @@ func (tea *tableEditAccumulatorImpl) Get(ctx context.Context, keyHash hash.Hash,
return &doltKVP{k: key, v: v}, true, err
}

func (tea *tableEditAccumulatorImpl) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) {
tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx)
if err != nil {
return nil, err
}
key := tplKeyVal.(types.Tuple)
tplValVal, err := r.NomsMapValue(idxSch).Value(ctx)
if err != nil {
return nil, err
}
val := tplValVal.(types.Tuple)
keyHash, err := key.Hash(key.Format())
if err != nil {
return nil, err
}
matches = append(matches, hashedTuple{key, val, keyHash})
}

if err != io.EOF {
return nil, err
}

orderedMods := []*inMemModifications{tea.committed, tea.uncommitted}
for _, mods := range orderedMods {
for i := len(matches) - 1; i >= 0; i-- {
if _, ok := mods.adds[matches[i].hash]; ok {
matches[i] = matches[len(matches)-1]
matches = matches[:len(matches)-1]
}
}
if added, ok := mods.adds[partialKeyHash]; ok {
matches = append(matches, hashedTuple{key: added.k, value: added.v})
}
}

return matches, nil
}

func (tea *tableEditAccumulatorImpl) flushUncommitted() {
// if we are not already actively writing edits to the uncommittedEA then change the state and push all in mem edits
// to a types.EditAccumulator
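This implementation layers the in-memory modifications over the flushed row data: storage matches superseded by an add are dropped, then the add's own key/value is appended, with committed applied before uncommitted so the newest edit wins. A generic sketch of that layering (hypothetical map-based stand-in, not the real accumulator types):

    // overlay applies edit layers in order; later layers win.
    func overlay(base, committed, uncommitted map[string]string) map[string]string {
        merged := make(map[string]string, len(base))
        for k, v := range base {
            merged[k] = v // the storage snapshot seeds the result
        }
        for _, layer := range []map[string]string{committed, uncommitted} {
            for k, v := range layer {
                merged[k] = v
            }
        }
        return merged
    }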
@@ -194,7 +194,7 @@ func (suite *BlockStoreSuite) TestChunkStorePutMoreThanMemTable() {
if suite.putCountFn != nil {
suite.Equal(2, suite.putCountFn())
}
specs, err := suite.store.tables.ToSpecs()
specs, err := suite.store.tables.toSpecs()
suite.NoError(err)
suite.Len(specs, 2)
}
@@ -415,22 +415,15 @@ func (suite *BlockStoreSuite) TestChunkStorePutWithRebase() {

func TestBlockStoreConjoinOnCommit(t *testing.T) {
stats := &Stats{}
assertContainAll := func(t *testing.T, store chunks.ChunkStore, srcs ...chunkSource) {
rdrs := make(chunkReaderGroup, len(srcs))
for i, src := range srcs {
c, err := src.Clone()
assertContainAll := func(t *testing.T, store chunks.ChunkStore, sources ...chunkSource) {
ctx := context.Background()
for _, src := range sources {
err := extractAllChunks(ctx, src, func(rec extractRecord) {
ok, err := store.Has(context.Background(), hash.Hash(rec.a))
require.NoError(t, err)
assert.True(t, ok)
})
require.NoError(t, err)
rdrs[i] = c
}
chunkChan := make(chan extractRecord, mustUint32(rdrs.count()))
err := rdrs.extract(context.Background(), chunkChan)
require.NoError(t, err)
close(chunkChan)

for rec := range chunkChan {
ok, err := store.Has(context.Background(), hash.Hash(rec.a))
require.NoError(t, err)
assert.True(t, ok)
}
}
@@ -509,7 +502,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
assert.True(t, ok)
assertContainAll(t, smallTableStore, srcs...)
for _, src := range srcs {
err := src.Close()
err := src.close()
require.NoError(t, err)
}
})

@@ -546,7 +539,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
assert.True(t, ok)
assertContainAll(t, smallTableStore, srcs...)
for _, src := range srcs {
err := src.Close()
err := src.close()
require.NoError(t, err)
}
})
@@ -36,12 +36,12 @@ func newReaderFromIndexData(q MemoryQuotaProvider, idxData []byte, name addr, tr
return &chunkSourceAdapter{tr, name}, nil
}

func (csa chunkSourceAdapter) Close() error {
return csa.tableReader.Close()
func (csa chunkSourceAdapter) close() error {
return csa.tableReader.close()
}

func (csa chunkSourceAdapter) Clone() (chunkSource, error) {
tr, err := csa.tableReader.Clone()
func (csa chunkSourceAdapter) clone() (chunkSource, error) {
tr, err := csa.tableReader.clone()
if err != nil {
return &chunkSourceAdapter{}, err
}
@@ -64,7 +64,7 @@ func makeTestSrcs(t *testing.T, tableSizes []uint32, p tablePersister) (srcs chu
}
cs, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
c, err := cs.Clone()
c, err := cs.clone()
require.NoError(t, err)
srcs = append(srcs, c)
}
@@ -76,7 +76,7 @@ func TestConjoin(t *testing.T) {
makeTestTableSpecs := func(tableSizes []uint32, p tablePersister) (specs []tableSpec) {
for _, src := range makeTestSrcs(t, tableSizes, p) {
specs = append(specs, tableSpec{mustAddr(src.hash()), mustUint32(src.count())})
err := src.Close()
err := src.close()
require.NoError(t, err)
}
return
@@ -93,28 +93,34 @@ func TestConjoin(t *testing.T) {
}

assertContainAll := func(t *testing.T, p tablePersister, expect, actual []tableSpec) {
open := func(specs []tableSpec) (srcs chunkReaderGroup) {
open := func(specs []tableSpec) (sources chunkSources) {
for _, sp := range specs {
cs, err := p.Open(context.Background(), sp.name, sp.chunkCount, nil)

if err != nil {
require.NoError(t, err)
}

srcs = append(srcs, cs)
sources = append(sources, cs)
}
return
}
expectSrcs, actualSrcs := open(expect), open(actual)
chunkChan := make(chan extractRecord, mustUint32(expectSrcs.count()))
err := expectSrcs.extract(context.Background(), chunkChan)
require.NoError(t, err)
close(chunkChan)

for rec := range chunkChan {
has, err := actualSrcs.has(rec.a)
expectSrcs, actualSrcs := open(expect), open(actual)

ctx := context.Background()
for _, src := range expectSrcs {
err := extractAllChunks(ctx, src, func(rec extractRecord) {
var ok bool
for _, src := range actualSrcs {
var err error
ok, err = src.has(rec.a)
require.NoError(t, err)
if ok {
break
}
}
assert.True(t, ok)
})
require.NoError(t, err)
assert.True(t, has)
}
}
@@ -118,12 +118,12 @@ func (mmtr *fileTableReader) hash() (addr, error) {
return mmtr.h, nil
}

func (mmtr *fileTableReader) Close() error {
return mmtr.tableReader.Close()
func (mmtr *fileTableReader) close() error {
return mmtr.tableReader.close()
}

func (mmtr *fileTableReader) Clone() (chunkSource, error) {
tr, err := mmtr.tableReader.Clone()
func (mmtr *fileTableReader) clone() (chunkSource, error) {
tr, err := mmtr.tableReader.clone()
if err != nil {
return &fileTableReader{}, err
}
@@ -218,6 +218,6 @@ func (mt *memTable) write(haver chunkReader, stats *Stats) (name addr, data []by
return name, buff[:tableSize], count, nil
}

func (mt *memTable) Close() error {
func (mt *memTable) close() error {
return nil
}
@@ -307,22 +307,10 @@ func (crg chunkReaderGroup) uncompressedLen() (data uint64, err error) {
return
}

func (crg chunkReaderGroup) extract(ctx context.Context, chunks chan<- extractRecord) error {
for _, haver := range crg {
err := haver.extract(ctx, chunks)

if err != nil {
return err
}
}

return nil
}

func (crg chunkReaderGroup) Close() error {
func (crg chunkReaderGroup) close() error {
var firstErr error
for _, c := range crg {
err := c.Close()
err := c.close()
if err != nil && firstErr == nil {
firstErr = err
}
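chunkReaderGroup.close keeps the first failure while still attempting every close, the same pattern tableSet.close adopts later in this commit. As a generic helper it would look like this (hypothetical, phrased over the standard io.Closer):

    // closeAll closes everything and reports only the first error.
    func closeAll(cs []io.Closer) error {
        var firstErr error
        for _, c := range cs {
            if err := c.Close(); err != nil && firstErr == nil {
                firstErr = err // remember the first failure, keep closing
            }
        }
        return firstErr
    }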
@@ -95,12 +95,12 @@ func (ccs *persistingChunkSource) getReader() chunkReader {
return ccs.cs
}

func (ccs *persistingChunkSource) Close() error {
func (ccs *persistingChunkSource) close() error {
// persistingChunkSource does not own |cs| or |mt|. No need to close them.
return nil
}

func (ccs *persistingChunkSource) Clone() (chunkSource, error) {
func (ccs *persistingChunkSource) clone() (chunkSource, error) {
// persistingChunkSource does not own |cs| or |mt|. No need to Clone.
return ccs, nil
}
@@ -240,20 +240,6 @@ func (ccs *persistingChunkSource) size() (uint64, error) {
return ccs.cs.size()
}

func (ccs *persistingChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error {
err := ccs.wait()

if err != nil {
return err
}

if ccs.cs == nil {
return ErrNoChunkSource
}

return ccs.cs.extract(ctx, chunks)
}

type emptyChunkSource struct{}

func (ecs emptyChunkSource) has(h addr) (bool, error) {

@@ -304,14 +290,10 @@ func (ecs emptyChunkSource) calcReads(reqs []getRecord, blockSize uint64) (reads
return 0, true, nil
}

func (ecs emptyChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error {
func (ecs emptyChunkSource) close() error {
return nil
}

func (ecs emptyChunkSource) Close() error {
return nil
}

func (ecs emptyChunkSource) Clone() (chunkSource, error) {
func (ecs emptyChunkSource) clone() (chunkSource, error) {
return ecs, nil
}
@@ -579,18 +579,20 @@ func compactSourcesToBuffer(sources chunkSources) (name addr, data []byte, chunk
tw := newTableWriter(buff, nil)
errString := ""

ctx := context.Background()
for _, src := range sources {
chunks := make(chan extractRecord)
ch := make(chan extractRecord)
go func() {
defer close(chunks)
err := src.extract(context.Background(), chunks)

defer close(ch)
err = extractAllChunks(ctx, src, func(rec extractRecord) {
ch <- rec
})
if err != nil {
chunks <- extractRecord{a: mustAddr(src.hash()), err: err}
ch <- extractRecord{a: mustAddr(src.hash()), err: err}
}
}()

for rec := range chunks {
for rec := range ch {
if rec.err != nil {
errString += fmt.Sprintf("Failed to extract %s:\n %v\n******\n\n", rec.a, rec.err)
continue
@@ -625,3 +627,25 @@ func (ftp fakeTablePersister) Open(ctx context.Context, name addr, chunkCount ui
func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ manifestContents) error {
return chunks.ErrUnsupportedOperation
}

func extractAllChunks(ctx context.Context, src chunkSource, cb func(rec extractRecord)) (err error) {
var index tableIndex
if index, err = src.index(); err != nil {
return err
}

var a addr
for i := uint32(0); i < index.ChunkCount(); i++ {
_, err = index.IndexEntry(i, &a)
if err != nil {
return err
}

data, err := src.get(ctx, a, nil)
if err != nil {
return err
}
cb(extractRecord{a: a, data: data})
}
return
}
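extractAllChunks replaces the removed channel-based extract methods with a synchronous walk of the source's index, invoking the callback once per chunk. A hypothetical call site (ctx and src assumed in scope) collecting every address:

    var addrs []addr
    err := extractAllChunks(ctx, src, func(rec extractRecord) {
        addrs = append(addrs, rec.a) // rec.data carries the chunk bytes
    })
    if err != nil {
        // the index walk or a chunk read failed
    }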
@@ -294,7 +294,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
}
}

newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}

@@ -302,7 +302,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
nbs.upstream = updatedContents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return manifestContents{}, err
}

@@ -371,7 +371,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
}
}

newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}

@@ -379,7 +379,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
nbs.upstream = updatedContents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return manifestContents{}, err
}

@@ -587,7 +587,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
}

if exists {
newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats)

if err != nil {
return nil, err

@@ -596,7 +596,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
nbs.upstream = contents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return nil, err
}

@@ -647,7 +647,7 @@ func (nbs *NomsBlockStore) addChunk(ctx context.Context, h addr, data []byte) bo
nbs.mt = newMemTable(nbs.mtSize)
}
if !nbs.mt.addChunk(h, data) {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = newMemTable(nbs.mtSize)
return nbs.mt.addChunk(h, data)
}

@@ -922,7 +922,7 @@ func (nbs *NomsBlockStore) Rebase(ctx context.Context) error {
return nil
}

newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats)
if err != nil {
return err
}

@@ -930,7 +930,7 @@ func (nbs *NomsBlockStore) Rebase(ctx context.Context) error {
nbs.upstream = contents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return err
}

@@ -952,7 +952,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash)
anyPossiblyNovelChunks := func() bool {
nbs.mu.Lock()
defer nbs.mu.Unlock()
return nbs.mt != nil || nbs.tables.Novel() > 0
return nbs.mt != nil || len(nbs.tables.novel) > 0
}

if !anyPossiblyNovelChunks() && current == last {

@@ -984,7 +984,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash)
}

if cnt > preflushChunkCount {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = nil
}
}

@@ -1033,7 +1033,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}

handleOptimisticLockFailure := func(upstream manifestContents) error {
newTables, err := nbs.tables.Rebase(ctx, upstream.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, upstream.specs, nbs.stats)
if err != nil {
return err
}

@@ -1041,7 +1041,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
nbs.upstream = upstream
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()

if last != upstream.root {
return errOptimisticLockFailedRoot

@@ -1067,7 +1067,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}

if cnt > 0 {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = nil
}
}

@@ -1081,7 +1081,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return err
}

newTables, err := nbs.tables.Rebase(ctx, newUpstream.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, newUpstream.specs, nbs.stats)

if err != nil {
return err

@@ -1090,7 +1090,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
nbs.upstream = newUpstream
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return err
}

@@ -1098,7 +1098,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return errOptimisticLockFailedTables
}

specs, err := nbs.tables.ToSpecs()
specs, err := nbs.tables.toSpecs()
if err != nil {
return err
}

@@ -1139,7 +1139,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return handleOptimisticLockFailure(upstream)
}

newTables, err := nbs.tables.Flatten(ctx)
newTables, err := nbs.tables.flatten(ctx)

if err != nil {
return nil

@@ -1158,7 +1158,7 @@ func (nbs *NomsBlockStore) Version() string {
}

func (nbs *NomsBlockStore) Close() error {
return nbs.tables.Close()
return nbs.tables.close()
}

func (nbs *NomsBlockStore) Stats() interface{} {

@@ -1574,7 +1574,7 @@ func (nbs *NomsBlockStore) gcTableSize() (uint64, error) {
return 0, err
}

avgTableSize := total / uint64(nbs.tables.Upstream()+nbs.tables.Novel()+1)
avgTableSize := total / uint64(nbs.tables.Size()+1)

// max(avgTableSize, defaultMemTableSize)
if avgTableSize > nbs.mtSize {

@@ -1622,14 +1622,14 @@ func (nbs *NomsBlockStore) swapTables(ctx context.Context, specs []tableSpec) (e
nbs.mt = newMemTable(nbs.mtSize)

// clear nbs.tables.novel
nbs.tables, err = nbs.tables.Flatten(ctx)
nbs.tables, err = nbs.tables.flatten(ctx)
if err != nil {
return err
}

// replace nbs.tables.upstream with gc compacted tables
nbs.upstream = upstream
nbs.tables, err = nbs.tables.Rebase(ctx, upstream.specs, nbs.stats)
nbs.tables, err = nbs.tables.rebase(ctx, upstream.specs, nbs.stats)
if err != nil {
return err
}
@@ -230,12 +230,11 @@ type chunkReader interface {
get(ctx context.Context, h addr, stats *Stats) ([]byte, error)
getMany(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, *chunks.Chunk), stats *Stats) (bool, error)
getManyCompressed(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, CompressedChunk), stats *Stats) (bool, error)
extract(ctx context.Context, chunks chan<- extractRecord) error
count() (uint32, error)
uncompressedLen() (uint64, error)

// Close releases resources retained by the |chunkReader|.
Close() error
// close releases resources retained by the |chunkReader|.
close() error
}

type chunkSource interface {

@@ -253,12 +252,12 @@ type chunkSource interface {
// index returns the tableIndex of this chunkSource.
index() (tableIndex, error)

// Clone returns a |chunkSource| with the same contents as the
// clone returns a |chunkSource| with the same contents as the
// original, but with independent |Close| behavior. A |chunkSource|
// cannot be |Close|d more than once, so if a |chunkSource| is being
// retained in two objects with independent life-cycle, it should be
// |Clone|d first.
Clone() (chunkSource, error)
clone() (chunkSource, error)
}

type chunkSources []chunkSource
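The relettered comment states the ownership rule behind the now-unexported API: a chunkSource may only be closed once, so independent holders must each take their own clone. A hypothetical two-owner illustration:

    a, _ := src.clone() // first owner's handle
    b, _ := src.clone() // second owner's handle
    _ = a.close()       // each owner closes only its own clone
    _ = b.close()       // src itself is closed by whoever created it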
@@ -654,11 +654,11 @@ func (tr tableReader) size() (uint64, error) {
return i.TableFileSize(), nil
}

func (tr tableReader) Close() error {
func (tr tableReader) close() error {
return tr.tableIndex.Close()
}

func (tr tableReader) Clone() (tableReader, error) {
func (tr tableReader) clone() (tableReader, error) {
ti, err := tr.tableIndex.Clone()
if err != nil {
return tableReader{}, err
@@ -252,7 +252,7 @@ func (ts tableSet) physicalLen() (uint64, error) {
return lenNovel + lenUp, nil
}

func (ts tableSet) Close() error {
func (ts tableSet) close() error {
var firstErr error
setErr := func(err error) {
if err != nil && firstErr == nil {

@@ -261,11 +261,11 @@ func (ts tableSet) Close() error {
}

for _, t := range ts.novel {
err := t.Close()
err := t.close()
setErr(err)
}
for _, t := range ts.upstream {
err := t.Close()
err := t.close()
setErr(err)
}
return firstErr
@@ -276,20 +276,9 @@ func (ts tableSet) Size() int {
return len(ts.novel) + len(ts.upstream)
}

// Novel returns the number of tables containing novel chunks in this
// tableSet.
func (ts tableSet) Novel() int {
return len(ts.novel)
}

// Upstream returns the number of known-persisted tables in this tableSet.
func (ts tableSet) Upstream() int {
return len(ts.upstream)
}

// Prepend adds a memTable to an existing tableSet, compacting |mt| and
// prepend adds a memTable to an existing tableSet, compacting |mt| and
// returning a new tableSet with newly compacted table added.
func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet {
func (ts tableSet) prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet {
newTs := tableSet{
novel: make(chunkSources, len(ts.novel)+1),
upstream: make(chunkSources, len(ts.upstream)),
@@ -303,29 +292,9 @@ func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tabl
|
||||
return newTs
|
||||
}
|
||||
|
||||
func (ts tableSet) extract(ctx context.Context, chunks chan<- extractRecord) error {
|
||||
// Since new tables are _prepended_ to a tableSet, extracting chunks in insertOrder requires iterating ts.upstream back to front, followed by ts.novel.
|
||||
for i := len(ts.upstream) - 1; i >= 0; i-- {
|
||||
err := ts.upstream[i].extract(ctx, chunks)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i := len(ts.novel) - 1; i >= 0; i-- {
|
||||
err := ts.novel[i].extract(ctx, chunks)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flatten returns a new tableSet with |upstream| set to the union of ts.novel
|
||||
// flatten returns a new tableSet with |upstream| set to the union of ts.novel
|
||||
// and ts.upstream.
|
||||
func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) {
|
||||
func (ts tableSet) flatten(ctx context.Context) (tableSet, error) {
|
||||
flattened := tableSet{
|
||||
upstream: make(chunkSources, 0, ts.Size()),
|
||||
p: ts.p,
|
||||
@@ -349,9 +318,9 @@ func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) {
 	return flattened, nil
 }
 
-// Rebase returns a new tableSet holding the novel tables managed by |ts| and
+// rebase returns a new tableSet holding the novel tables managed by |ts| and
 // those specified by |specs|.
-func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
+func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
 	merged := tableSet{
 		novel: make(chunkSources, 0, len(ts.novel)),
 		p:     ts.p,
@@ -368,7 +337,7 @@ func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats)
 	}
 
 	if cnt > 0 {
-		t2, err := t.Clone()
+		t2, err := t.clone()
 		if err != nil {
 			return tableSet{}, err
 		}
@@ -404,7 +373,7 @@ OUTER:
 			return tableSet{}, err
 		}
 		if spec.name == h {
-			c, err := existing.Clone()
+			c, err := existing.clone()
 			if err != nil {
 				return tableSet{}, err
 			}
@@ -454,7 +423,7 @@ OUTER:
 	if err != nil {
 		// Close any opened chunkSources
 		for _, cs := range opened {
-			_ = cs.Close()
+			_ = cs.close()
 		}
 
 		if r := rp.Load(); r != nil {
@@ -466,7 +435,7 @@ OUTER:
 	return merged, nil
 }
 
-func (ts tableSet) ToSpecs() ([]tableSpec, error) {
+func (ts tableSet) toSpecs() ([]tableSpec, error) {
 	tableSpecs := make([]tableSpec, 0, ts.Size())
 	for _, src := range ts.novel {
 		cnt, err := src.count()
 
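The rebase error path above, which closes every chunkSource in opened before returning, is the usual acquire-all-or-none pattern. A minimal sketch under those assumptions, with illustrative names that are not from the commit:

package main

import (
	"errors"
	"fmt"
)

type resource struct{ name string }

func open(name string, fail bool) (*resource, error) {
	if fail {
		return nil, errors.New("open " + name + " failed")
	}
	return &resource{name}, nil
}

func (r *resource) close() { fmt.Println("closed", r.name) }

// openAll opens resources in order and, on any failure, closes
// everything it already opened before returning the error, the same
// shape as rebase closing its `opened` chunkSources above.
func openAll(names []string, failAt string) ([]*resource, error) {
	var opened []*resource
	for _, n := range names {
		r, err := open(n, n == failAt)
		if err != nil {
			for _, o := range opened {
				o.close()
			}
			return nil, err
		}
		opened = append(opened, r)
	}
	return opened, nil
}

func main() {
	_, err := openAll([]string{"a", "b", "c"}, "c")
	fmt.Println(err) // closes a and b, then reports: open c failed
}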
@@ -33,8 +33,8 @@ import (
 var testChunks = [][]byte{[]byte("hello2"), []byte("goodbye2"), []byte("badbye2")}
 
 func TestTableSetPrependEmpty(t *testing.T) {
-	ts := newFakeTableSet(&noopQuotaProvider{}).Prepend(context.Background(), newMemTable(testMemTableSize), &Stats{})
-	specs, err := ts.ToSpecs()
+	ts := newFakeTableSet(&noopQuotaProvider{}).prepend(context.Background(), newMemTable(testMemTableSize), &Stats{})
+	specs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(t, specs)
 }
@@ -42,23 +42,23 @@ func TestTableSetPrependEmpty(t *testing.T) {
 func TestTableSetPrepend(t *testing.T) {
 	assert := assert.New(t)
 	ts := newFakeTableSet(&noopQuotaProvider{})
-	specs, err := ts.ToSpecs()
+	specs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(specs)
 	mt := newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
-	firstSpecs, err := ts.ToSpecs()
+	firstSpecs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Len(firstSpecs, 1)
 
 	mt = newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
 	mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
-	secondSpecs, err := ts.ToSpecs()
+	secondSpecs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Len(secondSpecs, 2)
 	assert.Equal(firstSpecs, secondSpecs[1:])
@@ -67,22 +67,22 @@ func TestTableSetPrepend(t *testing.T) {
 func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) {
 	assert := assert.New(t)
 	ts := newFakeTableSet(&noopQuotaProvider{})
-	specs, err := ts.ToSpecs()
+	specs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(specs)
 	mt := newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	mt = newMemTable(testMemTableSize)
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	mt = newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
 	mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
-	specs, err = ts.ToSpecs()
+	specs, err = ts.toSpecs()
 	require.NoError(t, err)
 	assert.Len(specs, 2)
 }
@@ -90,61 +90,26 @@ func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) {
 func TestTableSetFlattenExcludesEmptyTable(t *testing.T) {
 	assert := assert.New(t)
 	ts := newFakeTableSet(&noopQuotaProvider{})
-	specs, err := ts.ToSpecs()
+	specs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(specs)
 	mt := newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	mt = newMemTable(testMemTableSize)
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	mt = newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
 	mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
-	ts, err = ts.Flatten(context.Background())
+	ts, err = ts.flatten(context.Background())
 	require.NoError(t, err)
 	assert.EqualValues(ts.Size(), 2)
 }
 
-func TestTableSetExtract(t *testing.T) {
-	assert := assert.New(t)
-	ts := newFakeTableSet(&noopQuotaProvider{})
-	specs, err := ts.ToSpecs()
-	require.NoError(t, err)
-	assert.Empty(specs)
-
-	// Put in one table
-	mt := newMemTable(testMemTableSize)
-	mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
-
-	// Put in a second
-	mt = newMemTable(testMemTableSize)
-	mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
-	mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
-
-	chunkChan := make(chan extractRecord)
-	go func() {
-		defer close(chunkChan)
-		err := ts.extract(context.Background(), chunkChan)
-
-		require.NoError(t, err)
-	}()
-	i := 0
-	for rec := range chunkChan {
-		a := computeAddr(testChunks[i])
-		assert.NotNil(rec.data, "Nothing for", a)
-		assert.Equal(testChunks[i], rec.data, "Item %d: %s != %s", i, string(testChunks[i]), string(rec.data))
-		assert.Equal(a, rec.a)
-		i++
-	}
-}
-
 func persist(t *testing.T, p tablePersister, chunks ...[]byte) {
 	for _, c := range chunks {
 		mt := newMemTable(testMemTableSize)
@@ -166,37 +131,37 @@ func TestTableSetRebase(t *testing.T) {
 	for _, c := range chunks {
 		mt := newMemTable(testMemTableSize)
 		mt.addChunk(computeAddr(c), c)
-		ts = ts.Prepend(context.Background(), mt, &Stats{})
+		ts = ts.prepend(context.Background(), mt, &Stats{})
 	}
 	return ts
 }
 
 	fullTS := newTableSet(persister, q)
 	defer func() {
-		require.NoError(t, fullTS.Close())
+		require.NoError(t, fullTS.close())
 	}()
-	specs, err := fullTS.ToSpecs()
+	specs, err := fullTS.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(specs)
 	fullTS = insert(fullTS, testChunks...)
-	fullTS, err = fullTS.Flatten(context.Background())
+	fullTS, err = fullTS.flatten(context.Background())
 	require.NoError(t, err)
 
 	ts := newTableSet(persister, q)
 	ts = insert(ts, testChunks[0])
 	assert.Equal(1, ts.Size())
-	ts, err = ts.Flatten(context.Background())
+	ts, err = ts.flatten(context.Background())
 	require.NoError(t, err)
 	ts = insert(ts, []byte("novel"))
 
-	specs, err = fullTS.ToSpecs()
+	specs, err = fullTS.toSpecs()
 	require.NoError(t, err)
-	ts2, err := ts.Rebase(context.Background(), specs, nil)
+	ts2, err := ts.rebase(context.Background(), specs, nil)
 	require.NoError(t, err)
 	defer func() {
-		require.NoError(t, ts2.Close())
+		require.NoError(t, ts2.close())
 	}()
-	err = ts.Close()
+	err = ts.close()
 	require.NoError(t, err)
 	assert.Equal(4, ts2.Size())
 }
@@ -204,17 +169,17 @@ func TestTableSetRebase(t *testing.T) {
 func TestTableSetPhysicalLen(t *testing.T) {
 	assert := assert.New(t)
 	ts := newFakeTableSet(&noopQuotaProvider{})
-	specs, err := ts.ToSpecs()
+	specs, err := ts.toSpecs()
 	require.NoError(t, err)
 	assert.Empty(specs)
 	mt := newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	mt = newMemTable(testMemTableSize)
 	mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
 	mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
-	ts = ts.Prepend(context.Background(), mt, &Stats{})
+	ts = ts.prepend(context.Background(), mt, &Stats{})
 
 	assert.True(mustUint64(ts.physicalLen()) > indexSize(mustUint32(ts.count())))
 }
@@ -241,7 +206,7 @@ func TestTableSetClosesOpenedChunkSourcesOnErr(t *testing.T) {
 	}
 
 	ts := tableSet{p: p, q: q, rl: make(chan struct{}, 1)}
-	_, err := ts.Rebase(context.Background(), specs, &Stats{})
+	_, err := ts.rebase(context.Background(), specs, &Stats{})
 	require.Error(t, err)
 
 	for _ = range p.opened {
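The test updates above are purely mechanical because Go spells visibility with identifier case: lowering Prepend, Flatten, Rebase, ToSpecs, Close, and Clone removes them from the package's exported API while same-package callers, like these tests, keep compiling after a rename. A toy illustration (set and size are made-up names, not from the commit):

package main

import "fmt"

// size is unexported, so it is callable only inside this package,
// exactly like the renamed tableSet and tableReader methods.
type set struct{ n int }

func (s set) size() int { return s.n }

func main() {
	fmt.Println(set{n: 2}.size()) // fine here; unreachable from other packages
}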
@@ -190,10 +190,12 @@ teardown() {
 
     start_sql_server
 
-    server_query "" 1 dolt "" "create database testdb" ""
-    server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntestdb" ""
-    server_query "testdb" 1 dolt "" "create table a(x int)" ""
-    server_query "testdb" 1 dolt "" "insert into a values (1), (2)" ""
+    dolt sql-client --use-db '' -u dolt -P $PORT -q "create database testdb"
+    run dolt sql-client --use-db '' -u dolt -P $PORT -r csv -q "show databases"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "testdb" ]] || false
+    dolt sql-client --use-db testdb -u dolt -P $PORT -q "create table a(x int)"
+    dolt sql-client --use-db testdb -u dolt -P $PORT -q "insert into a values (1), (2)"
 
     [ -d "testdb" ]
     cd testdb
@@ -2788,7 +2788,7 @@ SQL
     run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv
     log_status_eq "0"
     [[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false
-    [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
+    [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
     [[ "${#lines[@]}" = "2" ]] || false
     run dolt sql -q "SELECT * FROM test" -r=csv
     log_status_eq "0"
@@ -2825,7 +2825,7 @@ SQL
     run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv
     log_status_eq "0"
     [[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false
-    [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
+    [[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
     [[ "${#lines[@]}" = "2" ]] || false
     run dolt sql -q "SELECT * FROM test" -r=csv
     log_status_eq "0"
@@ -65,11 +65,14 @@ make_it() {
 
     start_sql_server "dolt_repo_$$"
 
-    server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'"
 
-    server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep')"
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_branch('-D', 'main');"
 
-    server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "id\n" ""
+    run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "describe test"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "id" ]] || false
 }
 
 @test "deleted-branches: can SQL connect with existing branch revision specifier when checked out branch is deleted" {
@@ -77,11 +80,12 @@ make_it() {
 
     start_sql_server "dolt_repo_$$"
 
+    # Can't string together multiple queries in dolt sql-client
     server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
 
     # Against the default branch it fails
-    run server_query "dolt_repo_$$" 1 "" dolt "" "SELECT * FROM test" "id\n" ""
-    [ "$status" -eq 1 ] || fail "expected query against the default branch, which was deleted, to fail"
+    run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
+    [ $status -ne 0 ]
 
     # Against to_keep it succeeds
     server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test" "id\n" ""
@@ -92,10 +96,11 @@ make_it() {
 
     start_sql_server "dolt_repo_$$"
 
-    server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'"
 
     # Against the default branch it fails
-    server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "" 1
+    run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test" ""
+    [ $status -ne 0 ]
 
     # Against main, which exists it succeeds
     server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test" "id\n" ""
@@ -106,7 +111,7 @@ make_it() {
 
     start_sql_server "dolt_repo_$$"
 
-    server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
 
     # We are able to use a database branch revision in the connection string
     server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test;"
@@ -141,11 +146,11 @@ make_it() {
 
     start_sql_server "dolt_repo_$$"
 
-    server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
 
     server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
 
-    server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
 
-    server_query "dolt_repo_$$" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" ""
+    dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "CALL DOLT_CHECKOUT('to_checkout')"
 }
@@ -22,6 +22,11 @@ teardown() {
     dolt commit -am "cm"
 }
 
+@test "foreign-keys-invert-pk: no secondary indexes made" {
+    run dolt index ls
+    [[ $output = "No indexes in the working set" ]] || false
+}
+
 @test "foreign-keys-invert-pk: check referential integrity on merge" {
     dolt commit -am "main"
     dolt checkout -b feat
@@ -1971,7 +1971,7 @@ SQL
     # the prefix key should not be unique
     run dolt sql -q "show create table parent"
     [ $status -eq 0 ]
-    [[ $output =~ "KEY \`b\` (\`b\`)" ]] || false
+    [[ ! $output =~ "KEY \`b\` (\`b\`)" ]] || false
     [[ ! $output =~ "UNIQUE" ]] || false
 
     run dolt sql -q "show create table child"
@@ -169,7 +169,7 @@ pk,c1,c2,c3,c4,c5
 9,1,2,3,4,5
 DELIM
     dolt table import -c --pk=pk test 1pk5col-ints.csv
-    run dolt sql -q "create table fktest(id int not null, tpk int unsigned, c2 int, primary key(id), foreign key (tpk) references test(pk))"
+    run dolt sql -q "create table fktest(id int not null, tpk int, c2 int, primary key(id), foreign key (tpk) references test(pk))"
     [ "$status" -eq 0 ]
     run dolt sql -q "insert into fktest values (1, 0, 1)"
     [ "$status" -eq 0 ]
@@ -567,7 +567,7 @@ DELIM
     [[ "$output" =~ "CREATE TABLE \`test\`" ]]
     [[ "$output" =~ "\`pk\` int" ]]
     [[ "$output" =~ "\`str\` varchar(16383)" ]]
-    [[ "$output" =~ "\`int\` int unsigned" ]]
+    [[ "$output" =~ "\`int\` int" ]]
     [[ "$output" =~ "\`bool\` tinyint" ]]
     [[ "$output" =~ "\`float\` float" ]]
     [[ "$output" =~ "\`date\` date" ]]
@@ -248,3 +248,21 @@ SQL
     run dolt schema show t
     [[ "$output" =~ "PRIMARY KEY (\`pk1\`,\`pk2\`)" ]] || false
 }
+
+@test "migrate: removed tables stay removed" {
+    dolt sql -q "create table alpha (pk int primary key);"
+    dolt sql -q "create table beta (pk int primary key);"
+    dolt commit -Am "create tables"
+
+    dolt sql -q "alter table alpha rename to zulu;"
+    dolt sql -q "drop table beta"
+    dolt commit -Am "rename table alpha to zeta, drop table beta"
+
+    dolt migrate
+
+    run dolt ls
+    [ $status -eq 0 ]
+    [[ "$output" =~ "zulu" ]] || false
+    [[ ! "$output" =~ "alpha" ]] || false
+    [[ ! "$output" =~ "beta" ]] || false
+}
@@ -14,8 +14,8 @@ teardown() {
 }
 
 @test "migration-integration: first-hour-db" {
-    dolt clone dolthub/first-hour-db
-    cd first-hour-db
+    dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
+    cd first-hour-db-migration-int
 
     dolt tag -v
     run dolt tag -v
@@ -39,8 +39,8 @@ teardown() {
 }
 
 @test "migration-integration: first-hour-db after garbage collection" {
-    dolt clone dolthub/first-hour-db
-    cd first-hour-db
+    dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
+    cd first-hour-db-migration-int
     dolt gc
 
     dolt tag -v
@@ -65,8 +65,8 @@ teardown() {
 }
 
 @test "migration-integration: us-jails" {
-    dolt clone dolthub/us-jails
-    cd us-jails
+    dolt clone https://doltremoteapi.dolthub.com/dolthub/us-jails-migration-integration
+    cd us-jails-migration-integration
 
     dolt tag -v
     run dolt tag -v
@@ -31,7 +31,7 @@ teardown() {
     cd dbs1
     start_multi_db_server repo1
     server_query repo1 1 dolt "" "create database new; use new; call dcheckout('-b', 'feat'); create table t (x int); call dolt_add('.'); call dcommit('-am', 'cm'); set @@global.new_default_branch='feat'"
-    server_query repo1 1 dolt "" "use repo1"
+    dolt sql-client -u dolt --use-db '' -P $PORT -q "use repo1"
 }
 
 @test "multidb: incompatible BIN FORMATs" {
@@ -45,13 +45,14 @@ teardown() {
     dolt checkout -b other
     start_sql_server repo1
 
-    run server_query repo1 1 dolt "" "call dolt_push()" "" "" 1
+    run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
+    [ $status -ne 0 ]
     [[ "$output" =~ "the current branch has no upstream branch" ]] || false
 
-    server_query repo1 1 dolt "" "call dolt_push('--set-upstream', 'origin', 'other') " ""
+    dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push('--set-upstream', 'origin', 'other')"
 
     skip "In-memory branch doesn't track upstream"
-    server_query repo1 1 dolt "" "call dolt_push()" ""
+    dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
 }
 
 @test "remotes-sql-server: push on sql-session commit" {
@@ -61,7 +62,7 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_to_remote remote1
     start_sql_server repo1
 
-    server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
+    dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
 
     cd ../repo2
     dolt pull remote1
@@ -81,7 +82,7 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_async_replication 1
     start_sql_server repo1
 
-    server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
+    dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
 
     # wait for the process to exit after we stop it
     stop_sql_server 1
@@ -108,7 +109,10 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" -r csv
+    [ $status -eq 0 ]
+    [[ "$output" =~ "Tables_in_repo2" ]] || false
+    [[ "$output" =~ "test" ]] || false
 }
 
 @test "remotes-sql-server: pull remote not found error" {
@@ -133,7 +137,9 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo1
 
-    run server_query repo1 1 dolt "" "show tables" "Table\n"
+    run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "Table" ]] || false
 }
 
 @test "remotes-sql-server: push remote not found error" {
@@ -156,7 +162,10 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_to_remote unknown
     start_sql_server repo1
 
-    server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\ntest"
+    run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "Tables_in_repo1" ]] || false
+    [[ "$output" =~ "test" ]] || false
 }
 
 @test "remotes-sql-server: pull multiple heads" {
@@ -172,8 +181,16 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main,new_feature
    start_sql_server repo2
 
-    server_query repo2 1 dolt "" "select dolt_checkout('new_feature') as b" "b\n0"
-    server_query repo2 1 dolt "" "select name from dolt_branches order by name" "name\nmain\nnew_feature"
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select dolt_checkout('new_feature') as b"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "b" ]] || false
+    [[ "$output" =~ "0" ]] || false
+
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select name from dolt_branches order by name"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "name" ]] || false
+    [[ "$output" =~ "main" ]] || false
+    [[ "$output" =~ "new_feature" ]] || false
 }
 
 @test "remotes-sql-server: connect to remote head" {
@@ -194,13 +211,17 @@ teardown() {
     start_sql_server repo2
 
     # No data on main
-    server_query repo2 1 dolt "" "show tables" ""
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [ "$output" = "" ]
+
+    # Can't use dolt sql-client to connect to branches
 
     # Connecting to heads that exist only on the remote should work fine (they get fetched)
     server_query "repo2/new_feature" 1 dolt "" "show tables" "Tables_in_repo2/new_feature\ntest"
     server_query repo2 1 dolt "" 'use `repo2/new_feature2`' ""
     server_query repo2 1 dolt "" 'select * from `repo2/new_feature2`.test' "pk\n0\n1\n2"
 
     # Connecting to heads that don't exist should error out
     run server_query "repo2/notexist" 1 dolt "" 'use `repo2/new_feature2`' "" 1
     [[ $output =~ "database not found" ]] || false
@@ -228,7 +249,10 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [[ $output =~ "Tables_in_repo2" ]] || false
+    [[ $output =~ "test" ]] || false
 }
 
 @test "remotes-sql-server: pull invalid head" {
@@ -240,7 +264,8 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads unknown
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "" 1
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -ne 0 ]
     [[ "$output" =~ "remote not found: 'unknown'" ]] || false
 }
 
@@ -253,7 +278,8 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "" 1
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -ne 0 ]
     [[ "$output" =~ "remote not found: 'unknown'" ]] || false
 }
 
@@ -270,7 +296,9 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "Table\n"
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
+    [ $status -eq 0 ]
+    [ "$output" = "" ]
 }
 
 @test "remotes-sql-server: connect to missing branch pulls remote" {
@@ -286,7 +314,11 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "SHOW tables" "" # no tables on main
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
+    [ $status -eq 0 ]
+    [ "$output" = "" ]
+
+    # Can't connect to a specific branch with dolt sql-client
     server_query "repo2/feature-branch" 1 dolt "" "SHOW Tables" "Tables_in_repo2/feature-branch\ntest"
 }
 
@@ -303,8 +335,14 @@ teardown() {
     dolt config --local --add sqlserver.global.dolt_replicate_heads main
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
-    server_query repo2 1 dolt "" "use \`repo2/$head_hash\`" ""
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [[ $output =~ "Tables_in_repo2" ]] || false
+    [[ $output =~ "test" ]] || false
+
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/$head_hash\`"
+    [ $status -eq 0 ]
+    [ "$output" = "" ]
 }
 
 @test "remotes-sql-server: connect to tag works" {
@@ -321,8 +359,14 @@ teardown() {
     dolt tag v1
     start_sql_server repo2
 
-    server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
-    server_query repo2 1 dolt "" "use \`repo2/v1\`" ""
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
+    [ $status -eq 0 ]
+    [[ $output =~ "Tables_in_repo2" ]] || false
+    [[ $output =~ "test" ]] || false
+
+    run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/v1\`"
+    [ $status -eq 0 ]
+    [ "$output" = "" ]
 }
 
 get_head_commit() {
@@ -290,15 +290,15 @@ SQL
     start_multi_db_server repo1
     cd ..
 
-    server_query repo1 1 dolt "" "create table t1 (a int primary key)"
-    server_query repo1 1 dolt "" "call dolt_add('.')"
-    server_query repo1 1 dolt "" "call dolt_commit('-am', 'cm')"
-    server_query repo2 1 dolt "" "create table t2 (a int primary key)"
-    server_query repo2 1 dolt "" "call dolt_add('.')"
-    server_query repo2 1 dolt "" "call dolt_commit('-am', 'cm')"
-    server_query repo3 1 dolt "" "create table t3 (a int primary key)"
-    server_query repo3 1 dolt "" "call dolt_add('.')"
-    server_query repo3 1 dolt "" "call dolt_commit('-am', 'cm')"
+    dolt sql-client --use-db repo1 -u dolt -P $PORT -q "create table t1 (a int primary key)"
+    dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_add('.')"
+    dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
+    dolt sql-client --use-db repo2 -u dolt -P $PORT -q "create table t2 (a int primary key)"
+    dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_add('.')"
+    dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
+    dolt sql-client --use-db repo3 -u dolt -P $PORT -q "create table t3 (a int primary key)"
+    dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_add('.')"
+    dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
 
     clone_helper $TMPDIRS
 
@@ -344,7 +344,18 @@ SQL
     cd dbs1
     start_multi_db_server repo1
 
-    server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\nt1"
-    server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\nt2"
-    server_query repo3 1 dolt "" "show tables" "Tables_in_repo3\nt3"
+    run dolt sql-client --use-db repo1 -u dolt -P $PORT -q "show tables"
+    [ $status -eq 0 ]
+    [[ "$output" =~ Tables_in_repo1 ]] || false
+    [[ "$output" =~ t1 ]] || false
+
+    run dolt sql-client --use-db repo2 -u dolt -P $PORT -q "show tables"
+    [ $status -eq 0 ]
+    [[ "$output" =~ Tables_in_repo2 ]] || false
+    [[ "$output" =~ t2 ]] || false
+
+    run dolt sql-client --use-db repo3 -u dolt -P $PORT -q "show tables"
+    [ $status -eq 0 ]
+    [[ "$output" =~ Tables_in_repo3 ]] || false
+    [[ "$output" =~ t3 ]] || false
 }
@@ -82,7 +82,7 @@ teardown() {
     [[ "$output" =~ "\`string\` varchar(16383)" ]] || false
     [[ "$output" =~ "\`boolean\` tinyint" ]] || false
     [[ "$output" =~ "\`float\` float" ]] || false
-    [[ "$output" =~ "\`uint\` int unsigned" ]] || false
+    [[ "$output" =~ "\`uint\` int" ]] || false
     [[ "$output" =~ "\`uuid\` char(36) CHARACTER SET ascii COLLATE ascii_bin" ]] || false
 }
 
@@ -259,9 +259,9 @@ DELIM
 
     run dolt diff --schema
     [ "$status" -eq 0 ]
-    [[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false
-    [[ "$output" =~ '+ `y` float NOT NULL,' ]] || false
-    [[ "$output" =~ '+ `z` int NOT NULL,' ]] || false
+    [[ "$output" =~ '+ `x` varchar(16383),' ]] || false
+    [[ "$output" =~ '+ `y` float,' ]] || false
+    [[ "$output" =~ '+ `z` int,' ]] || false
     # assert no columns were deleted/replaced
     [[ ! "$output" = "- \`" ]] || false
 
@@ -282,9 +282,9 @@ DELIM
 
     run dolt diff --schema
     [ "$status" -eq 0 ]
-    [[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false
-    [[ "$output" =~ '+ `y` float NOT NULL,' ]] || false
-    [[ "$output" =~ '+ `z` int NOT NULL,' ]] || false
+    [[ "$output" =~ '+ `x` varchar(16383),' ]] || false
+    [[ "$output" =~ '+ `y` float,' ]] || false
+    [[ "$output" =~ '+ `z` int,' ]] || false
     # assert no columns were deleted/replaced
     [[ ! "$output" = "- \`" ]] || false
 
@@ -308,9 +308,9 @@ DELIM
 
     run dolt diff --schema
     [ "$status" -eq 0 ]
-    [[ "$output" =~ '- `a` varchar(16383) NOT NULL,' ]] || false
-    [[ "$output" =~ '- `b` float NOT NULL,' ]] || false
-    [[ "$output" =~ '- `c` tinyint NOT NULL,' ]] || false
+    [[ "$output" =~ '- `a` varchar(16383),' ]] || false
+    [[ "$output" =~ '- `b` float,' ]] || false
+    [[ "$output" =~ '- `c` tinyint,' ]] || false
     # assert no columns were added
     [[ ! "$output" = "+ \`" ]] || false
 }
@@ -39,10 +39,10 @@ teardown() {
 @test "sql-charsets-collations: define charset and collation on a database" {
     start_sql_server
 
-    server_query "" 1 dolt "" "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
+    dolt sql-client -u dolt --use-db '' -P $PORT -q "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
     skip "Defining charsets and collations on a database not supported"
-    server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
-    server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
+    dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
+    dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
 }
 
 @test "sql-charsets-collations: define and use a collation and charset" {
@@ -60,9 +60,15 @@ teardown() {
     SERVER_PID=$! # will get killed by teardown_common
     sleep 5 # not using python wait so this works on windows
 
-    server_query test_db 1 root "" "select user from mysql.user order by user" "User\nroot"
-    server_query test_db 1 root "" "create user new_user" ""
-    server_query test_db 1 root "" "select user from mysql.user order by user" "User\nnew_user\nroot"
+    run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -eq 0 ]
+    [[ $output =~ "root" ]] || false
+
+    dolt sql-client -P $PORT -u root --use-db test_db -q "create user new_user"
+    run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -eq 0 ]
+    [[ $output =~ "root" ]] || false
+    [[ $output =~ "new_user" ]] || false
 
     stop_sql_server
     rm -f .dolt/sql-server.lock
@@ -73,7 +79,8 @@ teardown() {
     SERVER_PID=$! # will get killed by teardown_common
     sleep 5 # not using python wait so this works on windows
 
-    server_query test_db 1 root "" "select user from mysql.user order by user" "" 1
+    run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -ne 0 ]
 }
 
 @test "sql-privs: starting server with empty config works" {
@@ -82,10 +89,16 @@ teardown() {
 
     start_sql_server_with_config test_db server.yaml
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -eq 0 ]
+    [[ $output =~ "dolt" ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -eq 0 ]
+    [[ $output =~ "dolt" ]] || false
+    [[ $output =~ "new_user" ]] || false
 
     run ls -a
     [[ "$output" =~ ".doltcfg" ]] || false
@@ -111,8 +124,12 @@ behavior:
 
     dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
     SERVER_PID=$!
     sleep 5
 
-    server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
+    run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ "cmddolt" ]] || false
 }
 
 @test "sql-privs: yaml with user is also replaced with command line user" {
@@ -135,8 +152,11 @@ behavior:
 
     dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
     SERVER_PID=$!
     sleep 5
 
-    server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
+    run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ "cmddolt" ]] || false
 }
 
 @test "sql-privs: yaml specifies doltcfg dir" {
@@ -146,9 +166,16 @@ behavior:
 
     start_sql_server_with_config test_db server.yaml
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -165,10 +192,17 @@ behavior:
 
     start_sql_server_with_config test_db server.yaml
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     [[ "$output" =~ ".doltcfg" ]] || false
     [[ "$output" =~ "privs.db" ]] || false
@@ -184,9 +218,18 @@ behavior:
 
     start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nprivs_user"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ privs_user ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
+    [[ $output =~ privs_user ]] || false
 
     # Test that privs.json file is not in json format
     run cat privs.json
@@ -196,7 +239,12 @@ behavior:
     rm -f ./.dolt/sql-server.lock
     stop_sql_server
     start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
+    [[ $output =~ privs_user ]] || false
 }
 
 @test "sql-privs: errors instead of panic when reading badly formatted privilege file" {
@@ -217,9 +265,16 @@ behavior:
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     [[ "$output" =~ ".doltcfg" ]] || false
@@ -232,7 +287,9 @@ behavior:
     make_test_repo
 
     start_sql_server_with_args --host 127.0.0.1 --user=dolt
-    server_query test_db 1 dolt "" "select user, host from mysql.user order by user" "User,Host\ndolt,%"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db --result-format csv -q "select user, host from mysql.user order by user"
+    [ $status -eq 0 ]
+    [[ "$output" =~ "dolt,%" ]] || false
 }
 
 @test "sql-privs: multiple doltcfg directories causes error" {
@@ -267,10 +324,24 @@ behavior:
     ! [[ "$output" =~ ".doltcfg" ]] || false
     ! [[ "$output" =~ "privileges.db" ]] || false
 
-    server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query db1 1 dolt "" "create user new_user" ""
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ db1 ]] || false
+    [[ $output =~ db2 ]] || false
+    [[ $output =~ db3 ]] || false
+    [[ $output =~ information_schema ]] || false
+    [[ $output =~ mysql ]] || false
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -293,10 +364,17 @@ behavior:
     ! [[ "$output" =~ ".doltcfg" ]] || false
     ! [[ "$output" =~ "doltcfgdir" ]] || false
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
     [[ "$output" =~ "doltcfgdir" ]] || false
@@ -314,9 +392,16 @@ behavior:
     ! [[ "$output" =~ ".doltcfg" ]] || false
     ! [[ "$output" =~ "privs.db" ]] || false
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     [[ "$output" =~ ".doltcfg" ]] || false
@@ -337,10 +422,24 @@ behavior:
     ! [[ "$output" =~ ".doltcfg" ]] || false
     ! [[ "$output" =~ "privileges.db" ]] || false
 
-    server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query db1 1 dolt "" "create user new_user" ""
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ db1 ]] || false
+    [[ $output =~ db2 ]] || false
+    [[ $output =~ db3 ]] || false
+    [[ $output =~ information_schema ]] || false
+    [[ $output =~ mysql ]] || false
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -368,10 +467,24 @@ behavior:
     ! [[ "$output" =~ ".doltcfg" ]] || false
     ! [[ "$output" =~ "privs.db" ]] || false
 
-    server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query db1 1 dolt "" "create user new_user" ""
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ db1 ]] || false
+    [[ $output =~ db2 ]] || false
+    [[ $output =~ db3 ]] || false
+    [[ $output =~ information_schema ]] || false
+    [[ $output =~ mysql ]] || false
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -395,9 +508,16 @@ behavior:
     ! [[ "$output" =~ "doltcfgdir" ]] || false
     ! [[ "$output" =~ "privs.db" ]] || false
 
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query test_db 1 dolt "" "create user new_user" ""
-    server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -420,10 +540,24 @@ behavior:
     ! [[ "$output" =~ "privileges.db" ]] || false
     ! [[ "$output" =~ "privs.db" ]] || false
 
-    server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
-    server_query db1 1 dolt "" "create user new_user" ""
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ db1 ]] || false
+    [[ $output =~ db2 ]] || false
+    [[ $output =~ db3 ]] || false
+    [[ $output =~ information_schema ]] || false
+    [[ $output =~ mysql ]] || false
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 
     run ls -a
     ! [[ "$output" =~ ".doltcfg" ]] || false
@@ -447,7 +581,7 @@ behavior:
     dolt init
     start_sql_server_with_args --host 0.0.0.0 --user=dolt
 
-    server_query test_db 1 dolt "" "create user new_user" ""
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
    stop_sql_server
     sleep 1
     run ls -a
@@ -457,65 +591,91 @@ behavior:
 
     cd db_dir
     start_sql_server_with_args --host 0.0.0.0 --user=dolt
-    server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
+
+    run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ new_user ]] || false
 }
 
 @test "sql-privs: basic lack of privileges tests" {
     make_test_repo
     start_sql_server
 
-    server_query test_db 1 dolt "" "create table t1(c1 int)"
-    server_query test_db 1 dolt "" "create user test"
-    server_query test_db 1 dolt "" "grant select on test_db.* to test"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test"
 
     # Should only see test_db database
-    server_query "" 1 test "" "show databases" "Database\ntest_db"
-    server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
+    run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ test_db ]] || false
+
+    run dolt sql-client -P $PORT -u dolt --use-db test_db -q "show tables"
+    [ $status -eq 0 ]
+    [[ $output =~ t1 ]] || false
 
     # Revoke works as expected
-    server_query test_db 1 dolt "" "revoke select on test_db.* from test"
-    server_query test_db 1 test "" "show tables" "" 1
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "revoke select on test_db.* from test"
+    run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
+    [ $status -ne 0 ]
 
     # Host in privileges is respected
-    server_query test_db 1 dolt "" "drop user test"
-    server_query test_db 1 dolt "" "create user test@'127.0.0.1'"
-    server_query test_db 1 dolt "" "grant select on test_db.* to test@'127.0.0.1'"
-    server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
-    server_query test_db 1 dolt "" "drop user test@'127.0.0.1'"
-    server_query test_db 1 dolt "" "create user test@'10.10.10.10'"
-    server_query test_db 1 dolt "" "grant select on test_db.* to test@'10.10.10.10'"
-    server_query test_db 1 test "" "show tables" "" 1
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'127.0.0.1'"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'127.0.0.1'"
+    run dolt sql-client -P $PORT -u test -H 127.0.0.1 --use-db test_db -q "show tables"
+    [ $status -eq 0 ]
+    [[ $output =~ t1 ]] || false
+
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test@'127.0.0.1'"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'10.10.10.10'"
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'10.10.10.10'"
+    run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
+    [ $status -ne 0 ]
 }
 
 @test "sql-privs: creating user identified by password" {
     make_test_repo
     start_sql_server
 
-    server_query test_db 1 dolt "" "create user test identified by 'test'" ""
-    server_query test_db 1 dolt "" "grant select on mysql.user to test" ""
+    dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test identified by 'test'"
+    dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on mysql.user to test"
 
     # Should not be able to connect to test_db
-    server_query test_db 1 test test "select user from mysql.user order by user" "" 1
+    run dolt sql-client -P $PORT -u test -p test --use-db test_db -q "select user from mysql.user order by user"
+    [ $status -ne 0 ]
 
-    server_query "" 1 test test "select user from mysql.user order by user" "User\ndolt\ntest"
+    run dolt sql-client -P $PORT -u test -p test --use-db '' -q "select user from mysql.user"
+    [ $status -eq 0 ]
+    [[ $output =~ dolt ]] || false
+    [[ $output =~ test ]] || false
 
     # Bad password can't connect
-    server_query "" 1 test bad "select user from mysql.user order by user" "" 1
+    run dolt sql-client -P $PORT -u test -p bad --use-db '' -q "select user from mysql.user order by user"
+    [ $status -ne 0 ]
 
     # Should only see mysql database
-    server_query "" 1 test test "show databases" "Database\nmysql"
+    run dolt sql-client -P $PORT -u test -p test --use-db '' -q "show databases"
+    [ $status -eq 0 ]
+    [[ $output =~ mysql ]] || false
+    ! [[ $output =~ test_db ]] || false
 }
 
 @test "sql-privs: deleting user prevents access by that user" {
     make_test_repo
     start_sql_server
 
-    server_query test_db 1 dolt "" "create user test"
-    server_query test_db 1 dolt "" "grant select on test_db.* to test" ""
+    dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
+    dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test"
+    dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on test_db.* to test"
 
-    server_query test_db 1 test "" "show tables" ""
+    run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
+    [ $status -eq 0 ]
+    echo $output
+    [[ $output =~ t1 ]] || false
 
-    server_query test_db 1 dolt "" "drop user test"
+    dolt sql-client -P $PORT -u dolt --use-db '' -q "drop user test"
 
-    server_query test_db 1 test "" "show tables" "" 1
+    run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
+    [ $status -ne 0 ]
 }