Merge remote-tracking branch 'origin/main' into aaron/bats-move-sql-server-temporary-table-test

Aaron Son
2022-10-28 13:33:00 -07:00
67 changed files with 2239 additions and 927 deletions

View File

@@ -22,10 +22,10 @@ jobs:
fail-fast: true
matrix:
os: [ ubuntu-22.04, macos-latest ]
dolt_fmt: [ "__DOLT__", "__DOLT_DEV__", "__LD_1__" ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
exclude:
- os: "macos-latest"
dolt_fmt: ["__DOLT_DEV__", "__LD_1__" ]
dolt_fmt: "__LD_1__"
env:
use_credentials: ${{ secrets.AWS_SECRET_ACCESS_KEY != '' && secrets.AWS_ACCESS_KEY_ID != '' }}
steps:

View File

@@ -136,14 +136,14 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
}
func checkCredAndPrintSuccess(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, endpoint string) errhand.VerboseError {
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: endpoint,
Creds: dc,
})
if err != nil {
return errhand.BuildDError("error: unable to build server endpoint options.").AddCause(err).Build()
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
}

View File

@@ -161,14 +161,14 @@ func updateProfileWithCredentials(ctx context.Context, dEnv *env.DoltEnv, c cred
host := dEnv.Config.GetStringOrDefault(env.RemotesApiHostKey, env.DefaultRemotesApiHost)
port := dEnv.Config.GetStringOrDefault(env.RemotesApiHostPortKey, env.DefaultRemotesApiPort)
hostAndPort := fmt.Sprintf("%s:%s", host, port)
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: hostAndPort,
Creds: c,
})
if err != nil {
return fmt.Errorf("error: unable to build dial options server with credentials: %w", err)
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return fmt.Errorf("error: unable to connect to server with credentials: %w", err)
}

View File

@@ -61,6 +61,7 @@ const (
SQLFlag = "sql"
CachedFlag = "cached"
SkinnyFlag = "skinny"
MergeBase = "merge-base"
)
var diffDocs = cli.CommandDocumentationContent{
@@ -71,11 +72,17 @@ Show changes between the working and staged tables, changes between the working
{{.EmphasisLeft}}dolt diff [--options] [<tables>...]{{.EmphasisRight}}
This form is to view the changes you made relative to the staging area for the next commit. In other words, the differences are what you could tell Dolt to further add but you still haven't. You can stage these changes by using dolt add.
{{.EmphasisLeft}}dolt diff [--options] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch.
{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...HEAD{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit>..<commit> [<tables>...]{{.EmphasisRight}}
This is synonymous with the above form (without the ..) to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit>...<commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes on the branch containing and up to the second {{.LessThan}}commit{{.GreaterThan}}, starting at a common ancestor of both {{.LessThan}}commit{{.GreaterThan}}. {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}}. You can omit any one of {{.LessThan}}commit{{.GreaterThan}}, which has the same effect as using HEAD instead.
The diffs displayed can be limited to show the first N by providing the parameter {{.EmphasisLeft}}--limit N{{.EmphasisRight}} where {{.EmphasisLeft}}N{{.EmphasisRight}} is the number of diffs to display.
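The forms above all reduce to choosing a (from, to) pair of revisions. As a quick reference, here is a minimal, self-contained Go sketch of the dot grammar; splitDotRevision is a hypothetical helper written for illustration, not dolt's applyDotRevisions, and useMergeBase signals that the from side should be replaced by merge-base(from, to):
package main

import (
	"fmt"
	"strings"
)

// splitDotRevision mirrors the documented grammar: "A..B" diffs A against B
// directly, while "A...B" asks for merge-base(A, B) on the "before" side.
// An omitted side defaults to HEAD.
func splitDotRevision(arg string) (from, to string, useMergeBase bool) {
	if strings.Contains(arg, "...") {
		parts := strings.SplitN(arg, "...", 2)
		from, to, useMergeBase = parts[0], parts[1], true
	} else {
		parts := strings.SplitN(arg, "..", 2)
		from, to = parts[0], parts[1]
	}
	if from == "" {
		from = "HEAD"
	}
	if to == "" {
		to = "HEAD"
	}
	return from, to, useMergeBase
}

func main() {
	for _, arg := range []string{"main..feature", "main...feature", "...feature"} {
		from, to, mb := splitDotRevision(arg)
		fmt.Printf("%-16s from=%-7s to=%-7s mergeBase=%v\n", arg, from, to, mb)
	}
}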
@@ -132,6 +139,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.")
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
ap.SupportsFlag(MergeBase, "", "Uses the merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
return ap
}
@@ -202,7 +210,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
dArgs.limit, _ = apr.GetInt(limitParam)
dArgs.where = apr.GetValueOrDefault(whereParam, "")
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag))
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag), apr.Contains(MergeBase))
if err != nil {
return nil, err
}
@@ -243,7 +251,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
// applyDiffRoots applies the appropriate |from| and |to| root values to the receiver and returns the table names
// (if any) given to the command.
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached bool) ([]string, error) {
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached, useMergeBase bool) ([]string, error) {
headRoot, err := dEnv.HeadRoot(ctx)
if err != nil {
return nil, err
@@ -271,15 +279,33 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
}
if len(args) == 0 {
if useMergeBase {
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
}
// `dolt diff`
return nil, nil
}
if strings.Contains(args[0], "..") {
if useMergeBase {
return nil, fmt.Errorf("Cannot use `..` or `...` with --merge-base flag")
}
err = dArgs.applyDotRevisions(ctx, dEnv, args)
if err != nil {
return nil, err
}
return args[1:], err
}
// treat the first arg as a ref spec
fromRoot, ok := maybeResolve(ctx, dEnv, args[0])
// if it doesn't resolve, treat it as a table name
if !ok {
// `dolt diff table`
if useMergeBase {
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
}
return args, nil
}
@@ -288,23 +314,123 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
if len(args) == 1 {
// `dolt diff from_commit`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
if err != nil {
return nil, err
}
}
return nil, nil
}
toRoot, ok := maybeResolve(ctx, dEnv, args[1])
if !ok {
// `dolt diff from_commit ...tables`
// `dolt diff from_commit [...tables]`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
if err != nil {
return nil, err
}
}
return args[1:], nil
}
dArgs.toRoot = toRoot
dArgs.toRef = args[1]
// `dolt diff from_commit to_commit ...tables`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], args[1])
if err != nil {
return nil, err
}
}
// `dolt diff from_commit to_commit [...tables]`
return args[2:], nil
}
// applyMergeBase applies the merge base of two revisions to the |from| root
// values.
func (dArgs *diffArgs) applyMergeBase(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) error {
mergeBaseStr, err := getMergeBaseFromStrings(ctx, dEnv, leftStr, rightStr)
if err != nil {
return err
}
fromRoot, ok := maybeResolve(ctx, dEnv, mergeBaseStr)
if !ok {
return fmt.Errorf("merge base invalid %s", mergeBaseStr)
}
dArgs.fromRoot = fromRoot
dArgs.fromRef = mergeBaseStr
return nil
}
// applyDotRevisions applies the appropriate |from| and |to| root values to the
// receiver for arguments containing `..` or `...`
func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv, args []string) error {
// `dolt diff from_commit...to_commit [...tables]`
if strings.Contains(args[0], "...") {
refs := strings.Split(args[0], "...")
var toRoot *doltdb.RootValue
ok := true
if len(refs[0]) > 0 {
right := refs[1]
// Use current HEAD if right side of `...` does not exist
if len(refs[1]) == 0 {
right = "HEAD"
}
err := dArgs.applyMergeBase(ctx, dEnv, refs[0], right)
if err != nil {
return err
}
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
dArgs.toRef = refs[1]
}
return nil
}
// `dolt diff from_commit..to_commit [...tables]`
if strings.Contains(args[0], "..") {
refs := strings.Split(args[0], "..")
var fromRoot *doltdb.RootValue
var toRoot *doltdb.RootValue
ok := true
if len(refs[0]) > 0 {
if fromRoot, ok = maybeResolve(ctx, dEnv, refs[0]); !ok {
return fmt.Errorf("from ref in two dot diff must be valid ref: %s", refs[0])
}
dArgs.fromRoot = fromRoot
dArgs.fromRef = refs[0]
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
dArgs.toRef = refs[1]
}
return nil
}
return nil
}
// todo: distinguish between non-existent CommitSpec and other errors, don't assume non-existent
func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.RootValue, bool) {
cs, err := doltdb.NewCommitSpec(spec)

View File

@@ -238,7 +238,7 @@ func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
}
func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint string, insecure bool) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: authEndpoint,
Creds: dc,
Insecure: insecure,
@@ -246,7 +246,7 @@ func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint st
if err != nil {
return nil, errhand.BuildDError("error: unable to build dial options for connecting to server with credentials.").AddCause(err).Build()
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return nil, errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
}

View File

@@ -80,24 +80,35 @@ func (cmd MergeBaseCmd) Exec(ctx context.Context, commandStr string, args []stri
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
left, verr := ResolveCommitWithVErr(dEnv, apr.Arg(0))
mergeBaseStr, verr := getMergeBaseFromStrings(ctx, dEnv, apr.Arg(0), apr.Arg(1))
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
right, verr := ResolveCommitWithVErr(dEnv, apr.Arg(1))
cli.Println(mergeBaseStr)
return 0
}
// getMergeBaseFromStrings resolves two revisions and returns the merge base
// commit hash string
func getMergeBaseFromStrings(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) (string, errhand.VerboseError) {
left, verr := ResolveCommitWithVErr(dEnv, leftStr)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return "", verr
}
right, verr := ResolveCommitWithVErr(dEnv, rightStr)
if verr != nil {
return "", verr
}
mergeBase, err := merge.MergeBase(ctx, left, right)
if err != nil {
verr = errhand.BuildDError("could not find merge-base for args %s", apr.Args).AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
verr = errhand.BuildDError("could not find merge-base for args %s %s", leftStr, rightStr).AddCause(err).Build()
return "", verr
}
cli.Println(mergeBase.String())
return 0
return mergeBase.String(), nil
}
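merge.MergeBase does the ancestor resolution behind this helper. As a rough mental model only, here is a toy first-parent walk over an in-memory graph; the real implementation works over full commit DAGs and doltdb types, which this sketch deliberately avoids:
package main

import "fmt"

// commit is a toy stand-in for *doltdb.Commit; parents holds ancestor IDs.
type commit struct {
	id      string
	parents []string
}

// mergeBase marks every first-parent ancestor of |left|, then walks |right|
// until it hits a marked commit. Illustrative only: real histories can have
// multiple parents, which this sketch ignores.
func mergeBase(commits map[string]commit, left, right string) (string, bool) {
	seen := map[string]bool{}
	for id := left; id != ""; {
		seen[id] = true
		c := commits[id]
		if len(c.parents) == 0 {
			break
		}
		id = c.parents[0]
	}
	for id := right; id != ""; {
		if seen[id] {
			return id, true
		}
		c := commits[id]
		if len(c.parents) == 0 {
			break
		}
		id = c.parents[0]
	}
	return "", false
}

func main() {
	commits := map[string]commit{
		"a": {id: "a"},
		"b": {id: "b", parents: []string{"a"}},
		"c": {id: "c", parents: []string{"b"}}, // tip of main
		"d": {id: "d", parents: []string{"b"}}, // tip of feature
	}
	base, ok := mergeBase(commits, "c", "d")
	fmt.Println(base, ok) // b true
}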
func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit, errhand.VerboseError) {

View File

@@ -151,14 +151,14 @@ func getGRPCEmitter(dEnv *env.DoltEnv) *events.GrpcEmitter {
}
hostAndPort := fmt.Sprintf("%s:%d", host, port)
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: hostAndPort,
Insecure: insecure,
})
if err != nil {
return nil
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return nil
}

View File

@@ -16,6 +16,7 @@ package sqlserver
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
@@ -261,12 +262,22 @@ func Serve(
args := clusterController.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
Logger: logrus.NewEntry(lgr),
})
clusterRemoteSrvTLSConfig, err := LoadClusterTLSConfig(serverConfig.ClusterConfig())
if err != nil {
lgr.Errorf("error starting remotesapi server for cluster config, could not load tls config: %v", err)
startError = err
return
}
args.TLSConfig = clusterRemoteSrvTLSConfig
clusterRemoteSrv, err = remotesrv.NewServer(args)
if err != nil {
lgr.Errorf("error creating remotesapi server on port %d: %v", *serverConfig.RemotesapiPort(), err)
startError = err
return
}
listeners, err := clusterRemoteSrv.Listeners()
if err != nil {
lgr.Errorf("error starting remotesapi server listeners for cluster config on port %d: %v", clusterController.RemoteSrvPort(), err)
@@ -325,6 +336,22 @@ func Serve(
return
}
func LoadClusterTLSConfig(cfg cluster.Config) (*tls.Config, error) {
rcfg := cfg.RemotesAPIConfig()
if rcfg.TLSKey() == "" && rcfg.TLSCert() == "" {
return nil, nil
}
c, err := tls.LoadX509KeyPair(rcfg.TLSCert(), rcfg.TLSKey())
if err != nil {
return nil, err
}
return &tls.Config{
Certificates: []tls.Certificate{
c,
},
}, nil
}
func portInUse(hostPort string) bool {
timeout := time.Second
conn, _ := net.DialTimeout("tcp", hostPort, timeout)

View File

@@ -523,6 +523,12 @@ func ValidateClusterConfig(config cluster.Config) error {
if config.RemotesAPIConfig().Port() < 0 || config.RemotesAPIConfig().Port() > 65535 {
return fmt.Errorf("cluster: remotesapi: port: is not in range 0-65535: %d", config.RemotesAPIConfig().Port())
}
if config.RemotesAPIConfig().TLSKey() == "" && config.RemotesAPIConfig().TLSCert() != "" {
return fmt.Errorf("cluster: remotesapi: tls_key: must supply a tls_key if you supply a tls_cert")
}
if config.RemotesAPIConfig().TLSKey() != "" && config.RemotesAPIConfig().TLSCert() == "" {
return fmt.Errorf("cluster: remotesapi: tls_cert: must supply a tls_cert if you supply a tls_key")
}
return nil
}

View File

@@ -517,9 +517,24 @@ func (c *ClusterYAMLConfig) RemotesAPIConfig() cluster.RemotesAPIConfig {
}
type clusterRemotesAPIYAMLConfig struct {
P int `yaml:"port"`
Port_ int `yaml:"port"`
TLSKey_ string `yaml:"tls_key"`
TLSCert_ string `yaml:"tls_cert"`
TLSCA_ string `yaml:"tls_ca"`
}
func (c clusterRemotesAPIYAMLConfig) Port() int {
return c.P
return c.Port_
}
func (c clusterRemotesAPIYAMLConfig) TLSKey() string {
return c.TLSKey_
}
func (c clusterRemotesAPIYAMLConfig) TLSCert() string {
return c.TLSCert_
}
func (c clusterRemotesAPIYAMLConfig) TLSCA() string {
return c.TLSCA_
}
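The yaml struct tags above are what bind the new tls_key/tls_cert/tls_ca settings to the server's YAML config. A standalone decoding sketch using gopkg.in/yaml.v2, with a local copy of the struct and made-up file paths, not the dolt types:
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// remotesAPI mirrors the yaml tags on clusterRemotesAPIYAMLConfig above.
type remotesAPI struct {
	Port    int    `yaml:"port"`
	TLSKey  string `yaml:"tls_key"`
	TLSCert string `yaml:"tls_cert"`
	TLSCA   string `yaml:"tls_ca"`
}

func main() {
	doc := `
port: 50051
tls_key: /etc/dolt/remotesapi.key
tls_cert: /etc/dolt/remotesapi.pem
tls_ca: /etc/dolt/ca.pem
`
	var cfg remotesAPI
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}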

View File

@@ -34,9 +34,22 @@ import (
var GRPCDialProviderParam = "__DOLT__grpc_dial_provider"
// GRPCDialProvider is an interface for getting a *grpc.ClientConn.
type GRPCRemoteConfig struct {
Endpoint string
DialOptions []grpc.DialOption
HTTPFetcher grpcendpoint.HTTPFetcher
}
// GRPCDialProvider is an interface for getting a concrete Endpoint,
// DialOptions and HTTPFetcher from a slightly more abstract
// grpcendpoint.Config. It allows a caller to override certain aspects of how
// the grpc.ClientConn and the resulting remotestorage ChunkStore are
// configured by dbfactory when it returns remotestorage DBs.
//
// An instance of this must be provided in |params[GRPCDialProviderParam]| when
// calling |CreateDB| with a remotesapi remote. See *env.Remote for example.
type GRPCDialProvider interface {
GetGRPCDialParams(grpcendpoint.Config) (string, []grpc.DialOption, error)
GetGRPCDialParams(grpcendpoint.Config) (GRPCRemoteConfig, error)
}
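The shape of the refactor, sketched with local stand-ins for grpcendpoint.Config and GRPCRemoteConfig (not the dolt types): callers now receive one bundled value instead of an (endpoint, opts, err) tuple, which is what lets fields like HTTPFetcher ride along without changing every call site.
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// endpointConfig and remoteConfig are illustrative reductions of
// grpcendpoint.Config and dbfactory.GRPCRemoteConfig.
type endpointConfig struct {
	Endpoint string
	Insecure bool
}

type remoteConfig struct {
	Endpoint    string
	DialOptions []grpc.DialOption
}

// staticDialProvider satisfies the same shape as the new GRPCDialProvider.
type staticDialProvider struct{}

func (staticDialProvider) GetGRPCDialParams(c endpointConfig) (remoteConfig, error) {
	var opts []grpc.DialOption
	if c.Insecure {
		opts = append(opts, grpc.WithInsecure()) // matches the provider's insecure branch
	}
	return remoteConfig{Endpoint: c.Endpoint, DialOptions: opts}, nil
}

func main() {
	var p staticDialProvider
	cfg, err := p.GetGRPCDialParams(endpointConfig{Endpoint: "localhost:50051", Insecure: true})
	if err != nil {
		panic(err)
	}
	conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...) // lazy dial; no server needed
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("dialed", cfg.Endpoint)
}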
// DoltRemoteFactory is a DBFactory implementation for creating databases backed by a remote server that implements the
@@ -81,10 +94,13 @@ func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFo
return db, vrw, ns, err
}
// If |params[NoCachingParameter]| is set in |params| of the CreateDB call for
// a remotesapi database, then the configured database will have caching at the
// remotestorage.ChunkStore layer disabled.
var NoCachingParameter = "__dolt__NO_CACHING"
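A hedged caller-side sketch of the parameter described above; dialProvider stands in for a real GRPCDialProvider, and the factory only checks that the key is present:
// Illustrative params for a CreateDB call against a remotesapi remote.
// Presence of NoCachingParameter is what matters, not its value.
params := map[string]interface{}{
	dbfactory.GRPCDialProviderParam: dialProvider, // required for remotesapi remotes
	dbfactory.NoCachingParameter:    struct{}{},   // disables the remotestorage chunk cache
}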
func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}, dp GRPCDialProvider) (chunks.ChunkStore, error) {
endpoint, opts, err := dp.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dp.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: urlObj.Host,
Insecure: fact.insecure,
WithEnvCreds: true,
@@ -93,10 +109,10 @@ func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.Noms
return nil, err
}
opts = append(opts, grpc.WithChainUnaryInterceptor(remotestorage.EventsUnaryClientInterceptor(events.GlobalCollector)))
opts := append(cfg.DialOptions, grpc.WithChainUnaryInterceptor(remotestorage.EventsUnaryClientInterceptor(events.GlobalCollector)))
opts = append(opts, grpc.WithChainUnaryInterceptor(remotestorage.RetryingUnaryClientInterceptor))
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, opts...)
if err != nil {
return nil, err
}
@@ -106,6 +122,7 @@ func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.Noms
if err != nil {
return nil, fmt.Errorf("could not access dolt url '%s': %w", urlObj.String(), err)
}
cs = cs.WithHTTPFetcher(cfg.HTTPFetcher)
if _, ok := params[NoCachingParameter]; ok {
cs = cs.WithNoopChunkCache()

View File

@@ -183,7 +183,7 @@ func (fk ForeignKey) ValidateReferencedTableSchema(sch schema.Schema) error {
fk.Name, fk.ReferencedTableName)
}
}
if !sch.Indexes().Contains(fk.ReferencedTableIndex) {
if (fk.ReferencedTableIndex != "" && !sch.Indexes().Contains(fk.ReferencedTableIndex)) || (fk.ReferencedTableIndex == "" && sch.GetPKCols().Size() < len(fk.ReferencedTableColumns)) {
return fmt.Errorf("foreign key `%s` has entered an invalid state, referenced table `%s` is missing the index `%s`",
fk.Name, fk.ReferencedTableName, fk.ReferencedTableIndex)
}
@@ -203,7 +203,7 @@ func (fk ForeignKey) ValidateTableSchema(sch schema.Schema) error {
return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` has unexpected schema", fk.Name, fk.TableName)
}
}
if !sch.Indexes().Contains(fk.TableIndex) {
if (fk.TableIndex != "" && !sch.Indexes().Contains(fk.TableIndex)) || (fk.TableIndex == "" && sch.GetPKCols().Size() < len(fk.TableColumns)) {
return fmt.Errorf("foreign key `%s` has entered an invalid state, table `%s` is missing the index `%s`",
fk.Name, fk.TableName, fk.TableIndex)
}
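The updated predicate reads more easily in isolation. A self-contained reduction with hypothetical names: an empty index name now means the foreign key is backed by the primary key, so the check becomes either "the named index exists" or "the primary key covers enough columns":
package main

import "fmt"

// validateFkIndex is an illustrative reduction of the check above, not the
// dolt source.
func validateFkIndex(indexName string, hasIndex func(string) bool, pkColCount, fkColCount int) error {
	if indexName != "" && !hasIndex(indexName) {
		return fmt.Errorf("missing index %q", indexName)
	}
	if indexName == "" && pkColCount < fkColCount {
		return fmt.Errorf("primary key has %d columns, foreign key needs %d", pkColCount, fkColCount)
	}
	return nil
}

func main() {
	hasIndex := func(name string) bool { return name == "idx_parent" }
	fmt.Println(validateFkIndex("idx_parent", hasIndex, 1, 1)) // <nil>
	fmt.Println(validateFkIndex("", hasIndex, 1, 2))           // pk too narrow
}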

View File

@@ -16,6 +16,7 @@ package actions
import (
"context"
"errors"
"io"
"math"
"strconv"
@@ -61,7 +62,6 @@ func InferColumnTypesFromTableReader(ctx context.Context, rd table.ReadCloser, a
var curr, prev row.Row
i := newInferrer(rd.GetSchema(), args)
OUTER:
for j := 0; true; j++ {
var err error
@@ -130,10 +130,8 @@ func (inf *inferrer) inferColumnTypes() (*schema.ColCollection, error) {
col.TypeInfo = inferredTypes[tag]
col.Tag = schema.ReservedTagMin + tag
col.Constraints = []schema.ColConstraint{schema.NotNullConstraint{}}
if inf.nullable.Contains(tag) {
col.Constraints = []schema.ColConstraint(nil)
}
// for large imports, it is possible to miss all the null values, so we cannot accurately add a NOT NULL constraint
col.Constraints = []schema.ColConstraint(nil)
cols = append(cols, col)
return false, nil
@@ -218,32 +216,27 @@ func leastPermissiveNumericType(strVal string, floatThreshold float64) (ti typei
return ti
}
if strings.Contains(strVal, "-") {
i, err := strconv.ParseInt(strVal, 10, 64)
if err != nil {
return typeinfo.UnknownType
}
if i >= math.MinInt32 && i <= math.MaxInt32 {
return typeinfo.Int32Type
} else {
return typeinfo.Int64Type
}
// always parse as signed int
i, err := strconv.ParseInt(strVal, 10, 64)
// use string for out of range
if errors.Is(err, strconv.ErrRange) {
return typeinfo.StringDefaultType
}
if err != nil {
return typeinfo.UnknownType
}
// handle leading zero case
if len(strVal) > 1 && strVal[0] == '0' {
return typeinfo.StringDefaultType
}
if i >= math.MinInt32 && i <= math.MaxInt32 {
return typeinfo.Int32Type
} else {
ui, err := strconv.ParseUint(strVal, 10, 64)
if err != nil {
return typeinfo.UnknownType
}
// handle leading zero case
if len(strVal) > 1 && strVal[0] == '0' {
return typeinfo.StringDefaultType
}
if ui <= math.MaxUint32 {
return typeinfo.Uint32Type
} else {
return typeinfo.Uint64Type
}
return typeinfo.Int64Type
}
}
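A runnable restatement of the new signed-only inference rules; the function and type names are illustrative, not dolt's:
package main

import (
	"errors"
	"fmt"
	"math"
	"strconv"
)

// inferIntType restates the rules above: parse everything as a signed 64-bit
// int, map range overflow to string, reject other parse failures, treat a
// leading zero as string, then pick the narrowest of int32/int64.
func inferIntType(s string) string {
	i, err := strconv.ParseInt(s, 10, 64)
	if errors.Is(err, strconv.ErrRange) {
		return "string" // out of int64 range, e.g. a uint64-only value
	}
	if err != nil {
		return "unknown"
	}
	if len(s) > 1 && s[0] == '0' {
		return "string" // "01" keeps its leading zero
	}
	if i >= math.MinInt32 && i <= math.MaxInt32 {
		return "int32"
	}
	return "int64"
}

func main() {
	for _, s := range []string{"1", "-1", "01", "18446744073709551615", "-9223372036854775808"} {
		fmt.Printf("%-22s -> %s\n", s, inferIntType(s))
	}
}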
@@ -286,14 +279,13 @@ func chronoTypes() []typeinfo.TypeInfo {
func numericTypes() []typeinfo.TypeInfo {
// prefer:
// ints over floats
// unsigned over signed
// smaller over larger
return []typeinfo.TypeInfo{
//typeinfo.Uint8Type,
//typeinfo.Uint16Type,
//typeinfo.Uint24Type,
typeinfo.Uint32Type,
typeinfo.Uint64Type,
//typeinfo.Uint32Type,
//typeinfo.Uint64Type,
//typeinfo.Int8Type,
//typeinfo.Int16Type,
@@ -398,12 +390,6 @@ func findCommonNumericType(nums typeInfoSet) typeinfo.TypeInfo {
typeinfo.Int24Type,
typeinfo.Int16Type,
typeinfo.Int8Type,
typeinfo.Uint64Type,
typeinfo.Uint32Type,
typeinfo.Uint24Type,
typeinfo.Uint16Type,
typeinfo.Uint8Type,
}
for _, numType := range mostToLeast {
if setHasType(nums, numType) {

View File

@@ -49,14 +49,14 @@ func TestLeastPermissiveType(t *testing.T) {
{"lower bool", "true", 0.0, typeinfo.BoolType},
{"upper bool", "FALSE", 0.0, typeinfo.BoolType},
{"yes", "yes", 0.0, typeinfo.StringDefaultType},
{"one", "1", 0.0, typeinfo.Uint32Type},
{"one", "1", 0.0, typeinfo.Int32Type},
{"negative one", "-1", 0.0, typeinfo.Int32Type},
{"negative one point 0", "-1.0", 0.0, typeinfo.Float32Type},
{"negative one point 0 with FT of 0.1", "-1.0", 0.1, typeinfo.Int32Type},
{"negative one point one with FT of 0.1", "-1.1", 0.1, typeinfo.Float32Type},
{"negative one point 999 with FT of 1.0", "-1.999", 1.0, typeinfo.Int32Type},
{"zero point zero zero zero zero", "0.0000", 0.0, typeinfo.Float32Type},
{"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Uint64Type},
{"max int", strconv.FormatUint(math.MaxInt64, 10), 0.0, typeinfo.Int64Type},
{"bigger than max int", strconv.FormatUint(math.MaxUint64, 10) + "0", 0.0, typeinfo.StringDefaultType},
}
@@ -75,7 +75,7 @@ func TestLeastPermissiveNumericType(t *testing.T) {
floatThreshold float64
expType typeinfo.TypeInfo
}{
{"zero", "0", 0.0, typeinfo.Uint32Type},
{"zero", "0", 0.0, typeinfo.Int32Type},
{"zero float", "0.0", 0.0, typeinfo.Float32Type},
{"zero float with floatThreshold of 0.1", "0.0", 0.1, typeinfo.Int32Type},
{"negative float", "-1.3451234", 0.0, typeinfo.Float32Type},
@@ -85,8 +85,8 @@ func TestLeastPermissiveNumericType(t *testing.T) {
{"all zeroes", "0000", 0.0, typeinfo.StringDefaultType},
{"leading zeroes", "01", 0.0, typeinfo.StringDefaultType},
{"negative int", "-1234", 0.0, typeinfo.Int32Type},
{"fits in uint64 but not int64", strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.Uint64Type},
{"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.UnknownType},
{"fits in uint64 but not int64", strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType},
{"negative less than math.MinInt64", "-" + strconv.FormatUint(math.MaxUint64, 10), 0.0, typeinfo.StringDefaultType},
{"math.MinInt64", strconv.FormatInt(math.MinInt64, 10), 0.0, typeinfo.Int64Type},
}
@@ -142,14 +142,6 @@ func testFindCommonType(t *testing.T) {
},
expType: typeinfo.Int64Type,
},
{
name: "all unsigned ints",
inferSet: typeInfoSet{
typeinfo.Uint32Type: {},
typeinfo.Uint64Type: {},
},
expType: typeinfo.Uint64Type,
},
{
name: "all floats",
inferSet: typeInfoSet{
@@ -159,35 +151,31 @@ func testFindCommonType(t *testing.T) {
expType: typeinfo.Float64Type,
},
{
name: "32 bit ints and uints",
name: "32 bit ints",
inferSet: typeInfoSet{
typeinfo.Int32Type: {},
typeinfo.Uint32Type: {},
typeinfo.Int32Type: {},
},
expType: typeinfo.Int32Type,
},
{
name: "64 bit ints and uints",
name: "64 bit ints",
inferSet: typeInfoSet{
typeinfo.Int64Type: {},
typeinfo.Uint64Type: {},
typeinfo.Int64Type: {},
},
expType: typeinfo.Int64Type,
},
{
name: "32 bit ints, uints, and floats",
name: "32 bit ints and floats",
inferSet: typeInfoSet{
typeinfo.Int32Type: {},
typeinfo.Uint32Type: {},
typeinfo.Float32Type: {},
},
expType: typeinfo.Float32Type,
},
{
name: "64 bit ints, uints, and floats",
name: "64 bit ints and floats",
inferSet: typeInfoSet{
typeinfo.Int64Type: {},
typeinfo.Uint64Type: {},
typeinfo.Float64Type: {},
},
expType: typeinfo.Float64Type,
@@ -228,11 +216,6 @@ func testFindCommonType(t *testing.T) {
func testFindCommonTypeFromSingleType(t *testing.T) {
allTypes := []typeinfo.TypeInfo{
typeinfo.Uint8Type,
typeinfo.Uint16Type,
typeinfo.Uint24Type,
typeinfo.Uint32Type,
typeinfo.Uint64Type,
typeinfo.Int8Type,
typeinfo.Int16Type,
typeinfo.Int24Type,
@@ -388,7 +371,7 @@ func TestInferSchema(t *testing.T) {
},
map[string]typeinfo.TypeInfo{
"int": typeinfo.Int32Type,
"uint": typeinfo.Uint64Type,
"uint": typeinfo.StringDefaultType,
"uuid": typeinfo.UuidType,
"float": typeinfo.Float32Type,
"bool": typeinfo.BoolType,
@@ -404,7 +387,7 @@ func TestInferSchema(t *testing.T) {
floatThreshold: 0,
},
map[string]typeinfo.TypeInfo{
"mix": typeinfo.Uint64Type,
"mix": typeinfo.StringDefaultType,
"uuid": typeinfo.UuidType,
},
nil,
@@ -500,7 +483,7 @@ func TestInferSchema(t *testing.T) {
err = allCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
idx := schema.IndexOfConstraint(col.Constraints, schema.NotNullConstraintType)
assert.True(t, idx == -1 == test.nullableCols.Contains(col.Name), "%s unexpected nullability", col.Name)
assert.True(t, idx == -1, "%s unexpected not null constraint", col.Name)
return false, nil
})
require.NoError(t, err)

View File

@@ -25,7 +25,6 @@ import (
"time"
ps "github.com/mitchellh/go-ps"
"google.golang.org/grpc"
goerrors "gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
@@ -831,7 +830,7 @@ func (dEnv *DoltEnv) UserRPCCreds() (creds.DoltCreds, bool, error) {
}
// GetGRPCDialParams implements dbfactory.GRPCDialProvider
func (dEnv *DoltEnv) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
func (dEnv *DoltEnv) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
return NewGRPCDialProviderFromDoltEnv(dEnv).GetGRPCDialParams(config)
}

View File

@@ -16,6 +16,7 @@ package env
import (
"crypto/tls"
"net/http"
"runtime"
"strings"
"unicode"
@@ -50,7 +51,7 @@ func NewGRPCDialProviderFromDoltEnv(dEnv *DoltEnv) *GRPCDialProvider {
}
// GetGRPCDialParams implements dbfactory.GRPCDialProvider
func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
endpoint := config.Endpoint
if strings.IndexRune(endpoint, ':') == -1 {
if config.Insecure {
@@ -60,8 +61,20 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
}
}
var httpfetcher grpcendpoint.HTTPFetcher = http.DefaultClient
var opts []grpc.DialOption
if config.Insecure {
if config.TLSConfig != nil {
tc := credentials.NewTLS(config.TLSConfig)
opts = append(opts, grpc.WithTransportCredentials(tc))
httpfetcher = &http.Client{
Transport: &http.Transport{
TLSClientConfig: config.TLSConfig,
ForceAttemptHTTP2: true,
},
}
} else if config.Insecure {
opts = append(opts, grpc.WithInsecure())
} else {
tc := credentials.NewTLS(&tls.Config{})
@@ -76,14 +89,17 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
} else if config.WithEnvCreds {
rpcCreds, err := p.getRPCCreds()
if err != nil {
return "", nil, err
return dbfactory.GRPCRemoteConfig{}, err
}
if rpcCreds != nil {
opts = append(opts, grpc.WithPerRPCCredentials(rpcCreds))
}
}
return endpoint, opts, nil
return dbfactory.GRPCRemoteConfig{
Endpoint: endpoint,
DialOptions: opts,
HTTPFetcher: httpfetcher,
}, nil
}
// getRPCCreds returns any RPC credentials available to this dial provider. If a DoltEnv has been configured

View File

@@ -15,6 +15,9 @@
package grpcendpoint
import (
"crypto/tls"
"net/http"
"google.golang.org/grpc/credentials"
)
@@ -23,4 +26,12 @@ type Config struct {
Insecure bool
Creds credentials.PerRPCCredentials
WithEnvCreds bool
// If non-nil, this is used for transport level security in the dial
// options, instead of a default option based on `Insecure`.
TLSConfig *tls.Config
}
type HTTPFetcher interface {
Do(req *http.Request) (*http.Response, error)
}
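*http.Client already satisfies HTTPFetcher, which is why http.DefaultClient is the default fetcher in the dial provider. A short sketch of the custom-TLS variant built when Config.TLSConfig is set:
package main

import (
	"crypto/tls"
	"net/http"
)

// HTTPFetcher is copied from the interface above for a self-contained example.
type HTTPFetcher interface {
	Do(req *http.Request) (*http.Response, error)
}

// newTLSFetcher builds a fetcher with a custom TLS config, the same shape the
// dial provider constructs when grpcendpoint.Config.TLSConfig is non-nil.
func newTLSFetcher(cfg *tls.Config) HTTPFetcher {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   cfg,
			ForceAttemptHTTP2: true,
		},
	}
}

func main() {
	var f HTTPFetcher = newTLSFetcher(&tls.Config{MinVersion: tls.VersionTLS12})
	_ = f // pass wherever a fetcher is accepted, e.g. GRPCRemoteConfig.HTTPFetcher
}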

View File

@@ -244,7 +244,7 @@ func nomsParentFkConstraintViolations(
continue
}
postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(postParent.Index, postParentRow)
postParentIndexPartialKey, err := row.ReduceToIndexPartialKey(foreignKey.TableColumns, postParent.Index, postParentRow)
if err != nil {
return nil, false, err
}
@@ -362,8 +362,14 @@ func nomsChildFkConstraintViolations(
preChildRowData types.Map,
) (*doltdb.Table, bool, error) {
foundViolations := false
postParentIndexTags := postParent.Index.IndexedColumnTags()
postChildIndexTags := postChild.Index.IndexedColumnTags()
var postParentIndexTags, postChildIndexTags []uint64
if postParent.Index.Name() == "" {
postParentIndexTags = foreignKey.ReferencedTableColumns
postChildIndexTags = foreignKey.TableColumns
} else {
postParentIndexTags = postParent.Index.IndexedColumnTags()
postChildIndexTags = postChild.Index.IndexedColumnTags()
}
postChildCVMap, err := postChild.Table.GetConstraintViolations(ctx)
if err != nil {
return nil, false, err
@@ -411,7 +417,7 @@ func nomsChildFkConstraintViolations(
continue
}
postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChild.Index, postChildRow)
postChildIndexPartialKey, err := row.ReduceToIndexPartialKey(postChildIndexTags, postChild.Index, postChildRow)
if err != nil {
return nil, false, err
}
@@ -496,6 +502,28 @@ func newConstraintViolationsLoadedTable(ctx context.Context, tblName, idxName st
if err != nil {
return nil, false, err
}
// Create Primary Key Index
if idxName == "" {
pkCols := sch.GetPKCols()
pkIdxColl := schema.NewIndexCollection(pkCols, pkCols)
pkIdxProps := schema.IndexProperties{
IsUnique: true,
IsUserDefined: false,
Comment: "",
}
pkIdx := schema.NewIndex("", pkCols.SortedTags, pkCols.SortedTags, pkIdxColl, pkIdxProps)
return &constraintViolationsLoadedTable{
TableName: trueTblName,
Table: tbl,
Schema: sch,
RowData: rowData,
Index: pkIdx,
IndexSchema: pkIdx.Schema(),
IndexData: rowData,
}, true, nil
}
idx, ok := sch.Indexes().GetByNameCaseInsensitive(idxName)
if !ok {
return &constraintViolationsLoadedTable{

View File

@@ -64,7 +64,7 @@ func prollyParentFkConstraintViolations(
err = prolly.DiffMaps(ctx, preParentRowData, postParentRowData, func(ctx context.Context, diff tree.Diff) error {
switch diff.Type {
case tree.RemovedDiff, tree.ModifiedDiff:
partialKey, hadNulls := makePartialKey(partialKB, postParent.Index, postParent.Schema, val.Tuple(diff.Key), val.Tuple(diff.From), preParentRowData.Pool())
partialKey, hadNulls := makePartialKey(partialKB, foreignKey.ReferencedTableColumns, postParent.Index, postParent.Schema, val.Tuple(diff.Key), val.Tuple(diff.From), preParentRowData.Pool())
if hadNulls {
// row had some nulls previously, so it couldn't have been a parent
return nil
@@ -147,7 +147,7 @@ func prollyChildFkConstraintViolations(
switch diff.Type {
case tree.AddedDiff, tree.ModifiedDiff:
k, v := val.Tuple(diff.Key), val.Tuple(diff.To)
partialKey, hasNulls := makePartialKey(partialKB, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool())
partialKey, hasNulls := makePartialKey(partialKB, foreignKey.TableColumns, postChild.Index, postChild.Schema, k, v, preChildRowData.Pool())
if hasNulls {
return nil
}
@@ -289,8 +289,11 @@ func createCVsForPartialKeyMatches(
return createdViolation, nil
}
func makePartialKey(kb *val.TupleBuilder, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) {
for i, tag := range idxSch.IndexedColumnTags() {
func makePartialKey(kb *val.TupleBuilder, tags []uint64, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) {
if idxSch.Name() != "" {
tags = idxSch.IndexedColumnTags()
}
for i, tag := range tags {
if j, ok := tblSch.GetPKCols().TagToIdx[tag]; ok {
if k.FieldIsNull(j) {
return nil, true

View File

@@ -27,6 +27,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
@@ -277,6 +278,16 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
return nil, err
}
removedTables, err := getRemovedTableNames(ctx, oldParent, oldRoot)
if err != nil {
return nil, err
}
migrated, err = migrated.RemoveTables(ctx, true, false, removedTables...)
if err != nil {
return nil, err
}
err = oldRoot.IterTables(ctx, func(name string, oldTbl *doltdb.Table, sch schema.Schema) (bool, error) {
ok, err := oldTbl.HasConflicts(ctx)
if err != nil {
@@ -345,6 +356,21 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
return migrated, nil
}
// renames also get returned here
func getRemovedTableNames(ctx context.Context, prev, curr *doltdb.RootValue) ([]string, error) {
prevNames, err := prev.GetTableNames(ctx)
if err != nil {
return nil, err
}
tblNameSet := set.NewStrSet(prevNames)
currNames, err := curr.GetTableNames(ctx)
if err != nil {
return nil, err
}
tblNameSet.Remove(currNames...)
return tblNameSet.AsSlice(), nil
}
func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTbl, newParentTbl *doltdb.Table) (*doltdb.Table, error) {
idx, err := oldParentTbl.GetRowData(ctx)
if err != nil {

View File

@@ -42,21 +42,24 @@ import (
var ErrUnimplemented = errors.New("unimplemented")
type RemoteChunkStore struct {
HttpHost string
csCache DBCache
bucket string
fs filesys.Filesys
lgr *logrus.Entry
sealer Sealer
HttpHost string
httpScheme string
csCache DBCache
bucket string
fs filesys.Filesys
lgr *logrus.Entry
sealer Sealer
remotesapi.UnimplementedChunkStoreServiceServer
}
func NewHttpFSBackedChunkStore(lgr *logrus.Entry, httpHost string, csCache DBCache, fs filesys.Filesys, sealer Sealer) *RemoteChunkStore {
func NewHttpFSBackedChunkStore(lgr *logrus.Entry, httpHost string, csCache DBCache, fs filesys.Filesys, scheme string, sealer Sealer) *RemoteChunkStore {
return &RemoteChunkStore{
HttpHost: httpHost,
csCache: csCache,
bucket: "",
fs: fs,
HttpHost: httpHost,
httpScheme: scheme,
csCache: csCache,
bucket: "",
fs: fs,
lgr: lgr.WithFields(logrus.Fields{
"service": "dolt.services.remotesapi.v1alpha1.ChunkStoreServiceServer",
}),
@@ -286,7 +289,7 @@ func (rs *RemoteChunkStore) getHost(md metadata.MD) string {
func (rs *RemoteChunkStore) getDownloadUrl(logger *logrus.Entry, md metadata.MD, path string) (*url.URL, error) {
host := rs.getHost(md)
return &url.URL{
Scheme: "http",
Scheme: rs.httpScheme,
Host: host,
Path: path,
}, nil
@@ -359,7 +362,7 @@ func (rs *RemoteChunkStore) getUploadUrl(logger *logrus.Entry, md metadata.MD, r
params.Add("content_length", strconv.Itoa(int(tfd.ContentLength)))
params.Add("content_hash", base64.RawURLEncoding.EncodeToString(tfd.ContentHash))
return &url.URL{
Scheme: "http",
Scheme: rs.httpScheme,
Host: rs.getHost(md),
Path: fmt.Sprintf("%s/%s", repoPath, fileID),
RawQuery: params.Encode(),

View File

@@ -16,6 +16,7 @@ package remotesrv
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
@@ -39,6 +40,8 @@ type Server struct {
grpcSrv *grpc.Server
httpPort int
httpSrv http.Server
tlsConfig *tls.Config
}
func (s *Server) GracefulStop() {
@@ -55,6 +58,11 @@ type ServerArgs struct {
DBCache DBCache
ReadOnly bool
Options []grpc.ServerOption
// If supplied, the listener(s) returned from Listeners() will be TLS
// listeners. The scheme used in the URLs returned from the gRPC server
// will be https.
TLSConfig *tls.Config
}
func NewServer(args ServerArgs) (*Server, error) {
@@ -70,10 +78,16 @@ func NewServer(args ServerArgs) (*Server, error) {
return nil, err
}
scheme := "http"
if args.TLSConfig != nil {
scheme = "https"
}
s.tlsConfig = args.TLSConfig
s.wg.Add(2)
s.grpcPort = args.GrpcPort
s.grpcSrv = grpc.NewServer(append([]grpc.ServerOption{grpc.MaxRecvMsgSize(128 * 1024 * 1024)}, args.Options...)...)
var chnkSt remotesapi.ChunkStoreServiceServer = NewHttpFSBackedChunkStore(args.Logger, args.HttpHost, args.DBCache, args.FS, sealer)
var chnkSt remotesapi.ChunkStoreServiceServer = NewHttpFSBackedChunkStore(args.Logger, args.HttpHost, args.DBCache, args.FS, scheme, sealer)
if args.ReadOnly {
chnkSt = ReadOnlyChunkStore{chnkSt}
}
@@ -113,14 +127,25 @@ type Listeners struct {
}
func (s *Server) Listeners() (Listeners, error) {
httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.httpPort))
var httpListener net.Listener
var grpcListener net.Listener
var err error
if s.tlsConfig != nil {
httpListener, err = tls.Listen("tcp", fmt.Sprintf(":%d", s.httpPort), s.tlsConfig)
} else {
httpListener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.httpPort))
}
if err != nil {
return Listeners{}, err
}
if s.httpPort == s.grpcPort {
return Listeners{http: httpListener}, nil
}
grpcListener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort))
if s.tlsConfig != nil {
grpcListener, err = tls.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort), s.tlsConfig)
} else {
grpcListener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort))
}
if err != nil {
httpListener.Close()
return Listeners{}, err
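A runnable sketch of the tls.Listen branch, generating a throwaway self-signed certificate so the example needs no key files; a production server would take its tls.Config from LoadClusterTLSConfig instead:
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

// newSelfSignedTLSListener follows the tls.Listen path above, but mints an
// in-memory self-signed certificate instead of loading tls_key/tls_cert files.
func newSelfSignedTLSListener(addr string) (net.Listener, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "localhost"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, err
	}
	cert := tls.Certificate{Certificate: [][]byte{der}, PrivateKey: key}
	return tls.Listen("tcp", addr, &tls.Config{Certificates: []tls.Certificate{cert}})
}

func main() {
	ln, err := newSelfSignedTLSListener("127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("TLS listener on", ln.Addr())
}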

View File

@@ -163,15 +163,19 @@ func ReduceToIndexKeysFromTagMap(nbf *types.NomsBinFormat, idx schema.Index, tag
}
// ReduceToIndexPartialKey creates an index record from a primary storage record.
func ReduceToIndexPartialKey(idx schema.Index, r Row) (types.Tuple, error) {
func ReduceToIndexPartialKey(tags []uint64, idx schema.Index, r Row) (types.Tuple, error) {
var vals []types.Value
for _, tag := range idx.IndexedColumnTags() {
if idx.Name() != "" {
tags = idx.IndexedColumnTags()
}
for _, tag := range tags {
val, ok := r.GetColVal(tag)
if !ok {
val = types.NullValue
}
vals = append(vals, types.Uint(tag), val)
}
return types.NewTuple(r.Format(), vals...)
}
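The same empty-name fallback appears here, in makePartialKey, and in the noms constraint-violation walker. Reduced to a hypothetical helper for clarity; the real call sites inline the check:
// indexedTags captures the fallback pattern added across this PR: a named
// index supplies its own column tags, while the empty name (a primary-key-
// backed foreign key) falls back to the tags stored on the foreign key.
func indexedTags(indexName string, indexTags, fkTags []uint64) []uint64 {
	if indexName != "" {
		return indexTags
	}
	return fkTags
}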

View File

@@ -68,12 +68,17 @@ type indexImpl struct {
comment string
}
func NewIndex(name string, tags, allTags []uint64, indexColl *indexCollectionImpl, props IndexProperties) Index {
func NewIndex(name string, tags, allTags []uint64, indexColl IndexCollection, props IndexProperties) Index {
var indexCollImpl *indexCollectionImpl
if indexColl != nil {
indexCollImpl = indexColl.(*indexCollectionImpl)
}
return &indexImpl{
name: name,
tags: tags,
allTags: allTags,
indexColl: indexColl,
indexColl: indexCollImpl,
isUnique: props.IsUnique,
isUserDefined: props.IsUserDefined,
comment: props.Comment,

View File

@@ -462,7 +462,9 @@ func TestDropPks(t *testing.T) {
fk, ok := foreignKeyCollection.GetByNameCaseInsensitive(childFkName)
assert.True(t, ok)
assert.Equal(t, childName, fk.TableName)
assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex)
if tt.fkIdxName != "" && fk.ReferencedTableIndex != "" {
assert.Equal(t, tt.fkIdxName, fk.ReferencedTableIndex)
}
parent, ok, err := root.GetTable(ctx, parentName)
assert.NoError(t, err)

View File

@@ -23,6 +23,9 @@ type Config interface {
type RemotesAPIConfig interface {
Port() int
TLSKey() string
TLSCert() string
TLSCA() string
}
type StandbyRemoteConfig interface {

View File

@@ -193,7 +193,7 @@ func (c *Controller) applyCommitHooks(ctx context.Context, name string, bt *sql.
}
func (c *Controller) gRPCDialProvider(denv *env.DoltEnv) dbfactory.GRPCDialProvider {
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor}
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor, c.cfg.RemotesAPIConfig().TLSCA()}
}
func (c *Controller) RegisterStoredProcedures(store procedurestore) {

View File

@@ -15,6 +15,10 @@
package cluster
import (
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
"time"
"google.golang.org/grpc"
@@ -30,18 +34,24 @@ import (
// - client interceptors for transmitting our replication role.
// - do not use environment credentials. (for now).
type grpcDialProvider struct {
orig dbfactory.GRPCDialProvider
ci *clientinterceptor
orig dbfactory.GRPCDialProvider
ci *clientinterceptor
caPath string
}
func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
config.WithEnvCreds = false
endpoint, opts, err := p.orig.GetGRPCDialParams(config)
func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
tlsConfig, err := p.tlsConfig()
if err != nil {
return "", nil, err
return dbfactory.GRPCRemoteConfig{}, err
}
opts = append(opts, p.ci.Options()...)
opts = append(opts, grpc.WithConnectParams(grpc.ConnectParams{
config.TLSConfig = tlsConfig
config.WithEnvCreds = false
cfg, err := p.orig.GetGRPCDialParams(config)
if err != nil {
return dbfactory.GRPCRemoteConfig{}, err
}
cfg.DialOptions = append(cfg.DialOptions, p.ci.Options()...)
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(grpc.ConnectParams{
Backoff: backoff.Config{
BaseDelay: 250 * time.Millisecond,
Multiplier: 1.6,
@@ -50,5 +60,77 @@ func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
},
MinConnectTimeout: 250 * time.Millisecond,
}))
return endpoint, opts, nil
return cfg, nil
}
// Within a cluster, if remotesapi is configured with a tls_ca, we take the
// following semantics:
// * The configured tls_ca file holds a set of PEM encoded x509 certificates,
// all of which are trusted roots for the outbound connections the
// remotestorage client establishes.
// * The certificate chain presented by the server must validate to a root
// which was present in tls_ca. In particular, every certificate in the chain
// must be within its validity window, the signatures must be valid, key usage
// and isCa must be correctly set for the roots and the intermediates, and the
// leaf must have extended key usage server auth.
// * On the other hand, no verification is done against the SAN or the Subject
// of the certificate.
//
// We use these TLS semantics for both connections to the gRPC endpoint which
// is the actual remotesapi, and for connections to any HTTPS endpoints to
// which the gRPC service returns URLs. For now, this works perfectly for our
// use case, but it's tightly coupled to `cluster:` deployment topologies and
// the like.
//
// If tls_ca is not set then default TLS handling is performed. In particular,
// if the remotesapi endpoint is HTTPS, then the system roots are used and
// ServerName is verified against the presented URL SANs of the certificates.
func (p grpcDialProvider) tlsConfig() (*tls.Config, error) {
if p.caPath == "" {
return nil, nil
}
pem, err := ioutil.ReadFile(p.caPath)
if err != nil {
return nil, err
}
roots := x509.NewCertPool()
if ok := roots.AppendCertsFromPEM(pem); !ok {
return nil, errors.New("error loading ca roots from " + p.caPath)
}
verifyFunc := func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
certs := make([]*x509.Certificate, len(rawCerts))
var err error
for i, asn1Data := range rawCerts {
certs[i], err = x509.ParseCertificate(asn1Data)
if err != nil {
return err
}
}
keyUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
opts := x509.VerifyOptions{
Roots: roots,
CurrentTime: time.Now(),
Intermediates: x509.NewCertPool(),
KeyUsages: keyUsages,
}
for _, cert := range certs[1:] {
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
if err != nil {
return err
}
return nil
}
return &tls.Config{
// We have to InsecureSkipVerify because ServerName is always
// set by the grpc dial provider and golang tls.Config does not
// have good support for performing certificate validation
// without server name validation.
InsecureSkipVerify: true,
VerifyPeerCertificate: verifyFunc,
NextProtos: []string{"h2"},
}, nil
}

View File

@@ -1482,7 +1482,7 @@ var DoltConstraintViolationTransactionTests = []queries.TransactionTest{
},
{
Query: "/* client b */ INSERT INTO child VALUES (1, 1);",
ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `nk01br56`, table: `child`, referenced table: `parent`, key: `[1]`",
ExpectedErrStr: "cannot add or update a child row - Foreign key violation on fk: `0050p5ek`, table: `child`, referenced table: `parent`, key: `[1]`",
},
},
},

View File

@@ -1971,76 +1971,30 @@ func (t *AlterableDoltTable) AddForeignKey(ctx *sql.Context, sqlFk sql.ForeignKe
refColTags[i] = refCol.Tag
}
var tableIndexName, refTableIndexName string
tableIndex, ok, err := findIndexWithPrefix(t.sch, sqlFk.Columns)
if err != nil {
return err
}
if !ok {
// The engine matched on a primary key, and Dolt does not yet support using the primary key within the
// schema.Index interface (which is used internally to represent indexes across the codebase). In the
// meantime, we must generate a duplicate key over the primary key.
//TODO: use the primary key as-is
idxReturn, err := creation.CreateIndex(ctx, tbl, "", sqlFk.Columns, false, false, "", editor.Options{
ForeignKeyChecksDisabled: true,
Deaf: t.opts.Deaf,
Tempdir: t.opts.Tempdir,
})
if err != nil {
return err
}
tableIndex = idxReturn.NewIndex
tbl = idxReturn.NewTable
root, err = root.PutTable(ctx, t.tableName, idxReturn.NewTable)
if sqlFk.IsSelfReferential() {
refTbl = idxReturn.NewTable
}
// Use secondary index if found; otherwise it will use empty string, indicating primary key
if ok {
tableIndexName = tableIndex.Name()
}
refTableIndex, ok, err := findIndexWithPrefix(refSch, sqlFk.ParentColumns)
if err != nil {
return err
}
if !ok {
// The engine matched on a primary key, and Dolt does not yet support using the primary key within the
// schema.Index interface (which is used internally to represent indexes across the codebase). In the
// meantime, we must generate a duplicate key over the primary key.
//TODO: use the primary key as-is
var refPkTags []uint64
for _, i := range refSch.GetPkOrdinals() {
refPkTags = append(refPkTags, refSch.GetAllCols().GetByIndex(i).Tag)
}
var colNames []string
for _, t := range refColTags {
c, _ := refSch.GetAllCols().GetByTag(t)
colNames = append(colNames, c.Name)
}
// Our duplicate index is only unique if it's the entire primary key (which is by definition unique)
unique := len(refPkTags) == len(refColTags)
idxReturn, err := creation.CreateIndex(ctx, refTbl, "", colNames, unique, false, "", editor.Options{
ForeignKeyChecksDisabled: true,
Deaf: t.opts.Deaf,
Tempdir: t.opts.Tempdir,
})
if err != nil {
return err
}
refTbl = idxReturn.NewTable
refTableIndex = idxReturn.NewIndex
root, err = root.PutTable(ctx, sqlFk.ParentTable, idxReturn.NewTable)
if err != nil {
return err
}
// Use secondary index if found; otherwise it will use empty string, indicating primary key
if ok {
refTableIndexName = refTableIndex.Name()
}
doltFk = doltdb.ForeignKey{
Name: sqlFk.Name,
TableName: sqlFk.Table,
TableIndex: tableIndex.Name(),
TableIndex: tableIndexName,
TableColumns: colTags,
ReferencedTableName: sqlFk.ParentTable,
ReferencedTableIndex: refTableIndex.Name(),
ReferencedTableIndex: refTableIndexName,
ReferencedTableColumns: refColTags,
OnUpdate: onUpdateRefAction,
OnDelete: onDeleteRefAction,

View File

@@ -105,6 +105,44 @@ func (tea *BulkImportTEA) Get(ctx context.Context, keyHash hash.Hash, key types.
return &doltKVP{k: key, v: v}, true, nil
}
func (tea *BulkImportTEA) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) {
tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx)
if err != nil {
return nil, err
}
key := tplKeyVal.(types.Tuple)
tplValVal, err := r.NomsMapValue(idxSch).Value(ctx)
if err != nil {
return nil, err
}
val := tplValVal.(types.Tuple)
keyHash, err := key.Hash(key.Format())
if err != nil {
return nil, err
}
matches = append(matches, hashedTuple{key, val, keyHash})
}
if err != io.EOF {
return nil, err
}
for i := len(matches) - 1; i >= 0; i-- {
if _, ok := tea.deletes[matches[i].hash]; ok {
matches[i] = matches[len(matches)-1]
matches = matches[:len(matches)-1]
}
}
return matches, nil
}
// Commit is the default behavior and does nothing
func (tea *BulkImportTEA) Commit(ctx context.Context, nbf *types.NomsBinFormat) error {
return nil

View File

@@ -280,18 +280,36 @@ func (te *pkTableEditor) GetIndexedRows(ctx context.Context, key types.Tuple, in
if err != nil {
return nil, err
}
kvp, ok, err := te.tea.Get(ctx, keyHash, key)
pkKeys, err := te.tea.HasPartial(ctx, te.tSch, keyHash, key)
if err != nil {
return nil, err
}
if !ok {
if len(pkKeys) == 0 {
return nil, nil
}
dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v)
if err != nil {
return nil, err
rows := make([]row.Row, len(pkKeys))
for i, pkKey := range pkKeys {
pkKeyHash, err := pkKey.key.Hash(pkKey.key.Format())
if err != nil {
return nil, err
}
kvp, ok, err := te.tea.Get(ctx, pkKeyHash, pkKey.key)
if err != nil {
return nil, err
}
if !ok {
return nil, nil
}
dRow, err := row.FromNoms(te.Schema(), kvp.k, kvp.v)
if err != nil {
return nil, err
}
rows[i] = dRow
}
return []row.Row{dRow}, nil
return rows, nil
}
return nil, fmt.Errorf("an index editor for `%s` could not be found on table `%s`", indexName, te.name)

View File

@@ -16,7 +16,12 @@ package editor
import (
"context"
"io"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
@@ -43,6 +48,9 @@ type TableEditAccumulator interface {
// This assumes that the given hash is for the given key.
Get(ctx context.Context, keyHash hash.Hash, key types.Tuple) (*doltKVP, bool, error)
// HasPartial returns the hashed key/value tuples in the TableEditAccumulator that match the given partialKey
HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error)
// Commit applies the in memory edits to the list of committed in memory edits
Commit(ctx context.Context, nbf *types.NomsBinFormat) error
@@ -174,6 +182,51 @@ func (tea *tableEditAccumulatorImpl) Get(ctx context.Context, keyHash hash.Hash,
return &doltKVP{k: key, v: v}, true, err
}
func (tea *tableEditAccumulatorImpl) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
for r, err = mapIter.ReadRow(ctx); err == nil; r, err = mapIter.ReadRow(ctx) {
tplKeyVal, err := r.NomsMapKey(idxSch).Value(ctx)
if err != nil {
return nil, err
}
key := tplKeyVal.(types.Tuple)
tplValVal, err := r.NomsMapValue(idxSch).Value(ctx)
if err != nil {
return nil, err
}
val := tplValVal.(types.Tuple)
keyHash, err := key.Hash(key.Format())
if err != nil {
return nil, err
}
matches = append(matches, hashedTuple{key, val, keyHash})
}
if err != io.EOF {
return nil, err
}
orderedMods := []*inMemModifications{tea.committed, tea.uncommitted}
for _, mods := range orderedMods {
for i := len(matches) - 1; i >= 0; i-- {
if _, ok := mods.adds[matches[i].hash]; ok {
matches[i] = matches[len(matches)-1]
matches = matches[:len(matches)-1]
}
}
if added, ok := mods.adds[partialKeyHash]; ok {
matches = append(matches, hashedTuple{key: added.k, value: added.v})
}
}
return matches, nil
}
func (tea *tableEditAccumulatorImpl) flushUncommitted() {
// if we are not already actively writing edits to the uncommittedEA then change the state and push all in mem edits
// to a types.EditAccumulator

View File

@@ -194,7 +194,7 @@ func (suite *BlockStoreSuite) TestChunkStorePutMoreThanMemTable() {
if suite.putCountFn != nil {
suite.Equal(2, suite.putCountFn())
}
specs, err := suite.store.tables.ToSpecs()
specs, err := suite.store.tables.toSpecs()
suite.NoError(err)
suite.Len(specs, 2)
}
@@ -415,22 +415,15 @@ func (suite *BlockStoreSuite) TestChunkStorePutWithRebase() {
func TestBlockStoreConjoinOnCommit(t *testing.T) {
stats := &Stats{}
assertContainAll := func(t *testing.T, store chunks.ChunkStore, srcs ...chunkSource) {
rdrs := make(chunkReaderGroup, len(srcs))
for i, src := range srcs {
c, err := src.Clone()
assertContainAll := func(t *testing.T, store chunks.ChunkStore, sources ...chunkSource) {
ctx := context.Background()
for _, src := range sources {
err := extractAllChunks(ctx, src, func(rec extractRecord) {
ok, err := store.Has(context.Background(), hash.Hash(rec.a))
require.NoError(t, err)
assert.True(t, ok)
})
require.NoError(t, err)
rdrs[i] = c
}
chunkChan := make(chan extractRecord, mustUint32(rdrs.count()))
err := rdrs.extract(context.Background(), chunkChan)
require.NoError(t, err)
close(chunkChan)
for rec := range chunkChan {
ok, err := store.Has(context.Background(), hash.Hash(rec.a))
require.NoError(t, err)
assert.True(t, ok)
}
}
@@ -509,7 +502,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
assert.True(t, ok)
assertContainAll(t, smallTableStore, srcs...)
for _, src := range srcs {
err := src.Close()
err := src.close()
require.NoError(t, err)
}
})
@@ -546,7 +539,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
assert.True(t, ok)
assertContainAll(t, smallTableStore, srcs...)
for _, src := range srcs {
err := src.Close()
err := src.close()
require.NoError(t, err)
}
})

View File

@@ -36,12 +36,12 @@ func newReaderFromIndexData(q MemoryQuotaProvider, idxData []byte, name addr, tr
return &chunkSourceAdapter{tr, name}, nil
}
func (csa chunkSourceAdapter) Close() error {
return csa.tableReader.Close()
func (csa chunkSourceAdapter) close() error {
return csa.tableReader.close()
}
func (csa chunkSourceAdapter) Clone() (chunkSource, error) {
tr, err := csa.tableReader.Clone()
func (csa chunkSourceAdapter) clone() (chunkSource, error) {
tr, err := csa.tableReader.clone()
if err != nil {
return &chunkSourceAdapter{}, err
}

View File

@@ -64,7 +64,7 @@ func makeTestSrcs(t *testing.T, tableSizes []uint32, p tablePersister) (srcs chu
}
cs, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
c, err := cs.Clone()
c, err := cs.clone()
require.NoError(t, err)
srcs = append(srcs, c)
}
@@ -76,7 +76,7 @@ func TestConjoin(t *testing.T) {
makeTestTableSpecs := func(tableSizes []uint32, p tablePersister) (specs []tableSpec) {
for _, src := range makeTestSrcs(t, tableSizes, p) {
specs = append(specs, tableSpec{mustAddr(src.hash()), mustUint32(src.count())})
err := src.Close()
err := src.close()
require.NoError(t, err)
}
return
@@ -93,28 +93,34 @@ func TestConjoin(t *testing.T) {
}
assertContainAll := func(t *testing.T, p tablePersister, expect, actual []tableSpec) {
open := func(specs []tableSpec) (srcs chunkReaderGroup) {
open := func(specs []tableSpec) (sources chunkSources) {
for _, sp := range specs {
cs, err := p.Open(context.Background(), sp.name, sp.chunkCount, nil)
if err != nil {
require.NoError(t, err)
}
srcs = append(srcs, cs)
sources = append(sources, cs)
}
return
}
expectSrcs, actualSrcs := open(expect), open(actual)
chunkChan := make(chan extractRecord, mustUint32(expectSrcs.count()))
err := expectSrcs.extract(context.Background(), chunkChan)
require.NoError(t, err)
close(chunkChan)
for rec := range chunkChan {
has, err := actualSrcs.has(rec.a)
expectSrcs, actualSrcs := open(expect), open(actual)
ctx := context.Background()
for _, src := range expectSrcs {
err := extractAllChunks(ctx, src, func(rec extractRecord) {
var ok bool
for _, src := range actualSrcs {
var err error
ok, err = src.has(rec.a)
require.NoError(t, err)
if ok {
break
}
}
assert.True(t, ok)
})
require.NoError(t, err)
assert.True(t, has)
}
}

View File

@@ -118,12 +118,12 @@ func (mmtr *fileTableReader) hash() (addr, error) {
return mmtr.h, nil
}
func (mmtr *fileTableReader) Close() error {
return mmtr.tableReader.Close()
func (mmtr *fileTableReader) close() error {
return mmtr.tableReader.close()
}
func (mmtr *fileTableReader) Clone() (chunkSource, error) {
tr, err := mmtr.tableReader.Clone()
func (mmtr *fileTableReader) clone() (chunkSource, error) {
tr, err := mmtr.tableReader.clone()
if err != nil {
return &fileTableReader{}, err
}

View File

@@ -218,6 +218,6 @@ func (mt *memTable) write(haver chunkReader, stats *Stats) (name addr, data []by
return name, buff[:tableSize], count, nil
}
func (mt *memTable) Close() error {
func (mt *memTable) close() error {
return nil
}

View File

@@ -307,22 +307,10 @@ func (crg chunkReaderGroup) uncompressedLen() (data uint64, err error) {
return
}
func (crg chunkReaderGroup) extract(ctx context.Context, chunks chan<- extractRecord) error {
for _, haver := range crg {
err := haver.extract(ctx, chunks)
if err != nil {
return err
}
}
return nil
}
func (crg chunkReaderGroup) Close() error {
func (crg chunkReaderGroup) close() error {
var firstErr error
for _, c := range crg {
err := c.Close()
err := c.close()
if err != nil && firstErr == nil {
firstErr = err
}

View File

@@ -95,12 +95,12 @@ func (ccs *persistingChunkSource) getReader() chunkReader {
return ccs.cs
}
func (ccs *persistingChunkSource) Close() error {
func (ccs *persistingChunkSource) close() error {
// persistingChunkSource does not own |cs| or |mt|. No need to close them.
return nil
}
func (ccs *persistingChunkSource) Clone() (chunkSource, error) {
func (ccs *persistingChunkSource) clone() (chunkSource, error) {
// persistingChunkSource does not own |cs| or |mt|. No need to clone.
return ccs, nil
}
@@ -240,20 +240,6 @@ func (ccs *persistingChunkSource) size() (uint64, error) {
return ccs.cs.size()
}
func (ccs *persistingChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error {
err := ccs.wait()
if err != nil {
return err
}
if ccs.cs == nil {
return ErrNoChunkSource
}
return ccs.cs.extract(ctx, chunks)
}
type emptyChunkSource struct{}
func (ecs emptyChunkSource) has(h addr) (bool, error) {
@@ -304,14 +290,10 @@ func (ecs emptyChunkSource) calcReads(reqs []getRecord, blockSize uint64) (reads
return 0, true, nil
}
func (ecs emptyChunkSource) extract(ctx context.Context, chunks chan<- extractRecord) error {
func (ecs emptyChunkSource) close() error {
return nil
}
func (ecs emptyChunkSource) Close() error {
return nil
}
func (ecs emptyChunkSource) Clone() (chunkSource, error) {
func (ecs emptyChunkSource) clone() (chunkSource, error) {
return ecs, nil
}

View File

@@ -579,18 +579,20 @@ func compactSourcesToBuffer(sources chunkSources) (name addr, data []byte, chunk
tw := newTableWriter(buff, nil)
errString := ""
ctx := context.Background()
for _, src := range sources {
chunks := make(chan extractRecord)
ch := make(chan extractRecord)
go func() {
defer close(chunks)
err := src.extract(context.Background(), chunks)
defer close(ch)
err := extractAllChunks(ctx, src, func(rec extractRecord) {
ch <- rec
})
if err != nil {
chunks <- extractRecord{a: mustAddr(src.hash()), err: err}
ch <- extractRecord{a: mustAddr(src.hash()), err: err}
}
}()
for rec := range chunks {
for rec := range ch {
if rec.err != nil {
errString += fmt.Sprintf("Failed to extract %s:\n %v\n******\n\n", rec.a, rec.err)
continue
@@ -625,3 +627,25 @@ func (ftp fakeTablePersister) Open(ctx context.Context, name addr, chunkCount ui
func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ manifestContents) error {
return chunks.ErrUnsupportedOperation
}
func extractAllChunks(ctx context.Context, src chunkSource, cb func(rec extractRecord)) (err error) {
var index tableIndex
if index, err = src.index(); err != nil {
return err
}
var a addr
for i := uint32(0); i < index.ChunkCount(); i++ {
_, err = index.IndexEntry(i, &a)
if err != nil {
return err
}
data, err := src.get(ctx, a, nil)
if err != nil {
return err
}
cb(extractRecord{a: a, data: data})
}
return
}
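For reference, a minimal sketch of draining a chunkSource with the new helper, collecting records into a slice rather than streaming them (a hypothetical function, built only on the extractAllChunks signature above):

// collectChunks gathers every chunk in |src| into memory via extractAllChunks.
func collectChunks(ctx context.Context, src chunkSource) ([]extractRecord, error) {
	var recs []extractRecord
	err := extractAllChunks(ctx, src, func(rec extractRecord) {
		recs = append(recs, rec)
	})
	if err != nil {
		return nil, err
	}
	return recs, nil
}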

View File

@@ -294,7 +294,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
}
}
newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}
@@ -302,7 +302,7 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
nbs.upstream = updatedContents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return manifestContents{}, err
}
@@ -371,7 +371,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
}
}
newTables, err := nbs.tables.Rebase(ctx, updatedContents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, updatedContents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}
@@ -379,7 +379,7 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
nbs.upstream = updatedContents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return manifestContents{}, err
}
@@ -587,7 +587,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
}
if exists {
newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats)
if err != nil {
return nil, err
@@ -596,7 +596,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
nbs.upstream = contents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return nil, err
}
@@ -647,7 +647,7 @@ func (nbs *NomsBlockStore) addChunk(ctx context.Context, h addr, data []byte) bo
nbs.mt = newMemTable(nbs.mtSize)
}
if !nbs.mt.addChunk(h, data) {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = newMemTable(nbs.mtSize)
return nbs.mt.addChunk(h, data)
}
@@ -922,7 +922,7 @@ func (nbs *NomsBlockStore) Rebase(ctx context.Context) error {
return nil
}
newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, contents.specs, nbs.stats)
if err != nil {
return err
}
@@ -930,7 +930,7 @@ func (nbs *NomsBlockStore) Rebase(ctx context.Context) error {
nbs.upstream = contents
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return err
}
@@ -952,7 +952,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash)
anyPossiblyNovelChunks := func() bool {
nbs.mu.Lock()
defer nbs.mu.Unlock()
return nbs.mt != nil || nbs.tables.Novel() > 0
return nbs.mt != nil || len(nbs.tables.novel) > 0
}
if !anyPossiblyNovelChunks() && current == last {
@@ -984,7 +984,7 @@ func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash)
}
if cnt > preflushChunkCount {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = nil
}
}
@@ -1033,7 +1033,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}
handleOptimisticLockFailure := func(upstream manifestContents) error {
newTables, err := nbs.tables.Rebase(ctx, upstream.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, upstream.specs, nbs.stats)
if err != nil {
return err
}
@@ -1041,7 +1041,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
nbs.upstream = upstream
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if last != upstream.root {
return errOptimisticLockFailedRoot
@@ -1067,7 +1067,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}
if cnt > 0 {
nbs.tables = nbs.tables.Prepend(ctx, nbs.mt, nbs.stats)
nbs.tables = nbs.tables.prepend(ctx, nbs.mt, nbs.stats)
nbs.mt = nil
}
}
@@ -1081,7 +1081,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return err
}
newTables, err := nbs.tables.Rebase(ctx, newUpstream.specs, nbs.stats)
newTables, err := nbs.tables.rebase(ctx, newUpstream.specs, nbs.stats)
if err != nil {
return err
@@ -1090,7 +1090,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
nbs.upstream = newUpstream
oldTables := nbs.tables
nbs.tables = newTables
err = oldTables.Close()
err = oldTables.close()
if err != nil {
return err
}
@@ -1098,7 +1098,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return errOptimisticLockFailedTables
}
specs, err := nbs.tables.ToSpecs()
specs, err := nbs.tables.toSpecs()
if err != nil {
return err
}
@@ -1139,7 +1139,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
return handleOptimisticLockFailure(upstream)
}
newTables, err := nbs.tables.Flatten(ctx)
newTables, err := nbs.tables.flatten(ctx)
if err != nil {
return err
@@ -1158,7 +1158,7 @@ func (nbs *NomsBlockStore) Version() string {
}
func (nbs *NomsBlockStore) Close() error {
return nbs.tables.Close()
return nbs.tables.close()
}
func (nbs *NomsBlockStore) Stats() interface{} {
@@ -1574,7 +1574,7 @@ func (nbs *NomsBlockStore) gcTableSize() (uint64, error) {
return 0, err
}
avgTableSize := total / uint64(nbs.tables.Upstream()+nbs.tables.Novel()+1)
avgTableSize := total / uint64(nbs.tables.Size()+1)
// max(avgTableSize, defaultMemTableSize)
if avgTableSize > nbs.mtSize {
@@ -1622,14 +1622,14 @@ func (nbs *NomsBlockStore) swapTables(ctx context.Context, specs []tableSpec) (e
nbs.mt = newMemTable(nbs.mtSize)
// clear nbs.tables.novel
nbs.tables, err = nbs.tables.Flatten(ctx)
nbs.tables, err = nbs.tables.flatten(ctx)
if err != nil {
return err
}
// replace nbs.tables.upstream with gc compacted tables
nbs.upstream = upstream
nbs.tables, err = nbs.tables.Rebase(ctx, upstream.specs, nbs.stats)
nbs.tables, err = nbs.tables.rebase(ctx, upstream.specs, nbs.stats)
if err != nil {
return err
}

View File

@@ -230,12 +230,11 @@ type chunkReader interface {
get(ctx context.Context, h addr, stats *Stats) ([]byte, error)
getMany(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, *chunks.Chunk), stats *Stats) (bool, error)
getManyCompressed(ctx context.Context, eg *errgroup.Group, reqs []getRecord, found func(context.Context, CompressedChunk), stats *Stats) (bool, error)
extract(ctx context.Context, chunks chan<- extractRecord) error
count() (uint32, error)
uncompressedLen() (uint64, error)
// Close releases resources retained by the |chunkReader|.
Close() error
// close releases resources retained by the |chunkReader|.
close() error
}
type chunkSource interface {
@@ -253,12 +252,12 @@ type chunkSource interface {
// index returns the tableIndex of this chunkSource.
index() (tableIndex, error)
// Clone returns a |chunkSource| with the same contents as the
// clone returns a |chunkSource| with the same contents as the
// original, but with independent |close| behavior. A |chunkSource|
// cannot be |close|d more than once, so if a |chunkSource| is being
// retained in two objects with independent lifecycles, it should be
// |clone|d first.
Clone() (chunkSource, error)
clone() (chunkSource, error)
}
type chunkSources []chunkSource
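A hedged sketch of the ownership contract described above: a second holder clones each source so the two collections can be closed independently (a hypothetical helper; the cleanup mirrors the error path in tableSet.rebase):

// retainSources clones each source in |srcs| for a new owner. On failure it
// closes the clones made so far, since each handle may be closed only once.
func retainSources(srcs chunkSources) (chunkSources, error) {
	retained := make(chunkSources, 0, len(srcs))
	for _, src := range srcs {
		dup, err := src.clone()
		if err != nil {
			for _, d := range retained {
				_ = d.close()
			}
			return nil, err
		}
		retained = append(retained, dup)
	}
	return retained, nil
}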

View File

@@ -654,11 +654,11 @@ func (tr tableReader) size() (uint64, error) {
return i.TableFileSize(), nil
}
func (tr tableReader) Close() error {
func (tr tableReader) close() error {
return tr.tableIndex.Close()
}
func (tr tableReader) Clone() (tableReader, error) {
func (tr tableReader) clone() (tableReader, error) {
ti, err := tr.tableIndex.Clone()
if err != nil {
return tableReader{}, err

View File

@@ -252,7 +252,7 @@ func (ts tableSet) physicalLen() (uint64, error) {
return lenNovel + lenUp, nil
}
func (ts tableSet) Close() error {
func (ts tableSet) close() error {
var firstErr error
setErr := func(err error) {
if err != nil && firstErr == nil {
@@ -261,11 +261,11 @@ func (ts tableSet) Close() error {
}
for _, t := range ts.novel {
err := t.Close()
err := t.close()
setErr(err)
}
for _, t := range ts.upstream {
err := t.Close()
err := t.close()
setErr(err)
}
return firstErr
@@ -276,20 +276,9 @@ func (ts tableSet) Size() int {
return len(ts.novel) + len(ts.upstream)
}
// Novel returns the number of tables containing novel chunks in this
// tableSet.
func (ts tableSet) Novel() int {
return len(ts.novel)
}
// Upstream returns the number of known-persisted tables in this tableSet.
func (ts tableSet) Upstream() int {
return len(ts.upstream)
}
// Prepend adds a memTable to an existing tableSet, compacting |mt| and
// prepend adds a memTable to an existing tableSet, compacting |mt| and
// returning a new tableSet with newly compacted table added.
func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet {
func (ts tableSet) prepend(ctx context.Context, mt *memTable, stats *Stats) tableSet {
newTs := tableSet{
novel: make(chunkSources, len(ts.novel)+1),
upstream: make(chunkSources, len(ts.upstream)),
@@ -303,29 +292,9 @@ func (ts tableSet) Prepend(ctx context.Context, mt *memTable, stats *Stats) tabl
return newTs
}
func (ts tableSet) extract(ctx context.Context, chunks chan<- extractRecord) error {
// Since new tables are _prepended_ to a tableSet, extracting chunks in insertOrder requires iterating ts.upstream back to front, followed by ts.novel.
for i := len(ts.upstream) - 1; i >= 0; i-- {
err := ts.upstream[i].extract(ctx, chunks)
if err != nil {
return err
}
}
for i := len(ts.novel) - 1; i >= 0; i-- {
err := ts.novel[i].extract(ctx, chunks)
if err != nil {
return err
}
}
return nil
}
// Flatten returns a new tableSet with |upstream| set to the union of ts.novel
// flatten returns a new tableSet with |upstream| set to the union of ts.novel
// and ts.upstream.
func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) {
func (ts tableSet) flatten(ctx context.Context) (tableSet, error) {
flattened := tableSet{
upstream: make(chunkSources, 0, ts.Size()),
p: ts.p,
@@ -349,9 +318,9 @@ func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) {
return flattened, nil
}
// Rebase returns a new tableSet holding the novel tables managed by |ts| and
// rebase returns a new tableSet holding the novel tables managed by |ts| and
// those specified by |specs|.
func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
merged := tableSet{
novel: make(chunkSources, 0, len(ts.novel)),
p: ts.p,
@@ -368,7 +337,7 @@ func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats)
}
if cnt > 0 {
t2, err := t.Clone()
t2, err := t.clone()
if err != nil {
return tableSet{}, err
}
@@ -404,7 +373,7 @@ OUTER:
return tableSet{}, err
}
if spec.name == h {
c, err := existing.Clone()
c, err := existing.clone()
if err != nil {
return tableSet{}, err
}
@@ -454,7 +423,7 @@ OUTER:
if err != nil {
// Close any opened chunkSources
for _, cs := range opened {
_ = cs.Close()
_ = cs.close()
}
if r := rp.Load(); r != nil {
@@ -466,7 +435,7 @@ OUTER:
return merged, nil
}
func (ts tableSet) ToSpecs() ([]tableSpec, error) {
func (ts tableSet) toSpecs() ([]tableSpec, error) {
tableSpecs := make([]tableSpec, 0, ts.Size())
for _, src := range ts.novel {
cnt, err := src.count()

View File
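Taken together, the renamed tableSet methods compose as in this hedged sketch (mirroring the tests that follow; signatures taken from the hunks above):

// tableSetLifecycle sketches the prepend/flatten/rebase flow using the
// unexported names introduced by this change.
func tableSetLifecycle(ctx context.Context, p tablePersister, q MemoryQuotaProvider, mt *memTable, specs []tableSpec) error {
	ts := newTableSet(p, q)
	ts = ts.prepend(ctx, mt, &Stats{}) // compact |mt| into a new novel table
	ts, err := ts.flatten(ctx)         // fold novel tables into upstream
	if err != nil {
		return err
	}
	ts, err = ts.rebase(ctx, specs, &Stats{}) // adopt upstream |specs|, keeping novel tables
	if err != nil {
		return err
	}
	if _, err = ts.toSpecs(); err != nil { // enumerate the resulting table specs
		return err
	}
	return ts.close()
}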

@@ -33,8 +33,8 @@ import (
var testChunks = [][]byte{[]byte("hello2"), []byte("goodbye2"), []byte("badbye2")}
func TestTableSetPrependEmpty(t *testing.T) {
ts := newFakeTableSet(&noopQuotaProvider{}).Prepend(context.Background(), newMemTable(testMemTableSize), &Stats{})
specs, err := ts.ToSpecs()
ts := newFakeTableSet(&noopQuotaProvider{}).prepend(context.Background(), newMemTable(testMemTableSize), &Stats{})
specs, err := ts.toSpecs()
require.NoError(t, err)
assert.Empty(t, specs)
}
@@ -42,23 +42,23 @@ func TestTableSetPrependEmpty(t *testing.T) {
func TestTableSetPrepend(t *testing.T) {
assert := assert.New(t)
ts := newFakeTableSet(&noopQuotaProvider{})
specs, err := ts.ToSpecs()
specs, err := ts.toSpecs()
require.NoError(t, err)
assert.Empty(specs)
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
firstSpecs, err := ts.ToSpecs()
firstSpecs, err := ts.toSpecs()
require.NoError(t, err)
assert.Len(firstSpecs, 1)
mt = newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
secondSpecs, err := ts.ToSpecs()
secondSpecs, err := ts.toSpecs()
require.NoError(t, err)
assert.Len(secondSpecs, 2)
assert.Equal(firstSpecs, secondSpecs[1:])
@@ -67,22 +67,22 @@ func TestTableSetPrepend(t *testing.T) {
func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) {
assert := assert.New(t)
ts := newFakeTableSet(&noopQuotaProvider{})
specs, err := ts.ToSpecs()
specs, err := ts.toSpecs()
require.NoError(t, err)
assert.Empty(specs)
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
mt = newMemTable(testMemTableSize)
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
mt = newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
specs, err = ts.ToSpecs()
specs, err = ts.toSpecs()
require.NoError(t, err)
assert.Len(specs, 2)
}
@@ -90,61 +90,26 @@ func TestTableSetToSpecsExcludesEmptyTable(t *testing.T) {
func TestTableSetFlattenExcludesEmptyTable(t *testing.T) {
assert := assert.New(t)
ts := newFakeTableSet(&noopQuotaProvider{})
specs, err := ts.ToSpecs()
specs, err := ts.toSpecs()
require.NoError(t, err)
assert.Empty(specs)
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
mt = newMemTable(testMemTableSize)
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
mt = newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
ts, err = ts.Flatten(context.Background())
ts, err = ts.flatten(context.Background())
require.NoError(t, err)
assert.EqualValues(ts.Size(), 2)
}
func TestTableSetExtract(t *testing.T) {
assert := assert.New(t)
ts := newFakeTableSet(&noopQuotaProvider{})
specs, err := ts.ToSpecs()
require.NoError(t, err)
assert.Empty(specs)
// Put in one table
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
ts = ts.Prepend(context.Background(), mt, &Stats{})
// Put in a second
mt = newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
ts = ts.Prepend(context.Background(), mt, &Stats{})
chunkChan := make(chan extractRecord)
go func() {
defer close(chunkChan)
err := ts.extract(context.Background(), chunkChan)
require.NoError(t, err)
}()
i := 0
for rec := range chunkChan {
a := computeAddr(testChunks[i])
assert.NotNil(rec.data, "Nothing for", a)
assert.Equal(testChunks[i], rec.data, "Item %d: %s != %s", i, string(testChunks[i]), string(rec.data))
assert.Equal(a, rec.a)
i++
}
}
func persist(t *testing.T, p tablePersister, chunks ...[]byte) {
for _, c := range chunks {
mt := newMemTable(testMemTableSize)
@@ -166,37 +131,37 @@ func TestTableSetRebase(t *testing.T) {
for _, c := range chunks {
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(c), c)
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
}
return ts
}
fullTS := newTableSet(persister, q)
defer func() {
require.NoError(t, fullTS.Close())
require.NoError(t, fullTS.close())
}()
specs, err := fullTS.ToSpecs()
specs, err := fullTS.toSpecs()
require.NoError(t, err)
assert.Empty(specs)
fullTS = insert(fullTS, testChunks...)
fullTS, err = fullTS.Flatten(context.Background())
fullTS, err = fullTS.flatten(context.Background())
require.NoError(t, err)
ts := newTableSet(persister, q)
ts = insert(ts, testChunks[0])
assert.Equal(1, ts.Size())
ts, err = ts.Flatten(context.Background())
ts, err = ts.flatten(context.Background())
require.NoError(t, err)
ts = insert(ts, []byte("novel"))
specs, err = fullTS.ToSpecs()
specs, err = fullTS.toSpecs()
require.NoError(t, err)
ts2, err := ts.Rebase(context.Background(), specs, nil)
ts2, err := ts.rebase(context.Background(), specs, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, ts2.Close())
require.NoError(t, ts2.close())
}()
err = ts.Close()
err = ts.close()
require.NoError(t, err)
assert.Equal(4, ts2.Size())
}
@@ -204,17 +169,17 @@ func TestTableSetRebase(t *testing.T) {
func TestTableSetPhysicalLen(t *testing.T) {
assert := assert.New(t)
ts := newFakeTableSet(&noopQuotaProvider{})
specs, err := ts.ToSpecs()
specs, err := ts.toSpecs()
require.NoError(t, err)
assert.Empty(specs)
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[0]), testChunks[0])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
mt = newMemTable(testMemTableSize)
mt.addChunk(computeAddr(testChunks[1]), testChunks[1])
mt.addChunk(computeAddr(testChunks[2]), testChunks[2])
ts = ts.Prepend(context.Background(), mt, &Stats{})
ts = ts.prepend(context.Background(), mt, &Stats{})
assert.True(mustUint64(ts.physicalLen()) > indexSize(mustUint32(ts.count())))
}
@@ -241,7 +206,7 @@ func TestTableSetClosesOpenedChunkSourcesOnErr(t *testing.T) {
}
ts := tableSet{p: p, q: q, rl: make(chan struct{}, 1)}
_, err := ts.Rebase(context.Background(), specs, &Stats{})
_, err := ts.rebase(context.Background(), specs, &Stats{})
require.Error(t, err)
for range p.opened {

View File

@@ -190,10 +190,12 @@ teardown() {
start_sql_server
server_query "" 1 dolt "" "create database testdb" ""
server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntestdb" ""
server_query "testdb" 1 dolt "" "create table a(x int)" ""
server_query "testdb" 1 dolt "" "insert into a values (1), (2)" ""
dolt sql-client --use-db '' -u dolt -P $PORT -q "create database testdb"
run dolt sql-client --use-db '' -u dolt -P $PORT -r csv -q "show databases"
[ $status -eq 0 ]
[[ "$output" =~ "testdb" ]] || false
dolt sql-client --use-db testdb -u dolt -P $PORT -q "create table a(x int)"
dolt sql-client --use-db testdb -u dolt -P $PORT -q "insert into a values (1), (2)"
[ -d "testdb" ]
cd testdb

View File

@@ -2788,7 +2788,7 @@ SQL
run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv
log_status_eq "0"
[[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false
[[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
[[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM test" -r=csv
log_status_eq "0"
@@ -2825,7 +2825,7 @@ SQL
run dolt sql -q "SELECT * FROM dolt_constraint_violations_test" -r=csv
log_status_eq "0"
[[ "$output" =~ "violation_type,pk,v1,violation_info" ]] || false
[[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": ""pk"", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
[[ "$output" =~ 'foreign key,4,3,"{""Columns"": [""v1""], ""ForeignKey"": ""fk_name"", ""Index"": ""v1"", ""OnDelete"": ""CASCADE"", ""OnUpdate"": ""CASCADE"", ""ReferencedColumns"": [""pk""], ""ReferencedIndex"": """", ""ReferencedTable"": ""test"", ""Table"": ""test""}"' ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM test" -r=csv
log_status_eq "0"

View File

@@ -65,11 +65,14 @@ make_it() {
start_sql_server "dolt_repo_$$"
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'"
server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep')"
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_branch('-D', 'main');"
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "id\n" ""
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "describe test"
[ $status -eq 0 ]
[[ "$output" =~ "id" ]] || false
}
@test "deleted-branches: can SQL connect with existing branch revision specifier when checked out branch is deleted" {
@@ -77,11 +80,12 @@ make_it() {
start_sql_server "dolt_repo_$$"
# Can't string together multiple queries in dolt sql-client
server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
# Against the default branch it fails
run server_query "dolt_repo_$$" 1 "" dolt "" "SELECT * FROM test" "id\n" ""
[ "$status" -eq 1 ] || fail "expected query against the default branch, which was deleted, to fail"
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
[ $status -ne 0 ]
# Against to_keep it succeeds
server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test" "id\n" ""
@@ -92,10 +96,11 @@ make_it() {
start_sql_server "dolt_repo_$$"
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'"
# Against the default branch it fails
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "" 1
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
[ $status -ne 0 ]
# Against main, which exists, it succeeds
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test" "id\n" ""
@@ -106,7 +111,7 @@ make_it() {
start_sql_server "dolt_repo_$$"
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'"
# We are able to use a database branch revision in the connection string
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test;"
@@ -141,11 +146,11 @@ make_it() {
start_sql_server "dolt_repo_$$"
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'"
server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
server_query "dolt_repo_$$" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "CALL DOLT_CHECKOUT('to_checkout')"
}

View File

@@ -49,6 +49,27 @@ teardown() {
run dolt diff head head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false
# Two dot
run dolt diff head..
[ "$status" -eq 0 ]
[ "$output" = "" ]
run dolt diff head^..
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false
run dolt diff head^..head
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false
run dolt diff head..head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false
run dolt diff ..head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false
}
@test "diff: dirty working set" {
@@ -70,6 +91,132 @@ teardown() {
[[ "$output" =~ "+ | 0" ]] || false
}
@test "diff: two and three dot diff" {
dolt checkout main
dolt sql -q 'insert into test values (0,0,0,0,0,0)'
dolt add .
dolt commit -m table
dolt checkout -b branch1
dolt sql -q 'insert into test values (1,1,1,1,1,1)'
dolt add .
dolt commit -m row
dolt checkout main
dolt sql -q 'insert into test values (2,2,2,2,2,2)'
dolt add .
dolt commit -m newrow
# Two dot shows all changes between branches
run dolt diff branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff branch1..
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff branch1..main
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff branch1 main
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff ..branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ "$output" =~ "- | 2" ]] || false
run dolt diff main..branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ "$output" =~ "- | 2" ]] || false
run dolt diff main branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ "$output" =~ "- | 2" ]] || false
# Three dot shows changes between common ancestor and branch
run dolt diff branch1...
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff $(dolt merge-base branch1 HEAD)
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff --merge-base branch1
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff branch1...main
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff --merge-base branch1 main
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff main...branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ ! "$output" =~ "- | 2" ]] || false
run dolt diff --merge-base main branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ ! "$output" =~ "- | 2" ]] || false
run dolt diff --merge-base main branch1 test
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ ! "$output" =~ "- | 2" ]] || false
run dolt diff $(dolt merge-base branch1 main) main
[ "$status" -eq 0 ]
[[ ! "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false
run dolt diff $(dolt merge-base main branch1) branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ ! "$output" =~ "- | 2" ]] || false
# Dots work with --summary
run dolt diff main..branch1 --summary
[ "$status" -eq 0 ]
[[ "$output" =~ "1 Row Unmodified (50.00%)" ]] || false
[[ "$output" =~ "1 Row Added (50.00%)" ]] || false
[[ "$output" =~ "1 Row Deleted (50.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Added (50.00%)" ]] || false
[[ "$output" =~ "6 Cells Deleted (50.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(2 Row Entries vs 2 Row Entries)" ]] || false
run dolt diff main...branch1 --summary
echo $output
[ "$status" -eq 0 ]
[[ "$output" =~ "1 Row Unmodified (100.00%)" ]] || false
[[ "$output" =~ "1 Row Added (100.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Added (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
}
@test "diff: data and schema changes" {
dolt sql <<SQL
drop table test;
@@ -242,8 +389,28 @@ EOF
run dolt diff head^ head fake
[ "$status" -ne 0 ]
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
# Two dot
run dolt diff head^..head test other
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false
[[ "$output" =~ "+ | 9" ]] || false
run dolt diff head^..head fake
[ "$status" -ne 0 ]
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
run dolt diff head^.. test other
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false
[[ "$output" =~ "+ | 9" ]] || false
run dolt diff head^.. fake
[ "$status" -ne 0 ]
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
}
@test "diff: with table and branch of the same name" {
dolt sql -q 'create table dolomite (pk int not null primary key)'
dolt add .
@@ -270,6 +437,10 @@ EOF
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 9" ]] || false
[[ ! "$output" =~ "+ | 0" ]] || false
run dolt diff head^..head dolomite
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 9" ]] || false
[[ ! "$output" =~ "+ | 0" ]] || false
dolt branch -D dolomite
dolt sql -q 'insert into dolomite values (8)'
run dolt diff dolomite
@@ -411,6 +582,16 @@ SQL
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
run dolt diff --summary firstbranch..newbranch
[ "$status" -eq 0 ]
[[ "$output" =~ "1 Row Unmodified (100.00%)" ]] || false
[[ "$output" =~ "1 Row Added (100.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Added (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
}
@test "diff: summary shows correct changes after schema change" {
@@ -556,6 +737,22 @@ SQL
[ "$status" -eq 1 ]
[[ "$output" =~ "Error running diff query" ]] || false
[[ "$output" =~ "where pk=4" ]] || false
# Two dot
run dolt diff test1..test2
[ "$status" -eq 0 ]
[[ "$output" =~ "44" ]] || false
[[ "$output" =~ "55" ]] || false
run dolt diff test1..test2 --where "from_pk=4 OR to_pk=5"
[ "$status" -eq 0 ]
[[ "$output" =~ "44" ]] || false
[[ "$output" =~ "55" ]] || false
run dolt diff test1..test2 --where "pk=4"
[ "$status" -eq 1 ]
[[ "$output" =~ "Error running diff query" ]] || false
[[ "$output" =~ "where pk=4" ]] || false
}
@test "diff: diff summary incorrect primary key set change regression test" {
@@ -643,6 +840,36 @@ SQL
run dolt diff ref.with.period test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff $FIRST_COMMIT..test-branch
[ $status -eq 0 ]
[[ ! $output =~ "panic" ]]
run dolt diff main@$FIRST_COMMIT..test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff ref.with.period..test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff $FIRST_COMMIT...test-branch
[ $status -eq 0 ]
[[ ! $output =~ "panic" ]]
run dolt diff main@$FIRST_COMMIT...test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff ref.with.period...test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff --merge-base $FIRST_COMMIT test-branch
[ $status -eq 0 ]
[[ ! $output =~ "panic" ]]
run dolt diff --merge-base main@$FIRST_COMMIT test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
run dolt diff --merge-base ref.with.period test-branch
[ $status -eq 1 ]
[[ ! $output =~ "panic" ]]
}
@test "diff: with foreign key and sql output" {
@@ -698,6 +925,13 @@ SQL
[[ "$output" =~ "pv1" ]] || false
[[ "$output" =~ "cv1" ]] || false
[ $status -eq 0 ]
run dolt diff main..another-branch
echo $output
! [[ "$output" =~ "panic" ]] || false
[[ "$output" =~ "pv1" ]] || false
[[ "$output" =~ "cv1" ]] || false
[ $status -eq 0 ]
}
@test "diff: sql update queries only show changed columns" {

View File

@@ -22,6 +22,11 @@ teardown() {
dolt commit -am "cm"
}
@test "foreign-keys-invert-pk: no secondary indexes made" {
run dolt index ls
[[ $output = "No indexes in the working set" ]] || false
}
@test "foreign-keys-invert-pk: check referential integrity on merge" {
dolt commit -am "main"
dolt checkout -b feat

View File

@@ -1971,7 +1971,7 @@ SQL
# the prefix key should not be unique
run dolt sql -q "show create table parent"
[ $status -eq 0 ]
[[ $output =~ "KEY \`b\` (\`b\`)" ]] || false
[[ ! $output =~ "KEY \`b\` (\`b\`)" ]] || false
[[ ! $output =~ "UNIQUE" ]] || false
run dolt sql -q "show create table child"

View File

@@ -169,7 +169,7 @@ pk,c1,c2,c3,c4,c5
9,1,2,3,4,5
DELIM
dolt table import -c --pk=pk test 1pk5col-ints.csv
run dolt sql -q "create table fktest(id int not null, tpk int unsigned, c2 int, primary key(id), foreign key (tpk) references test(pk))"
run dolt sql -q "create table fktest(id int not null, tpk int, c2 int, primary key(id), foreign key (tpk) references test(pk))"
[ "$status" -eq 0 ]
run dolt sql -q "insert into fktest values (1, 0, 1)"
[ "$status" -eq 0 ]
@@ -567,7 +567,7 @@ DELIM
[[ "$output" =~ "CREATE TABLE \`test\`" ]]
[[ "$output" =~ "\`pk\` int" ]]
[[ "$output" =~ "\`str\` varchar(16383)" ]]
[[ "$output" =~ "\`int\` int unsigned" ]]
[[ "$output" =~ "\`int\` int" ]]
[[ "$output" =~ "\`bool\` tinyint" ]]
[[ "$output" =~ "\`float\` float" ]]
[[ "$output" =~ "\`date\` date" ]]

View File

@@ -248,3 +248,21 @@ SQL
run dolt schema show t
[[ "$output" =~ "PRIMARY KEY (\`pk1\`,\`pk2\`)" ]] || false
}
@test "migrate: removed tables stay removed" {
dolt sql -q "create table alpha (pk int primary key);"
dolt sql -q "create table beta (pk int primary key);"
dolt commit -Am "create tables"
dolt sql -q "alter table alpha rename to zulu;"
dolt sql -q "drop table beta"
dolt commit -Am "rename table alpha to zeta, drop table beta"
dolt migrate
run dolt ls
[ $status -eq 0 ]
[[ "$output" =~ "zulu" ]] || false
[[ ! "$output" =~ "alpha" ]] || false
[[ ! "$output" =~ "beta" ]] || false
}

View File

@@ -14,8 +14,8 @@ teardown() {
}
@test "migration-integration: first-hour-db" {
dolt clone dolthub/first-hour-db
cd first-hour-db
dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
cd first-hour-db-migration-int
dolt tag -v
run dolt tag -v
@@ -39,8 +39,8 @@ teardown() {
}
@test "migration-integration: first-hour-db after garbage collection" {
dolt clone dolthub/first-hour-db
cd first-hour-db
dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
cd first-hour-db-migration-int
dolt gc
dolt tag -v
@@ -65,8 +65,8 @@ teardown() {
}
@test "migration-integration: us-jails" {
dolt clone dolthub/us-jails
cd us-jails
dolt clone https://doltremoteapi.dolthub.com/dolthub/us-jails-migration-integration
cd us-jails-migration-integration
dolt tag -v
run dolt tag -v

View File

@@ -31,7 +31,7 @@ teardown() {
cd dbs1
start_multi_db_server repo1
server_query repo1 1 dolt "" "create database new; use new; call dcheckout('-b', 'feat'); create table t (x int); call dolt_add('.'); call dcommit('-am', 'cm'); set @@global.new_default_branch='feat'"
server_query repo1 1 dolt "" "use repo1"
dolt sql-client -u dolt --use-db '' -P $PORT -q "use repo1"
}
@test "multidb: incompatible BIN FORMATs" {

View File

@@ -45,13 +45,14 @@ teardown() {
dolt checkout -b other
start_sql_server repo1
run server_query repo1 1 dolt "" "call dolt_push()" "" "" 1
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
[ $status -ne 0 ]
[[ "$output" =~ "the current branch has no upstream branch" ]] || false
server_query repo1 1 dolt "" "call dolt_push('--set-upstream', 'origin', 'other') " ""
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push('--set-upstream', 'origin', 'other')"
skip "In-memory branch doesn't track upstream"
server_query repo1 1 dolt "" "call dolt_push()" ""
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
}
@test "remotes-sql-server: push on sql-session commit" {
@@ -61,7 +62,7 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_to_remote remote1
start_sql_server repo1
server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
cd ../repo2
dolt pull remote1
@@ -81,7 +82,7 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_async_replication 1
start_sql_server repo1
server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
# wait for the process to exit after we stop it
stop_sql_server 1
@@ -108,7 +109,10 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "Tables_in_repo2" ]] || false
[[ "$output" =~ "test" ]] || false
}
@test "remotes-sql-server: pull remote not found error" {
@@ -133,7 +137,9 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo1
run server_query repo1 1 dolt "" "show tables" "Table\n"
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ "Table" ]] || false
}
@test "remotes-sql-server: push remote not found error" {
@@ -156,7 +162,10 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_to_remote unknown
start_sql_server repo1
server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\ntest"
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ "Tables_in_repo1" ]] || false
[[ "$output" =~ "test" ]] || false
}
@test "remotes-sql-server: pull multiple heads" {
@@ -172,8 +181,16 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main,new_feature
start_sql_server repo2
server_query repo2 1 dolt "" "select dolt_checkout('new_feature') as b" "b\n0"
server_query repo2 1 dolt "" "select name from dolt_branches order by name" "name\nmain\nnew_feature"
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select dolt_checkout('new_feature') as b"
[ $status -eq 0 ]
[[ "$output" =~ "b" ]] || false
[[ "$output" =~ "0" ]] || false
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select name from dolt_branches order by name"
[ $status -eq 0 ]
[[ "$output" =~ "name" ]] || false
[[ "$output" =~ "main" ]] || false
[[ "$output" =~ "new_feature" ]] || false
}
@test "remotes-sql-server: connect to remote head" {
@@ -194,13 +211,17 @@ teardown() {
start_sql_server repo2
# No data on main
server_query repo2 1 dolt "" "show tables" ""
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[ "$output" = "" ]
# Can't use dolt sql-client to connect to branches
# Connecting to heads that exist only on the remote should work fine (they get fetched)
server_query "repo2/new_feature" 1 dolt "" "show tables" "Tables_in_repo2/new_feature\ntest"
server_query repo2 1 dolt "" 'use `repo2/new_feature2`' ""
server_query repo2 1 dolt "" 'select * from `repo2/new_feature2`.test' "pk\n0\n1\n2"
# Connecting to heads that don't exist should error out
run server_query "repo2/notexist" 1 dolt "" 'use `repo2/new_feature2`' "" 1
[[ $output =~ "database not found" ]] || false
@@ -228,7 +249,10 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[[ $output =~ "Tables_in_repo2" ]] || false
[[ $output =~ "test" ]] || false
}
@test "remotes-sql-server: pull invalid head" {
@@ -240,7 +264,8 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads unknown
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "" 1
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -ne 0 ]
[[ "$output" =~ "remote not found: 'unknown'" ]] || false
}
@@ -253,7 +278,8 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "" 1
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -ne 0 ]
[[ "$output" =~ "remote not found: 'unknown'" ]] || false
}
@@ -270,7 +296,9 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "Table\n"
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
[ $status -eq 0 ]
[ "$output" = "" ]
}
@test "remotes-sql-server: connect to missing branch pulls remote" {
@@ -286,7 +314,11 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "SHOW tables" "" # no tables on main
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
[ $status -eq 0 ]
[ "$output" = "" ]
# Can't connect to a specific branch with dolt sql-client
server_query "repo2/feature-branch" 1 dolt "" "SHOW Tables" "Tables_in_repo2/feature-branch\ntest"
}
@@ -303,8 +335,14 @@ teardown() {
dolt config --local --add sqlserver.global.dolt_replicate_heads main
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
server_query repo2 1 dolt "" "use \`repo2/$head_hash\`" ""
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[[ $output =~ "Tables_in_repo2" ]] || false
[[ $output =~ "test" ]] || false
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q"use \`repo2/$head_hash\`"
[ $status -eq 0 ]
[ "$output" = "" ]
}
@test "remotes-sql-server: connect to tag works" {
@@ -321,8 +359,14 @@ teardown() {
dolt tag v1
start_sql_server repo2
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
server_query repo2 1 dolt "" "use \`repo2/v1\`" ""
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
[ $status -eq 0 ]
[[ $output =~ "Tables_in_repo2" ]] || false
[[ $output =~ "test" ]] || false
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/v1\`"
[ $status -eq 0 ]
[ "$output" = "" ]
}
get_head_commit() {

View File

@@ -290,15 +290,15 @@ SQL
start_multi_db_server repo1
cd ..
server_query repo1 1 dolt "" "create table t1 (a int primary key)"
server_query repo1 1 dolt "" "call dolt_add('.')"
server_query repo1 1 dolt "" "call dolt_commit('-am', 'cm')"
server_query repo2 1 dolt "" "create table t2 (a int primary key)"
server_query repo2 1 dolt "" "call dolt_add('.')"
server_query repo2 1 dolt "" "call dolt_commit('-am', 'cm')"
server_query repo3 1 dolt "" "create table t3 (a int primary key)"
server_query repo3 1 dolt "" "call dolt_add('.')"
server_query repo3 1 dolt "" "call dolt_commit('-am', 'cm')"
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "create table t1 (a int primary key)"
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_add('.')"
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "create table t2 (a int primary key)"
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_add('.')"
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "create table t3 (a int primary key)"
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_add('.')"
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
clone_helper $TMPDIRS
@@ -344,7 +344,18 @@ SQL
cd dbs1
start_multi_db_server repo1
server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\nt1"
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\nt2"
server_query repo3 1 dolt "" "show tables" "Tables_in_repo3\nt3"
run dolt sql-client --use-db repo1 -u dolt -P $PORT -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ Tables_in_repo1 ]] || false
[[ "$output" =~ t1 ]] || false
run dolt sql-client --use-db repo2 -u dolt -P $PORT -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ Tables_in_repo2 ]] || false
[[ "$output" =~ t2 ]] || false
run dolt sql-client --use-db repo3 -u dolt -P $PORT -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ Tables_in_repo3 ]] || false
[[ "$output" =~ t3 ]] || false
}

View File

@@ -82,7 +82,7 @@ teardown() {
[[ "$output" =~ "\`string\` varchar(16383)" ]] || false
[[ "$output" =~ "\`boolean\` tinyint" ]] || false
[[ "$output" =~ "\`float\` float" ]] || false
[[ "$output" =~ "\`uint\` int unsigned" ]] || false
[[ "$output" =~ "\`uint\` int" ]] || false
[[ "$output" =~ "\`uuid\` char(36) CHARACTER SET ascii COLLATE ascii_bin" ]] || false
}
@@ -259,9 +259,9 @@ DELIM
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false
[[ "$output" =~ '+ `y` float NOT NULL,' ]] || false
[[ "$output" =~ '+ `z` int NOT NULL,' ]] || false
[[ "$output" =~ '+ `x` varchar(16383),' ]] || false
[[ "$output" =~ '+ `y` float,' ]] || false
[[ "$output" =~ '+ `z` int,' ]] || false
# assert no columns were deleted/replaced
[[ ! "$output" = "- \`" ]] || false
@@ -282,9 +282,9 @@ DELIM
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ '+ `x` varchar(16383) NOT NULL,' ]] || false
[[ "$output" =~ '+ `y` float NOT NULL,' ]] || false
[[ "$output" =~ '+ `z` int NOT NULL,' ]] || false
[[ "$output" =~ '+ `x` varchar(16383),' ]] || false
[[ "$output" =~ '+ `y` float,' ]] || false
[[ "$output" =~ '+ `z` int,' ]] || false
# assert no columns were deleted/replaced
[[ ! "$output" = "- \`" ]] || false
@@ -308,9 +308,9 @@ DELIM
run dolt diff --schema
[ "$status" -eq 0 ]
[[ "$output" =~ '- `a` varchar(16383) NOT NULL,' ]] || false
[[ "$output" =~ '- `b` float NOT NULL,' ]] || false
[[ "$output" =~ '- `c` tinyint NOT NULL,' ]] || false
[[ "$output" =~ '- `a` varchar(16383),' ]] || false
[[ "$output" =~ '- `b` float,' ]] || false
[[ "$output" =~ '- `c` tinyint,' ]] || false
# assert no columns were added
[[ ! "$output" = "+ \`" ]] || false
}

View File

@@ -39,10 +39,10 @@ teardown() {
@test "sql-charsets-collations: define charset and collation on a database" {
start_sql_server
server_query "" 1 dolt "" "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
dolt sql-client -u dolt --use-db '' -P $PORT -q "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
skip "Defining charsets and collations on a database not supported"
server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
}
@test "sql-charsets-collations: define and use a collation and charset" {

View File

@@ -60,9 +60,15 @@ teardown() {
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
server_query test_db 1 root "" "select user from mysql.user order by user" "User\nroot"
server_query test_db 1 root "" "create user new_user" ""
server_query test_db 1 root "" "select user from mysql.user order by user" "User\nnew_user\nroot"
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
[ $status -eq 0 ]
[[ $output =~ "root" ]] || false
dolt sql-client -P $PORT -u root --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
[ $status -eq 0 ]
[[ $output =~ "root" ]] || false
[[ $output =~ "new_user" ]] || false
stop_sql_server
rm -f .dolt/sql-server.lock
@@ -73,7 +79,8 @@ teardown() {
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
server_query test_db 1 root "" "select user from mysql.user order by user" "" 1
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
[ $status -ne 0 ]
}
@test "sql-privs: starting server with empty config works" {
@@ -82,10 +89,16 @@ teardown() {
start_sql_server_with_config test_db server.yaml
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
[ $status -eq 0 ]
[[ $output =~ "dolt" ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
[ $status -eq 0 ]
[[ $output =~ "dolt" ]] || false
[[ $output =~ "new_user" ]] || false
run ls -a
[[ "$output" =~ ".doltcfg" ]] || false
@@ -111,8 +124,12 @@ behavior:
dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
SERVER_PID=$!
sleep 5
server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ "cmddolt" ]] || false
}
@test "sql-privs: yaml with user is also replaced with command line user" {
@@ -135,8 +152,11 @@ behavior:
dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
SERVER_PID=$!
sleep 5
server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ "cmddolt" ]] || false
}
@test "sql-privs: yaml specifies doltcfg dir" {
@@ -146,9 +166,16 @@ behavior:
start_sql_server_with_config test_db server.yaml
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -165,10 +192,17 @@ behavior:
start_sql_server_with_config test_db server.yaml
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
[[ "$output" =~ ".doltcfg" ]] || false
[[ "$output" =~ "privs.db" ]] || false
@@ -184,9 +218,18 @@ behavior:
start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
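# privs.json seeds privs_user at startup; the server persists later privilege changes back to this file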
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nprivs_user"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ privs_user ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
[[ $output =~ privs_user ]] || false
# Test that the privs.json file is no longer in JSON format
run cat privs.json
@@ -196,7 +239,12 @@ behavior:
rm -f ./.dolt/sql-server.lock
stop_sql_server
start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
[[ $output =~ privs_user ]] || false
}
@test "sql-privs: errors instead of panic when reading badly formatted privilege file" {
@@ -217,9 +265,16 @@ behavior:
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
[[ "$output" =~ ".doltcfg" ]] || false
@@ -232,7 +287,9 @@ behavior:
make_test_repo
start_sql_server_with_args --host 127.0.0.1 --user=dolt
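# The superuser created at startup gets the wildcard host %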
server_query test_db 1 dolt "" "select user, host from mysql.user order by user" "User,Host\ndolt,%"
run dolt sql-client -P $PORT -u dolt --use-db test_db --result-format csv -q "select user, host from mysql.user order by user"
[ $status -eq 0 ]
[[ "$output" =~ "dolt,%" ]] || false
}
@test "sql-privs: multiple doltcfg directories causes error" {
@@ -267,10 +324,24 @@ behavior:
! [[ "$output" =~ ".doltcfg" ]] || false
! [[ "$output" =~ "privileges.db" ]] || false
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query db1 1 dolt "" "create user new_user" ""
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
[ $status -eq 0 ]
[[ $output =~ db1 ]] || false
[[ $output =~ db2 ]] || false
[[ $output =~ db3 ]] || false
[[ $output =~ information_schema ]] || false
[[ $output =~ mysql ]] || false
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -293,10 +364,17 @@ behavior:
! [[ "$output" =~ ".doltcfg" ]] || false
! [[ "$output" =~ "doltcfgdir" ]] || false
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
[[ "$output" =~ "doltcfgdir" ]] || false
@@ -314,9 +392,16 @@ behavior:
! [[ "$output" =~ ".doltcfg" ]] || false
! [[ "$output" =~ "privs.db" ]] || false
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
[[ "$output" =~ ".doltcfg" ]] || false
@@ -337,10 +422,24 @@ behavior:
! [[ "$output" =~ ".doltcfg" ]] || false
! [[ "$output" =~ "privileges.db" ]] || false
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query db1 1 dolt "" "create user new_user" ""
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
[ $status -eq 0 ]
[[ $output =~ db1 ]] || false
[[ $output =~ db2 ]] || false
[[ $output =~ db3 ]] || false
[[ $output =~ information_schema ]] || false
[[ $output =~ mysql ]] || false
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -368,10 +467,24 @@ behavior:
! [[ "$output" =~ ".doltcfg" ]] || false
! [[ "$output" =~ "privs.db" ]] || false
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query db1 1 dolt "" "create user new_user" ""
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
[ $status -eq 0 ]
[[ $output =~ db1 ]] || false
[[ $output =~ db2 ]] || false
[[ $output =~ db3 ]] || false
[[ $output =~ information_schema ]] || false
[[ $output =~ mysql ]] || false
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -395,9 +508,16 @@ behavior:
! [[ "$output" =~ "doltcfgdir" ]] || false
! [[ "$output" =~ "privs.db" ]] || false
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query test_db 1 dolt "" "create user new_user" ""
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -420,10 +540,24 @@ behavior:
! [[ "$output" =~ "privileges.db" ]] || false
! [[ "$output" =~ "privs.db" ]] || false
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
server_query db1 1 dolt "" "create user new_user" ""
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
[ $status -eq 0 ]
[[ $output =~ db1 ]] || false
[[ $output =~ db2 ]] || false
[[ $output =~ db3 ]] || false
[[ $output =~ information_schema ]] || false
[[ $output =~ mysql ]] || false
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
run ls -a
! [[ "$output" =~ ".doltcfg" ]] || false
@@ -447,7 +581,7 @@ behavior:
dolt init
start_sql_server_with_args --host 0.0.0.0 --user=dolt
server_query test_db 1 dolt "" "create user new_user" ""
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
stop_sql_server
sleep 1
run ls -a
@@ -457,65 +591,91 @@ behavior:
cd db_dir
start_sql_server_with_args --host 0.0.0.0 --user=dolt
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ new_user ]] || false
}
@test "sql-privs: basic lack of privileges tests" {
make_test_repo
start_sql_server
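# As the superuser, create a table and a restricted user with select access on test_db only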
server_query test_db 1 dolt "" "create table t1(c1 int)"
server_query test_db 1 dolt "" "create user test"
server_query test_db 1 dolt "" "grant select on test_db.* to test"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test"
# Should only see test_db database
server_query "" 1 test "" "show databases" "Database\ntest_db"
server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
run dolt sql-client -P $PORT -u test --use-db '' -q "show databases"
[ $status -eq 0 ]
[[ $output =~ test_db ]] || false
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "show tables"
[ $status -eq 0 ]
[[ $output =~ t1 ]] || false
# Revoke works as expected
server_query test_db 1 dolt "" "revoke select on test_db.* from test"
server_query test_db 1 test "" "show tables" "" 1
dolt sql-client -P $PORT -u dolt --use-db test_db -q "revoke select on test_db.* from test"
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
[ $status -ne 0 ]
# Host in privileges is respected
server_query test_db 1 dolt "" "drop user test"
server_query test_db 1 dolt "" "create user test@'127.0.0.1'"
server_query test_db 1 dolt "" "grant select on test_db.* to test@'127.0.0.1'"
server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
server_query test_db 1 dolt "" "drop user test@'127.0.0.1'"
server_query test_db 1 dolt "" "create user test@'10.10.10.10'"
server_query test_db 1 dolt "" "grant select on test_db.* to test@'10.10.10.10'"
server_query test_db 1 test "" "show tables" "" 1
dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'127.0.0.1'"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'127.0.0.1'"
run dolt sql-client -P $PORT -u test -H 127.0.0.1 --use-db test_db -q "show tables"
[ $status -eq 0 ]
[[ $output =~ t1 ]] || false
dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test@'127.0.0.1'"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'10.10.10.10'"
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'10.10.10.10'"
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
[ $status -ne 0 ]
}
@test "sql-privs: creating user identified by password" {
make_test_repo
start_sql_server
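# Create a password-protected user whose only grant is select on mysql.user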
server_query test_db 1 dolt "" "create user test identified by 'test'" ""
server_query test_db 1 dolt "" "grant select on mysql.user to test" ""
dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test identified by 'test'"
dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on mysql.user to test"
# Should not be able to connect to test_db
server_query test_db 1 test test "select user from mysql.user order by user" "" 1
run dolt sql-client -P $PORT -u test -p test --use-db test_db -q "select user from mysql.user order by user"
[ $status -ne 0 ]
server_query "" 1 test test "select user from mysql.user order by user" "User\ndolt\ntest"
run dolt sql-client -P $PORT -u test -p test --use-db '' -q "select user from mysql.user"
[ $status -eq 0 ]
[[ $output =~ dolt ]] || false
[[ $output =~ test ]] || false
# Bad password can't connect
server_query "" 1 test bad "select user from mysql.user order by user" "" 1
run dolt sql-client -P $PORT -u test -p bad --use-db '' -q "select user from mysql.user order by user"
[ $status -ne 0 ]
# Should only see mysql database
server_query "" 1 test test "show databases" "Database\nmysql"
run dolt sql-client -P $PORT -u test -p test --use-db '' -q "show databases"
[ $status -eq 0 ]
[[ $output =~ mysql ]] || false
! [[ $output =~ test_db ]] || false
}
@test "sql-privs: deleting user prevents access by that user" {
make_test_repo
start_sql_server
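# Verify the user can read test_db while it exists, and loses access once dropped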
server_query test_db 1 dolt "" "create user test"
server_query test_db 1 dolt "" "grant select on test_db.* to test" ""
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test"
dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on test_db.* to test"
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
[ $status -eq 0 ]
[[ $output =~ t1 ]] || false
server_query test_db 1 test "" "show tables" ""
dolt sql-client -P $PORT -u dolt --use-db '' -q "drop user test"
server_query test_db 1 dolt "" "drop user test"
server_query test_db 1 test "" "show tables" "" 1
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
[ $status -ne 0 ]
}

File diff suppressed because it is too large

View File

@@ -1,27 +1,46 @@
-----BEGIN CERTIFICATE-----
MIIErDCCApQCCQCnSokQKR3M/zANBgkqhkiG9w0BAQUFADAYMRYwFAYDVQQKDA1E
b2x0SHViLCBJbmMuMB4XDTIyMDcyMTIwMDgzMloXDTI2MDcxOTIwMDgzMlowGDEW
MBQGA1UECgwNRG9sdEh1YiwgSW5jLjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC
AgoCggIBAMPmzHy0CmW5Xc27rbRYpJG/QKMXVAz+k2v+AkTQkUzBWKv0z8WhePB/
tDNVfVYuYQ2sBiHTaar9nn2Lokon+YkPjyMis2aMETHVuqx0DmJb9YcxniA8M27o
ZlfDrJtQO5UzIp9q2zhsFWj30Qdm6YUOhZ3rTnvYOMUYG/cIYLWXyQCg1oPqRVRr
GldzLP2GdigdrS6QQjA9AdK+Zi3dP2m2vssG4gJ+lkAWOHe7wvv2RJl/alsvWXmw
pur7Q9Z7M+tQmqGDxlyDtkDDecyqvEkxPH7mnKV1jahJjzUFHND1r44JlCN0eTmD
Q3+RldBNZCZSJWQ42yOIK+mTSp4QUvZL9wnJ1/lMb/v7atDlF/MSLeN6SDyAPod7
Oci8PR+nGhaOKacngrogM6SFQ1kF4tlY5Scrpg61IAcf6uxF3eSBP0qEaFvfLXZV
mc136E4g2G1haLt7y2prckCHLXEnxurXU4xlU/SH4cy4jB/zLZJs46tM7J9ZtCjg
QScZeNBA91kKAvHr36f/+suU3MNPAP2fmMCziH2uxh6SxTP8yzsUoV9PCTeaSnXX
rTMB077j0TOB2qsYhLF3XsLMz+B2Jo0b7ydT7c7rMS9yYvyKPA9JSE44nUrZWj3B
7ity1moIfrzwbH3AK3D5I9iUbBV0+JpuIZFPoqTIb15TUXJSusYHAgMBAAEwDQYJ
KoZIhvcNAQEFBQADggIBABGrQEUFJk5StmyFUGvaw/57H+K1ZT62rusFBq1NacMb
61dMh9xJyDMgLiUllQ8q5CS3bjYt2J2KajpU/58ugF/Ct9aoxA4vFDtfHECllYaH
zvoiK0Dkrf901xxNVeCbHDmXbvzJ0N/xTkP80kbT4o+aBOw6fxQVEBGAGg4EEz1D
k7v3/lEsZ2TkCPua1p9kXHaG8+wwE0hAWsaUYgXHTpzz0gUBJ69bOIlBpLKqO9It
HStkPD7wtYnN54pmOM68EAyXAxUC7yZ9PqncX0X04hH0VlmQGfdXFJDR89mSS6B4
P1qsi1XtnKC/hHuJlrY02uMXn7u1cVCf5uWfFm6Xs8rLL+q28gV6Tr2aXqgY0Cjl
tNtUEIP23/irWN48c5/rKOTiUIHJy2m6UofwMQO91jgKFxIyUmkgPQmos2LLNjtk
VFaPRigAaArwvombUmvfXJl6KoyH/je4H4+Gs+rRQURXU/PD1cioHgsOYNXSmYAj
AQJv/xp9QBmpzb1ExJOKeWjnUWGu0Wdv4TCTXJNvfdQqOVkT6k6ty1urgr9fNOxY
PDbHZTI6rXMtT57G108k2gAkaCE6O2R2Dm+vfW7auauqF3lNiZU9Y8IEGU2ybmE3
s2j+THPWmhuepbZKO5daQH0zlma31QgoyhGSoZ6QUWKEjufEvfx4HwGqMP6BEmaP
MIID/jCCAuagAwIBAgIUB7Qx4HU3Ezu1FCmp4EKMLGzQW5wwDQYJKoZIhvcNAQEL
BQAwWzELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xNDAyBgNV
BAMTK3Rlc3RkYXRhIEludGVybWVkaWF0ZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkw
HhcNMjIxMDI2MjA0MTAwWhcNMjQxMDE1MjA0MTAwWjBBMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjEaMBgGA1UEAxMRdGVzdGRhdGEgSW5zdGFu
Y2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC40mH/fY9PcLNkmDhD
TLW4jRYpZWaQx5GD2rSgodO7HcdEvECnvFH9AzktNnU2V/O18Ns+Q66DqACdBFie
wvi3HVD1lp16PeDDzd+U1gsv09aJkyMQ9rgsc9xER1YsW+9W0jVgCi+uYAgXKRol
kh5E1GPcgXC0PBHs4EhCXIvQ6VsHkswKLjwTWn3RSotkwGlxNQwbKX4BSFdoc5k/
QFjW0gG+OoISPJyN3zkU//fKP4/jncxw6jev9KNe7iR8D81Or2s5WhAfA6iv86a9
qDTWEwP01YmW7bodiv1iytJqrmqLq/Nan1B0HyU9szDE1Ulftf3pSfWJo7pBb1Vc
Rh+3AgMBAAGjgdMwgdAwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUF
BwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBSwrZlshrfvOW37
q6uxSNSYshC2tDAfBgNVHSMEGDAWgBSfaRPMObTFtIs5n3a3/gUuHw+7cjBRBgNV
HREESjBIghF0ZXN0ZGF0YS50ZXN0ZGF0YYYzc3BpZmZlOi8vbG9jYWwuYXdzZGV2
LmxkLWNvcnAuY29tL3NlcnZpY2VzL3Rlc3RkYXRhMA0GCSqGSIb3DQEBCwUAA4IB
AQAUWUnILP1AtiL9e4M0dWfPiVyXBDKhJI4DjF/phNF0X+ou+rjFUCJunf29A9YD
QzJOQaJY0Gw3Gy1zyx7QG1nkZAhNwqsrzHx5XP9b/p07/Oh7RXk27LbMJZ6JTdQ2
zR4V+oWDRJ4Fm81cgLaRlXg77xsg69pblubLGvPp3/YLYItoA9oTJdmSftFXDUUa
vz/PqfWriwiBU3BD8plERt7ljbsOUbo1LQEEd9zxYoPzBKDKj8NMIfmY9NK2QiOy
vAzyAvB7jU7EhcJsrq3G9KW0Fji0/rsLNb9h8U0ketwdXrCjEq9aEOfKDcHYwvPj
TSo+uj5MuTBHveAuwmcXy7hB
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDnTCCAoWgAwIBAgIUfoPtM9PmrcMNEV4V7XhM0NyrE2AwDQYJKoZIhvcNAQEL
BQAwTjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xJzAlBgNV
BAMTHnRlc3RkYXRhIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMjEwMjYyMDQw
MDBaFw0yNDEwMTUyMDQwMDBaMFsxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0
SHViLCBJbmMuMTQwMgYDVQQDEyt0ZXN0ZGF0YSBJbnRlcm1lZGlhdGUgQ2VydGlm
aWNhdGUgQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
wcKXpc7l19CyYhbkl6j9EfRP5o2VMoRDUHndxvYIciRhy44lmAscjI3ZnCGRV/TX
iP2x8pvvhltqD5h6Rb0pHG91PwdOb/vqLIfSZ91tCQbpSHIKwWvZmkefp7Xt7AQM
VPZwMJNq2o1S3m167CkXHzSlHBVq+ztAc9rvkgLSe85dDN54OFWUwwJY8QToLANp
ElIym6RIKAqwRASWe8bLG18lGEUnpYwseR0KWYcfL5R15QD3Lk8Xb93FSPakYmvI
7kMje0RwjHZv8GEmiFweFgEiJNtCtdsyoc3reSPHf/hfRSLDV4aqW/BtdYnXJHVn
RGwT/ZrIDinCSSWEQNiY+wIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0T
AQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUn2kTzDm0xbSLOZ92t/4FLh8Pu3IwHwYD
VR0jBBgwFoAUxMCLKgcsWqPtQd5U7ZtkVYR1vIIwDQYJKoZIhvcNAQELBQADggEB
AHwJIEc04BIkww0ljW8A1K9JoNVsnJyxL7cjeEB+A+S64bcG3QN8N1+qwvyOI3a4
WjhWNfV2oJi7PkJ0WPz+anTHugtwbekKqV45Y3W1X/OdPTKMPWBZ5mvkLecTlobl
jMh9kWg3F3n+d+KaWGlvdKDPSwaOhpmkgwPthAuztcAkpvJuz7/4jP5jrM2cqD4+
otDRKr+b73m2w7jqICXxdYXEuFQ9qCZ8VvlYCTF9qOuBlCeAwanRaPj5na+cME5m
0AIZyTeYCpB6eP5HLWCGvEP6lD5Hv8PMAzh8xgfFDyxZc3jAWFRB5xRidAVC/wtF
Nhs1l1AIQ5vUOZrOmsHaIHI=
-----END CERTIFICATE-----

View File

@@ -1,52 +1,27 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDD5sx8tApluV3N
u620WKSRv0CjF1QM/pNr/gJE0JFMwVir9M/FoXjwf7QzVX1WLmENrAYh02mq/Z59
i6JKJ/mJD48jIrNmjBEx1bqsdA5iW/WHMZ4gPDNu6GZXw6ybUDuVMyKfats4bBVo
99EHZumFDoWd60572DjFGBv3CGC1l8kAoNaD6kVUaxpXcyz9hnYoHa0ukEIwPQHS
vmYt3T9ptr7LBuICfpZAFjh3u8L79kSZf2pbL1l5sKbq+0PWezPrUJqhg8Zcg7ZA
w3nMqrxJMTx+5pyldY2oSY81BRzQ9a+OCZQjdHk5g0N/kZXQTWQmUiVkONsjiCvp
k0qeEFL2S/cJydf5TG/7+2rQ5RfzEi3jekg8gD6HeznIvD0fpxoWjimnJ4K6IDOk
hUNZBeLZWOUnK6YOtSAHH+rsRd3kgT9KhGhb3y12VZnNd+hOINhtYWi7e8tqa3JA
hy1xJ8bq11OMZVP0h+HMuIwf8y2SbOOrTOyfWbQo4EEnGXjQQPdZCgLx69+n//rL
lNzDTwD9n5jAs4h9rsYeksUz/Ms7FKFfTwk3mkp1160zAdO+49EzgdqrGISxd17C
zM/gdiaNG+8nU+3O6zEvcmL8ijwPSUhOOJ1K2Vo9we4rctZqCH688Gx9wCtw+SPY
lGwVdPiabiGRT6KkyG9eU1FyUrrGBwIDAQABAoICABUIJlQNEECzkfqQd6mxCpoL
KmlYC9IJUtJ5Rs0Uh0TyTQ7JDbVuDInla/dG6lniSNEq8s2W4PVWnTllUFsdx5CL
dxaSlygfSYlMJOp220R8EvQcw5k6XVs+4B30CAf0qTDveHwdAMQh9np6gJqG1fNP
B9FYfeiV4iJm4Dm5UIiubwn+OomXETJq/Tz+RIpDcVQFO56QJkr/gb6aamXqJvC2
ie1KI+GYrZDb0dwo8FoUqnDAWS7I+pYx/PmlWDciqwRMdw14FEfCbEKvudfbTLOe
8Zu+LnslD7xNiW5ryhg1CE/7f0f/LTSbfxenDap7ZJEoqJMF96Ds8an2AkDOB9nx
XB5kVz5jMsaZ1f68Rx8S4EqEEcXxYwiRe5WoDEnnVr2+Q6QzOqh/4DaA5VuId462
IjPDWmYszSqig9QXjS11SkTMKCKxas4AqfCb8uUlcXdri4aSv0Khb7DgbO2su1KC
+hcXpiAMH9jVX1d4N8c0Q0HLOT09lRnD2mmEX6Lo2kWgb5Hpzo88Ty9WI7oiszsY
J1r6qPkXIc9Ft1YwpdVBhkBbxB024l9IG8I1UzjrLFnR/A5sRefzosNi4/ZACPW4
Kykhy7p+ZV9Kf8cjMbY11afCmi9jlXsVqWwJIMk+LxTCjF/lmbMay/G7j+ibGtSQ
hU+LNPzAOUEwBj1OqoMhAoIBAQDlo3Ecgeu5zxNILnkut6RHHDJUK2N9+5HIDwi4
frMlkM3b8NLz09/GtmX4HTKkDBur4x9QeEIsxG19tk2QWZQ4EAKs8OcEXaCL4Q9g
msZbQC5rrFjRzUC4roxCTEz4g/ANEM+huLq/3a6afUhkmUuGZzK6rf6E36dTx3na
DP4tDAx1s/DqfMtXYYmzrb3V1Nk9NUwQFRselJ8EHeIA7NEcLcv5yREia57RcYm/
EfuA90j1ER6iHZIxopPfo1Cx7I9N4eoQM4/Tjb5qu+krfGOFOQbL6hCPHeHkZlAw
0/2ECxCHS2y+Uih3MkMdnme2tfBr8AQpcfAOxSTMXu1wGDs9AoIBAQDaY+fVJ2G/
/myI3Nly7MZaJ8NT8kcQx55b1s5vqWU+IQo5YC4KGdUp32U5Uxr2war8SuA2pKW0
Cv42IJYlGQQUgpj2k+DJcDz+Qz9nqE5Ft8vNmyA3Y2gbwgTkd9dtFCTph4BNiAad
qyjXwdJ6qwB1dbORsprC/Ue8WcEVwWwvF3PGnvbEiM8qLyxv/WIXnN5B/XcvUFHS
mS3IVkJpdR8Kzp0Ctro5mHd2L6SQa/XM5tU3bye9Hzf1J3rWM/FGzVtYInC//CoO
w/sA/ebfhK1iHjYYp4MjyETBkbD1kpCl6eNdTKN9ydSkUzhWlHn3xKQQrdZ7KiiH
YbIhh1rwB+qTAoIBAFIoOnSfis2MZ3Kgpdxv+UczsFHqwAq3sX1o247eTYu4Fd6F
d4OinuICKdMt5wtIBbJmbLKmg85ubFnYmkF1uxCfscVb3tryAFlrKMxAM408Fh+R
pqlRDMHGOQoTMEqNMZoLFK3gYHf6gNhm0DqlmZ65Vy3wyCmTttLDgDXiBiHpuJ93
xE6wXTOjAtgU5eEV6K78XX03f99d/tJDOrNoBpxVSi/Qnt+4rzZxr317moaWcjSz
bklD2SUG7G7LiDhP0SllFQ+80s02XhTjq9VSCG0GbQcRc+EwKLxFWpVNktrl9oDh
HEOvMykKA3caUDLPPvfvBB4r1F4EbFjt8Xb0RGUCggEAO0PrcRvr2gd4ere8RwTc
WzD5P/m6sWIKpo+nnAPTVsXumV1xgQo7n85hEOptodMyzJ6hNBMAaNim3hd/x3d/
dPVv/1JoKSJNWw7y0PWKsD7NjvFvD7jpUscXPs0K6C4USk+cUO3+JaGCRvLxZJqt
WDLl1T8r4oiLhCCzVm0UJ79sitUu0Gz0E1WT8JxJl3DZm/zl8DAS1Fz/YKOQCEBh
eTRSxZ7C8MhgevE47nxtyvpFmHKQzTEApYXePuz/qCAojsVh5afP3gvvPPiqQ7Qk
vUDHm28yFm7Nwd4AsNPibzQGoJYgtA0mqKVw34YRh1yUzXXvg6MQNpUbmx+5XPQ5
AwKCAQEA5Iye1s7RVxZP5iJ3vgy67OU+zza5yEIoJEabhF4bOBDsCwNcx2EwsQll
X/Su5qqiIVnrRmkdYVhTnZv8bigq/8Hu+BBenMLxkAwZ5ep6gKq9wdiPQArjNBlS
5KkGuj+7LNCsmmldXVXjjg2BNWBDdVv33hhhqsi/Tzau+qAufdNGdBTS4ZTWEH0z
X5EBtOphJbBPeMUrm1PFOXKUDDwPfqX86rg1NHr1l5iB7uqShZak1s1ovoyFO6s7
I9d8chi4/qwwYk8cHczB4C9EwBvWEvcAf1xa6I1Mp8y3tDhWPVIpq5P8i9vQFYIJ
LWLCd/YowgxkNl5j6a5QMFoZvjLi5A==
-----END PRIVATE KEY-----
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAuNJh/32PT3CzZJg4Q0y1uI0WKWVmkMeRg9q0oKHTux3HRLxA
p7xR/QM5LTZ1NlfztfDbPkOug6gAnQRYnsL4tx1Q9Zadej3gw83flNYLL9PWiZMj
EPa4LHPcREdWLFvvVtI1YAovrmAIFykaJZIeRNRj3IFwtDwR7OBIQlyL0OlbB5LM
Ci48E1p90UqLZMBpcTUMGyl+AUhXaHOZP0BY1tIBvjqCEjycjd85FP/3yj+P453M
cOo3r/SjXu4kfA/NTq9rOVoQHwOor/Omvag01hMD9NWJlu26HYr9YsrSaq5qi6vz
Wp9QdB8lPbMwxNVJX7X96Un1iaO6QW9VXEYftwIDAQABAoIBAQC4yEaIPQ2yG/iP
g40E5EXvDMfyfVntOEopLNlYnFLnCl+3PgvaZ/ME5lsc9Ax+V7Lm3bclal+pa6ep
VLYRjNdDpMDTuVEa7ZCx0zxNPy8SE1a0V3JAvJrofrHjZfsnAIerIyGQMr73NTYB
ieuFUrCGml55EMUQvdoiHR7BkmuLYn+3TNJ4Lr2WsNGmChG/W+IwkdmW4RFLZo67
qHjb7yAYEXFBppgm1YpaHwEnCmOsErmBlAwFZwvPLRezjumics+2yrHt/Tm7uicR
GQI2ROCp2rst2UOiPtYd+vWCabYB2TtMq9/CLAgQsIkC/iABWDsB/Yasu/xfo1k+
C0GhlfgJAoGBAMpo1qqjoSDDL+tJrVFg/dEpU7CWodXJ/6vm3Qb1WB7Nm93j5tsf
7v390uQGsPSl/KD9MqTnDWs4xVgvO6eu/LCB9749ctbBkHvpN1eLjlApWG+eLGHf
gfqHMiokQ228J0CUhgvrfb0SxIsRnmHqxfbHo8oHBIW+WnlNwMOG0WvTAoGBAOnB
dlsWedSQJGngsQg7zc9NOJbJz4SxAv/Vp6KjVMiFQOcTJa/PUyaoTDUGoxSysMTl
+5RF70gxPtcZJrqC2OWuULkI1Lm0A69SHU/P2tCJ+Wt/AcK7yH/vTEQd44Pwjkct
uoCM3Euf/S66GOIPkM3RG5CxuU+SBp+wGaOYhAINAoGAQ+WnHNaG1lajXGn6mbHP
crpKOJJO90grW561xf/G745JGsW4SwkLQmhCtfsIoQiNFfPZaTeYaL9Cc7JkcHti
iFMQp+A1BZUowmgZCGTn+DvmTorgmHRBRajUSw6fD9Bt2lv4G0eDhkklZQEj//Sh
M4cEimCQQ8z2zHooj25KEcECgYAV9792ufsDFfTGGn6opm4mEDzENv0QnE4K2vph
F3ZtTdCWpr8A8bv/wws+ZHxJAq4IIxDsk1H0d+RO9KcmGgvmMeaWLRVIynkaLd5h
VMhclsrg5lO1CE7Ebym8sQ5jpOTKHasMT7CYTtXNYWHbRNk37nHnvDwNFU0YDsWq
ETg+tQKBgBSeRiTSx9Up58kNqRj+Z+EAKY3FoIgN6b83pL71gCXLdP0SQmZ9QOiV
5RGF0cbE1n3dx+HMKzNKnKhj783hnLosttq5OlO2cxqLTGLjv5aLPDji3HtK1HaA
IBIg58byFOMoKp95W0QSXVIAymXyvKRyj7dT1kDOidjQo354Y+5R
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDbDCCAlSgAwIBAgIUSixGHtBJBKsVdj56puZy3LHvtkgwDQYJKoZIhvcNAQEL
BQAwTjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xJzAlBgNV
BAMTHnRlc3RkYXRhIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMjEwMjUyMjU1
MDBaFw0yNzEwMjQyMjU1MDBaME4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0
SHViLCBJbmMuMScwJQYDVQQDEx50ZXN0ZGF0YSBDZXJ0aWZpY2F0ZSBBdXRob3Jp
dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDArzGWFksWPijYVrsq
Hj1YakHQiUcK7KLsXZHm0Tx3ryUvJZAX86UM+/QXO/TVYoWPoIXVaFFCDMlwKzXU
FgEHJHQU7NaKcDUN8xaM36Y79VouHJzkUl6UvMGZrQXqdsPsQ56t/GcJCbbBLgki
9uQPGOB2KhLTkPV4L6CmubIOakCmNI99Ivoo2YGc3m5RYSv5f8/RnyDYBKWAenwI
omWPFs9te5AriIaXXq4nQhJQ40TCK/P9AZTlJOO5jaZ4Gnt/XWSHoSwxd15bkEos
19wdqK4oHHnO8luIA4lL7PxyOB5Wz/P+n9epY7aM/AHy7gVoekLhFk1CVuzA30Uv
ZEfbAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBTEwIsqByxao+1B3lTtm2RVhHW8gjANBgkqhkiG9w0BAQsFAAOCAQEA
kSXCYNgsLdyWVru8rhshoW7sZjKCttgmUzacv79JPpYuZOTS/YthlYAh6NHqnCuW
cMRNxjr88LQu/U8MwJ+8qeHZBm2k6RMvvm8/w8WfyP1E/2PgFtF9nqlehj1o3BaR
UohLNl7YfJORiW8L/z0FAsz24+xsCtvQnvaGxFZcYHKYyg9xS4dbspN5fg2daxP7
jzzI0xcmpOFJfDpRywhx9iI8J+tJLtVJLZutuah9cK2Y5PGqsTikly//WAv99Rw8
naMb8DOC0p1RiXakbF7LMSyIbaLdmItlx3Ea/b/Ul/kFw/cueHTCyGZ4swvkq6pR
1lEUD4MQPt0u9IterfN4yg==
-----END CERTIFICATE-----

View File

@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDbDCCAlSgAwIBAgIUdWEanf/1+cmS33nZDPY+gkQwS+gwDQYJKoZIhvcNAQEL
BQAwTjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xJzAlBgNV
BAMTHnRlc3RkYXRhIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMjEwMjYyMDM1
MDBaFw0yNzEwMjUyMDM1MDBaME4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0
SHViLCBJbmMuMScwJQYDVQQDEx50ZXN0ZGF0YSBDZXJ0aWZpY2F0ZSBBdXRob3Jp
dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLAQ88jtxKIH0Uc0Yp
oUmM0Bx3/fBqgbYAGJ1cxtkXahhGp94ICe0gmASnbPuAY22X0zf55C94semPNNgb
xV/FHftvyi720z3wwOk8twa8I4vjb1mnxlPZzS2Xd1pb4KnUtjOemGfZOn6OWbXF
ukf5uNDKUZcFPPjaiAnQ+kK6vjYWZjY6Hn4KVAjBRylQj86hzgF0cc7B4WOX3L6L
ahY56urFElKnFh8vCydSfyZqtz56ng3Gc83PBIEkTTgQVwFJkx+Azh73NaTGwXcv
3Wj4D+TzF2T0JsHe6s1CWyoHxvccwoUdAv8HGzzHVcm+81KMdy9r9e7R3kyu9HSK
D3sBAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBRzOWBY5hQAM5obC3y+nbHKnvQtmzANBgkqhkiG9w0BAQsFAAOCAQEA
yKsw7CLYQQ2i9jzislIUF0pMW03rLTBPSyv78mhUrfaL2TncdJAPTMdR5KaFTKSy
2AzuYkIN9gU0blk73sxbtdNyZlpP0MQHRuRkgpuXii0tWQ0f6uhLaZRJvLm4Hjsj
Sma8ydO3/7FvdTby6Uv1Rivd53BGfVAcw8W1oC+8KfrDhUsWzqcDH6Aiszz0utKr
XAqiOdNUSy2riyxc3s9RH2j20BNj6vWkz8ZoRdBa2pf/oRtYF2ZJjCZq7eH5hlSj
/Am5Yw9Cc0/48Tm58e4V2SDHys9ld8EBKOMlo8djk3q0LxGtZ41O1hr4iaHTkWyl
2wYWEa395xncUBUqvCpKyA==
-----END CERTIFICATE-----

View File

@@ -924,3 +924,177 @@ tests:
result:
columns: ["count(*)"]
rows: [["15"]]
- name: tls, bad root, failover to standby fails
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/chain_key.pem
- name: cert.pem
source_path: testdata/chain_cert.pem
- name: root.pem
source_path: testdata/invalid_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/chain_key.pem
- name: cert.pem
source_path: testdata/chain_cert.pem
- name: root.pem
source_path: testdata/invalid_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: "use repo1"
- query: "call dolt_assume_cluster_role('standby', '11')"
error_match: failed to transition from primary to standby gracefully
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (0)"
- name: tls, good root, create new database, primary replicates to standby, fails over, new primary replicates to standby, fails over, new primary has all writes
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/chain_key.pem
- name: cert.pem
source_path: testdata/chain_cert.pem
- name: root.pem
source_path: testdata/chain_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/chain_key.pem
- name: cert.pem
source_path: testdata/chain_cert.pem
- name: root.pem
source_path: testdata/chain_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: 'use repo1'
- exec: 'create table vals (i int primary key)'
- exec: 'insert into vals values (0),(1),(2),(3),(4)'
- query: "call dolt_assume_cluster_role('standby', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["5"]]
- query: "call dolt_assume_cluster_role('primary', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (5),(6),(7),(8),(9)'
- query: "call dolt_assume_cluster_role('standby', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["10"]]
- query: "call dolt_assume_cluster_role('primary', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (10),(11),(12),(13),(14)'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["15"]]