mirror of https://github.com/dolthub/dolt.git

Merge branch 'main' into fulghum/orm-testing
@@ -82,12 +82,10 @@ func (cmdDoc CommandDocumentation) CmdDocToMd() (string, error) {
|
||||
}
|
||||
templ, templErr := template.New("shortDesc").Parse(cmdMdDocTempl)
|
||||
if templErr != nil {
|
||||
|
||||
return "", templErr
|
||||
}
|
||||
var templBuffer bytes.Buffer
|
||||
if err := templ.Execute(&templBuffer, cmdMdDoc); err != nil {
|
||||
|
||||
return "", err
|
||||
}
|
||||
ret := strings.Replace(templBuffer.String(), "HEAD~", "HEAD\\~", -1)
|
||||
|
||||
@@ -136,14 +136,14 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
|
||||
}
|
||||
|
||||
func checkCredAndPrintSuccess(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, endpoint string) errhand.VerboseError {
|
||||
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
Endpoint: endpoint,
|
||||
Creds: dc,
|
||||
})
|
||||
if err != nil {
|
||||
return errhand.BuildDError("error: unable to build server endpoint options.").AddCause(err).Build()
|
||||
}
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
|
||||
}
|
||||
|
||||
@@ -161,14 +161,14 @@ func updateProfileWithCredentials(ctx context.Context, dEnv *env.DoltEnv, c cred
|
||||
host := dEnv.Config.GetStringOrDefault(env.RemotesApiHostKey, env.DefaultRemotesApiHost)
|
||||
port := dEnv.Config.GetStringOrDefault(env.RemotesApiHostPortKey, env.DefaultRemotesApiPort)
|
||||
hostAndPort := fmt.Sprintf("%s:%s", host, port)
|
||||
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
Endpoint: hostAndPort,
|
||||
Creds: c,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error: unable to build dial options server with credentials: %w", err)
|
||||
}
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error: unable to connect to server with credentials: %w", err)
|
||||
}
|
||||
|
||||
@@ -61,6 +61,7 @@ const (
|
||||
SQLFlag = "sql"
|
||||
CachedFlag = "cached"
|
||||
SkinnyFlag = "skinny"
|
||||
MergeBase = "merge-base"
|
||||
)
|
||||
|
||||
var diffDocs = cli.CommandDocumentationContent{
@@ -71,11 +72,17 @@ Show changes between the working and staged tables, changes between the working

{{.EmphasisLeft}}dolt diff [--options] [<tables>...]{{.EmphasisRight}}
This form is to view the changes you made relative to the staging area for the next commit. In other words, the differences are what you could tell Dolt to further add but you still haven't. You can stage these changes by using dolt add.

{{.EmphasisLeft}}dolt diff [--options] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch.

{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...HEAD{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] <commit>..<commit> [<tables>...]{{.EmphasisRight}}
This is synonymous to the above form (without the ..) to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.

{{.EmphasisLeft}}dolt diff [--options] <commit>...<commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes on the branch containing and up to the second {{.LessThan}}commit{{.GreaterThan}}, starting at a common ancestor of both {{.LessThan}}commit{{.GreaterThan}}. {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}}. You can omit any one of {{.LessThan}}commit{{.GreaterThan}}, which has the same effect as using HEAD instead.

The diffs displayed can be limited to show the first N by providing the parameter {{.EmphasisLeft}}--limit N{{.EmphasisRight}} where {{.EmphasisLeft}}N{{.EmphasisRight}} is the number of diffs to display.
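The --merge-base equivalences described above map directly onto plumbing added later in this commit. A minimal sketch (illustrative only, not part of the diff; the wrapper function name is hypothetical) of how the "from" side of `dolt diff --merge-base A B` is resolved with the new getMergeBaseFromStrings helper:

// mergeBaseFromSide resolves the "from" revision used by `dolt diff --merge-base A B`.
// getMergeBaseFromStrings is introduced later in this commit; everything else here is a sketch.
func mergeBaseFromSide(ctx context.Context, dEnv *env.DoltEnv, a, b string) (string, error) {
	mergeBaseStr, verr := getMergeBaseFromStrings(ctx, dEnv, a, b)
	if verr != nil {
		return "", verr
	}
	// This hash is what `dolt merge-base A B` prints, so diffing from it to B
	// is equivalent to `dolt diff A...B`.
	return mergeBaseStr, nil
}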
@@ -132,6 +139,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
|
||||
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
|
||||
ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.")
|
||||
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
|
||||
ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
|
||||
return ap
|
||||
}
|
||||
|
||||
@@ -202,7 +210,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
|
||||
dArgs.limit, _ = apr.GetInt(limitParam)
|
||||
dArgs.where = apr.GetValueOrDefault(whereParam, "")
|
||||
|
||||
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag))
|
||||
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag), apr.Contains(MergeBase))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -243,7 +251,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
|
||||
|
||||
// applyDiffRoots applies the appropriate |from| and |to| root values to the receiver and returns the table names
|
||||
// (if any) given to the command.
|
||||
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached bool) ([]string, error) {
|
||||
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached, useMergeBase bool) ([]string, error) {
|
||||
headRoot, err := dEnv.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -271,15 +279,33 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
if useMergeBase {
|
||||
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
|
||||
}
|
||||
// `dolt diff`
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if strings.Contains(args[0], "..") {
|
||||
if useMergeBase {
|
||||
return nil, fmt.Errorf("Cannot use `..` or `...` with --merge-base flag")
|
||||
}
|
||||
err = dArgs.applyDotRevisions(ctx, dEnv, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return args[1:], err
|
||||
}
|
||||
|
||||
// treat the first arg as a ref spec
|
||||
fromRoot, ok := maybeResolve(ctx, dEnv, args[0])
|
||||
|
||||
// if it doesn't resolve, treat it as a table name
|
||||
if !ok {
|
||||
// `dolt diff table`
|
||||
if useMergeBase {
|
||||
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
|
||||
@@ -288,23 +314,123 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
|
||||
|
||||
if len(args) == 1 {
|
||||
// `dolt diff from_commit`
|
||||
if useMergeBase {
|
||||
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
toRoot, ok := maybeResolve(ctx, dEnv, args[1])
|
||||
|
||||
if !ok {
|
||||
// `dolt diff from_commit ...tables`
|
||||
// `dolt diff from_commit [...tables]`
|
||||
if useMergeBase {
|
||||
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return args[1:], nil
|
||||
}
|
||||
|
||||
dArgs.toRoot = toRoot
|
||||
dArgs.toRef = args[1]
|
||||
|
||||
// `dolt diff from_commit to_commit ...tables`
|
||||
if useMergeBase {
|
||||
err := dArgs.applyMergeBase(ctx, dEnv, args[0], args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// `dolt diff from_commit to_commit [...tables]`
|
||||
return args[2:], nil
|
||||
}
|
||||
|
||||
// applyMergeBase applies the merge base of two revisions to the |from| root
|
||||
// values.
|
||||
func (dArgs *diffArgs) applyMergeBase(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) error {
|
||||
mergeBaseStr, err := getMergeBaseFromStrings(ctx, dEnv, leftStr, rightStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromRoot, ok := maybeResolve(ctx, dEnv, mergeBaseStr)
|
||||
if !ok {
|
||||
return fmt.Errorf("merge base invalid %s", mergeBaseStr)
|
||||
}
|
||||
|
||||
dArgs.fromRoot = fromRoot
|
||||
dArgs.fromRef = mergeBaseStr
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyDotRevisions applies the appropriate |from| and |to| root values to the
|
||||
// receiver for arguments containing `..` or `...`
|
||||
func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv, args []string) error {
|
||||
// `dolt diff from_commit...to_commit [...tables]`
|
||||
if strings.Contains(args[0], "...") {
|
||||
refs := strings.Split(args[0], "...")
|
||||
var toRoot *doltdb.RootValue
|
||||
ok := true
|
||||
|
||||
if len(refs[0]) > 0 {
|
||||
right := refs[1]
|
||||
// Use current HEAD if right side of `...` does not exist
|
||||
if len(refs[1]) == 0 {
|
||||
right = "HEAD"
|
||||
}
|
||||
|
||||
err := dArgs.applyMergeBase(ctx, dEnv, refs[0], right)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(refs[1]) > 0 {
|
||||
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
|
||||
return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1])
|
||||
}
|
||||
dArgs.toRoot = toRoot
|
||||
dArgs.toRef = refs[1]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// `dolt diff from_commit..to_commit [...tables]`
|
||||
if strings.Contains(args[0], "..") {
|
||||
refs := strings.Split(args[0], "..")
|
||||
var fromRoot *doltdb.RootValue
|
||||
var toRoot *doltdb.RootValue
|
||||
ok := true
|
||||
|
||||
if len(refs[0]) > 0 {
|
||||
if fromRoot, ok = maybeResolve(ctx, dEnv, refs[0]); !ok {
|
||||
return fmt.Errorf("from ref in two dot diff must be valid ref: %s", refs[0])
|
||||
}
|
||||
dArgs.fromRoot = fromRoot
|
||||
dArgs.fromRef = refs[0]
|
||||
}
|
||||
|
||||
if len(refs[1]) > 0 {
|
||||
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
|
||||
return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1])
|
||||
}
|
||||
dArgs.toRoot = toRoot
|
||||
dArgs.toRef = refs[1]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// todo: distinguish between non-existent CommitSpec and other errors, don't assume non-existent
|
||||
func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.RootValue, bool) {
|
||||
cs, err := doltdb.NewCommitSpec(spec)
|
||||
|
||||
@@ -21,8 +21,6 @@ import (
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/dolthub/dolt/go/store/types"
|
||||
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/cli"
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env"
|
||||
@@ -52,10 +50,6 @@ func (cmd *DumpDocsCmd) Description() string {
|
||||
return "dumps all documentation in md format to a directory"
|
||||
}
|
||||
|
||||
func (cmd *DumpDocsCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
|
||||
return types.IsFormat_DOLT(nbf)
|
||||
}
|
||||
|
||||
// Hidden should return true if this command should be hidden from the help text
|
||||
func (cmd *DumpDocsCmd) Hidden() bool {
|
||||
return true
|
||||
@@ -105,19 +99,17 @@ func (cmd *DumpDocsCmd) Exec(ctx context.Context, commandStr string, args []stri
|
||||
return 1
|
||||
}
|
||||
|
||||
err = cmd.dumpDocs(wr, cmd.DoltCommand.Name(), cmd.DoltCommand.Subcommands)
|
||||
verr := cmd.dumpDocs(wr, cmd.DoltCommand.Name(), cmd.DoltCommand.Subcommands)
|
||||
|
||||
if err != nil {
|
||||
verr := errhand.BuildDError("error: Failed to dump docs.").AddCause(err).Build()
|
||||
if verr != nil {
|
||||
cli.PrintErrln(verr.Verbose())
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.Command) error {
|
||||
func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.Command) errhand.VerboseError {
|
||||
sort.Slice(subCommands, func(i, j int) bool {
|
||||
return subCommands[i].Name() < subCommands[j].Name()
|
||||
})
|
||||
@@ -130,10 +122,9 @@ func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.
|
||||
|
||||
if !hidden {
|
||||
if subCmdHandler, ok := curr.(cli.SubCommandHandler); ok {
|
||||
err := cmd.dumpDocs(wr, cmdStr+" "+subCmdHandler.Name(), subCmdHandler.Subcommands)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
verr := cmd.dumpDocs(wr, cmdStr+" "+subCmdHandler.Name(), subCmdHandler.Subcommands)
|
||||
if verr != nil {
|
||||
return verr
|
||||
}
|
||||
} else {
|
||||
docs := curr.Docs()
|
||||
@@ -142,7 +133,7 @@ func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.
|
||||
docs.CommandStr = fmt.Sprintf("%s %s", cmdStr, curr.Name())
|
||||
err := CreateMarkdown(wr, docs)
|
||||
if err != nil {
|
||||
return err
|
||||
return errhand.BuildDError(fmt.Sprintf("error: Failed to create markdown for command: %s %s.", cmdStr, curr.Name())).AddCause(err).Build()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -238,7 +238,7 @@ func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
|
||||
}
|
||||
|
||||
func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint string, insecure bool) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
|
||||
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
Endpoint: authEndpoint,
|
||||
Creds: dc,
|
||||
Insecure: insecure,
|
||||
@@ -246,7 +246,7 @@ func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint st
|
||||
if err != nil {
|
||||
return nil, errhand.BuildDError("error: unable to build dial options for connecting to server with credentials.").AddCause(err).Build()
|
||||
}
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return nil, errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
|
||||
}
|
||||
|
||||
@@ -80,24 +80,35 @@ func (cmd MergeBaseCmd) Exec(ctx context.Context, commandStr string, args []stri
|
||||
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
|
||||
}
|
||||
|
||||
left, verr := ResolveCommitWithVErr(dEnv, apr.Arg(0))
|
||||
mergeBaseStr, verr := getMergeBaseFromStrings(ctx, dEnv, apr.Arg(0), apr.Arg(1))
|
||||
if verr != nil {
|
||||
return HandleVErrAndExitCode(verr, usage)
|
||||
}
|
||||
|
||||
right, verr := ResolveCommitWithVErr(dEnv, apr.Arg(1))
|
||||
cli.Println(mergeBaseStr)
|
||||
return 0
|
||||
}
|
||||
|
||||
// getMergeBaseFromStrings resolves two revisions and returns the merge base
|
||||
// commit hash string
|
||||
func getMergeBaseFromStrings(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) (string, errhand.VerboseError) {
|
||||
left, verr := ResolveCommitWithVErr(dEnv, leftStr)
|
||||
if verr != nil {
|
||||
return HandleVErrAndExitCode(verr, usage)
|
||||
return "", verr
|
||||
}
|
||||
|
||||
right, verr := ResolveCommitWithVErr(dEnv, rightStr)
|
||||
if verr != nil {
|
||||
return "", verr
|
||||
}
|
||||
|
||||
mergeBase, err := merge.MergeBase(ctx, left, right)
|
||||
if err != nil {
|
||||
verr = errhand.BuildDError("could not find merge-base for args %s", apr.Args).AddCause(err).Build()
|
||||
return HandleVErrAndExitCode(verr, usage)
|
||||
verr = errhand.BuildDError("could not find merge-base for args %s %s", leftStr, rightStr).AddCause(err).Build()
|
||||
return "", verr
|
||||
}
|
||||
|
||||
cli.Println(mergeBase.String())
|
||||
return 0
|
||||
return mergeBase.String(), nil
|
||||
}
|
||||
|
||||
func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit, errhand.VerboseError) {
|
||||
|
||||
@@ -151,14 +151,14 @@ func getGRPCEmitter(dEnv *env.DoltEnv) *events.GrpcEmitter {
|
||||
}
|
||||
|
||||
hostAndPort := fmt.Sprintf("%s:%d", host, port)
|
||||
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
|
||||
Endpoint: hostAndPort,
|
||||
Insecure: insecure,
|
||||
})
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ package sqlserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -261,12 +262,22 @@ func Serve(
|
||||
args := clusterController.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
|
||||
Logger: logrus.NewEntry(lgr),
|
||||
})
|
||||
|
||||
clusterRemoteSrvTLSConfig, err := LoadClusterTLSConfig(serverConfig.ClusterConfig())
|
||||
if err != nil {
|
||||
lgr.Errorf("error starting remotesapi server for cluster config, could not load tls config: %v", err)
|
||||
startError = err
|
||||
return
|
||||
}
|
||||
args.TLSConfig = clusterRemoteSrvTLSConfig
|
||||
|
||||
clusterRemoteSrv, err = remotesrv.NewServer(args)
|
||||
if err != nil {
|
||||
lgr.Errorf("error creating remotesapi server on port %d: %v", *serverConfig.RemotesapiPort(), err)
|
||||
startError = err
|
||||
return
|
||||
}
|
||||
|
||||
listeners, err := clusterRemoteSrv.Listeners()
|
||||
if err != nil {
|
||||
lgr.Errorf("error starting remotesapi server listeners for cluster config on port %d: %v", clusterController.RemoteSrvPort(), err)
|
||||
@@ -325,6 +336,22 @@ func Serve(
|
||||
return
|
||||
}
|
||||
|
||||
func LoadClusterTLSConfig(cfg cluster.Config) (*tls.Config, error) {
|
||||
rcfg := cfg.RemotesAPIConfig()
|
||||
if rcfg.TLSKey() == "" && rcfg.TLSCert() == "" {
|
||||
return nil, nil
|
||||
}
|
||||
c, err := tls.LoadX509KeyPair(rcfg.TLSCert(), rcfg.TLSKey())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tls.Config{
|
||||
Certificates: []tls.Certificate{
|
||||
c,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func portInUse(hostPort string) bool {
|
||||
timeout := time.Second
|
||||
conn, _ := net.DialTimeout("tcp", hostPort, timeout)
|
||||
|
||||
@@ -523,6 +523,12 @@ func ValidateClusterConfig(config cluster.Config) error {
|
||||
if config.RemotesAPIConfig().Port() < 0 || config.RemotesAPIConfig().Port() > 65535 {
|
||||
return fmt.Errorf("cluster: remotesapi: port: is not in range 0-65535: %d", config.RemotesAPIConfig().Port())
|
||||
}
|
||||
if config.RemotesAPIConfig().TLSKey() == "" && config.RemotesAPIConfig().TLSCert() != "" {
|
||||
return fmt.Errorf("cluster: remotesapi: tls_key: must supply a tls_key if you supply a tls_cert")
|
||||
}
|
||||
if config.RemotesAPIConfig().TLSKey() != "" && config.RemotesAPIConfig().TLSCert() == "" {
|
||||
return fmt.Errorf("cluster: remotesapi: tls_cert: must supply a tls_cert if you supply a tls_key")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -28,8 +28,9 @@ import (
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/ishell"
|
||||
"github.com/fatih/color"
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
mysqlDriver "github.com/go-sql-driver/mysql"
|
||||
"github.com/gocraft/dbr/v2"
|
||||
"github.com/gocraft/dbr/v2/dialect"
|
||||
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/cli"
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/commands"
|
||||
@@ -202,42 +203,69 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, dbToUse), nil)
|
||||
// The standard DSN parser cannot handle a forward slash in the database name, so we have to work around it.
|
||||
// See the original issue: https://github.com/dolthub/dolt/issues/4623
|
||||
parsedMySQLConfig, err := mysqlDriver.ParseDSN(ConnectionString(serverConfig, "no_database"))
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
serverController.StopServer()
|
||||
err = serverController.WaitForClose()
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
}
|
||||
return 1
|
||||
}
|
||||
parsedMySQLConfig.DBName = dbToUse
|
||||
mysqlConnector, err := mysqlDriver.NewConnector(parsedMySQLConfig)
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
conn := &dbr.Connection{DB: mysql.OpenDB(mysqlConnector), EventReceiver: nil, Dialect: dialect.MySQL}
|
||||
_ = conn.Ping()
|
||||
|
||||
if hasQuery {
|
||||
defer conn.Close()
|
||||
rows, err := conn.Query(query)
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
if rows != nil {
|
||||
sqlCtx := sql.NewContext(ctx)
|
||||
wrapper, err := NewMysqlRowWrapper(rows)
|
||||
|
||||
if apr.Contains(noAutoCommitFlag) {
|
||||
_, err = conn.Exec("set @@autocommit = off;")
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
defer wrapper.Close(sqlCtx)
|
||||
if wrapper.HasMoreRows() {
|
||||
err = engine.PrettyPrintResults(sqlCtx, format, wrapper.Schema(), wrapper)
|
||||
}
|
||||
|
||||
scanner := commands.NewSqlStatementScanner(strings.NewReader(query))
|
||||
query = ""
|
||||
for scanner.Scan() {
|
||||
query += scanner.Text()
|
||||
if len(query) == 0 || query == "\n" {
|
||||
continue
|
||||
}
|
||||
|
||||
rows, err := conn.Query(query)
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
if rows != nil {
|
||||
sqlCtx := sql.NewContext(ctx)
|
||||
wrapper, err := NewMysqlRowWrapper(rows)
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
defer wrapper.Close(sqlCtx)
|
||||
if wrapper.HasMoreRows() {
|
||||
err = engine.PrettyPrintResults(sqlCtx, format, wrapper.Schema(), wrapper)
|
||||
if err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
query = ""
|
||||
}
|
||||
|
||||
if err = scanner.Err(); err != nil {
|
||||
cli.PrintErrln(err.Error())
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
@@ -517,9 +517,34 @@ func (c *ClusterYAMLConfig) RemotesAPIConfig() cluster.RemotesAPIConfig {
|
||||
}
|
||||
|
||||
type clusterRemotesAPIYAMLConfig struct {
P int `yaml:"port"`
Port_ int `yaml:"port"`
TLSKey_ string `yaml:"tls_key"`
TLSCert_ string `yaml:"tls_cert"`
TLSCA_ string `yaml:"tls_ca"`
URLMatches []string `yaml:"server_name_urls"`
DNSMatches []string `yaml:"server_name_dns"`
}
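A hypothetical example of the cluster remotesapi YAML this struct now accepts, including the new TLS and server-name fields. The file paths and host names are placeholders, and the yaml package import is assumed to be whichever YAML library the serverconfig package already uses:

// Hypothetical cluster remotesapi YAML matching the struct tags above.
var exampleRemotesAPIYAML = []byte(`
port: 50051
tls_key: /etc/dolt/remotesapi.key
tls_cert: /etc/dolt/remotesapi.pem
tls_ca: /etc/dolt/cluster_ca.pem
server_name_urls:
  - https://standby.cluster.internal
server_name_dns:
  - standby.cluster.internal
`)

func parseExampleRemotesAPIConfig() (clusterRemotesAPIYAMLConfig, error) {
	var c clusterRemotesAPIYAMLConfig
	// yaml stands in for the YAML package already imported by this file.
	err := yaml.Unmarshal(exampleRemotesAPIYAML, &c)
	return c, err
}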
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) Port() int {
|
||||
return c.P
|
||||
return c.Port_
|
||||
}
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) TLSKey() string {
|
||||
return c.TLSKey_
|
||||
}
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) TLSCert() string {
|
||||
return c.TLSCert_
|
||||
}
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) TLSCA() string {
|
||||
return c.TLSCA_
|
||||
}
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) ServerNameURLMatches() []string {
|
||||
return c.URLMatches
|
||||
}
|
||||
|
||||
func (c clusterRemotesAPIYAMLConfig) ServerNameDNSMatches() []string {
|
||||
return c.DNSMatches
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
Version = "0.50.10"
|
||||
Version = "0.50.11"
|
||||
)
|
||||
|
||||
var dumpDocsCommand = &commands.DumpDocsCmd{}
|
||||
|
||||
@@ -144,7 +144,19 @@ func (rcv *RootValue) MutateForeignKeyAddr(j int, n byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
const RootValueNumFields = 3
|
||||
func (rcv *RootValue) Collation() Collation {
|
||||
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
|
||||
if o != 0 {
|
||||
return Collation(rcv._tab.GetUint16(o + rcv._tab.Pos))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rcv *RootValue) MutateCollation(n Collation) bool {
|
||||
return rcv._tab.MutateUint16Slot(10, uint16(n))
|
||||
}
|
||||
|
||||
const RootValueNumFields = 4
|
||||
|
||||
func RootValueStart(builder *flatbuffers.Builder) {
|
||||
builder.StartObject(RootValueNumFields)
|
||||
@@ -164,6 +176,9 @@ func RootValueAddForeignKeyAddr(builder *flatbuffers.Builder, foreignKeyAddr fla
|
||||
func RootValueStartForeignKeyAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
|
||||
return builder.StartVector(1, numElems, 1)
|
||||
}
|
||||
func RootValueAddCollation(builder *flatbuffers.Builder, collation Collation) {
|
||||
builder.PrependUint16Slot(3, uint16(collation), 0)
|
||||
}
|
||||
func RootValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
|
||||
return builder.EndObject()
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ require (
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
|
||||
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
|
||||
github.com/dolthub/vitess v0.0.0-20221004165409-08281765376f
|
||||
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
||||
@@ -57,7 +57,7 @@ require (
|
||||
require (
|
||||
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
|
||||
github.com/cenkalti/backoff/v4 v4.1.3
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221025150822-7d1a405cef1f
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4
|
||||
github.com/google/flatbuffers v2.0.6+incompatible
|
||||
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
|
||||
github.com/mitchellh/go-ps v1.0.0
|
||||
|
||||
@@ -178,8 +178,8 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
|
||||
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
|
||||
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
|
||||
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221025150822-7d1a405cef1f h1:baVuh79XGrmTDo1W8YTXLUJ7gMa5Hsur02l3n2X8UFw=
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221025150822-7d1a405cef1f/go.mod h1:9Q9FhWO82GrV4he13V2ZuDE0T/eDZbPVMOWLcZluOvg=
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4 h1:j55/tHWE+PAT7WQjIlAKmIMFma7sVBpTqjtreBquXOc=
|
||||
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4/go.mod h1:KtpU4Sf7J+SIat/nxoA733QTn3tdL34NtoGxEBFcTsA=
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
|
||||
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
|
||||
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
|
||||
@@ -188,8 +188,8 @@ github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxP
|
||||
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
|
||||
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
|
||||
github.com/dolthub/vitess v0.0.0-20221004165409-08281765376f h1:yE3Cbhk4ylOK1jYqFHnman6fpKN2Cap080GG/UpAVBs=
|
||||
github.com/dolthub/vitess v0.0.0-20221004165409-08281765376f/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
|
||||
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f h1:2sNrQiE4pcdgCNp09RTOsmNeepgN5rL+ep8NF8Faw9U=
|
||||
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
|
||||
@@ -34,9 +34,22 @@ import (
|
||||
|
||||
var GRPCDialProviderParam = "__DOLT__grpc_dial_provider"

// GRPCDialProvider is an interface for getting a *grpc.ClientConn.
type GRPCRemoteConfig struct {
Endpoint string
DialOptions []grpc.DialOption
HTTPFetcher grpcendpoint.HTTPFetcher
}

// GRPCDialProvider is an interface for getting a concrete Endpoint,
// DialOptions and HTTPFetcher from a slightly more abstract
// grpcendpoint.Config. It allows a caller to override certain aspects of how
// the grpc.ClientConn and the resulting remotestorage ChunkStore are
// configured by dbfactory when it returns remotestorage DBs.
//
// An instance of this must be provided in |params[GRPCDialProviderParam]| when
// calling |CreateDB| with a remotesapi remote. See *env.Remote for example.
type GRPCDialProvider interface {
GetGRPCDialParams(grpcendpoint.Config) (string, []grpc.DialOption, error)
GetGRPCDialParams(grpcendpoint.Config) (GRPCRemoteConfig, error)
}
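A minimal sketch (assumed, not part of this diff) of what an implementation of the updated interface looks like: it returns the bundled GRPCRemoteConfig instead of the old (endpoint, options) pair. The type name is hypothetical, e.g. a stub for tests:

// staticDialProvider is a hypothetical GRPCDialProvider that ignores the
// incoming grpcendpoint.Config and returns fixed dial parameters.
type staticDialProvider struct {
	endpoint string
}

func (p staticDialProvider) GetGRPCDialParams(_ grpcendpoint.Config) (GRPCRemoteConfig, error) {
	return GRPCRemoteConfig{
		Endpoint:    p.endpoint,
		DialOptions: []grpc.DialOption{grpc.WithInsecure()},
		HTTPFetcher: http.DefaultClient, // *http.Client satisfies the HTTPFetcher interface
	}, nil
}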
|
||||
|
||||
// DoltRemoteFactory is a DBFactory implementation for creating databases backed by a remote server that implements the
|
||||
@@ -81,10 +94,13 @@ func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFo
|
||||
return db, vrw, ns, err
|
||||
}
|
||||
|
||||
// If |params[NoCachingParameter]| is set in |params| of the CreateDB call for
// a remotesapi database, then the configured database will have caching at the
// remotestorage.ChunkStore layer disabled.
var NoCachingParameter = "__dolt__NO_CACHING"
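A short, hypothetical illustration of how a caller could opt out of chunk caching when creating a remotesapi database; only the presence of the key matters, and the helper name is invented for this sketch:

// exampleNoCachingParams builds the params map for a CreateDB call against a
// remotesapi remote; the value stored under NoCachingParameter is ignored.
func exampleNoCachingParams(dialProvider GRPCDialProvider) map[string]interface{} {
	return map[string]interface{}{
		GRPCDialProviderParam: dialProvider,
		NoCachingParameter:    struct{}{},
	}
}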
|
||||
|
||||
func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}, dp GRPCDialProvider) (chunks.ChunkStore, error) {
|
||||
endpoint, opts, err := dp.GetGRPCDialParams(grpcendpoint.Config{
|
||||
cfg, err := dp.GetGRPCDialParams(grpcendpoint.Config{
|
||||
Endpoint: urlObj.Host,
|
||||
Insecure: fact.insecure,
|
||||
WithEnvCreds: true,
|
||||
@@ -93,10 +109,10 @@ func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.Noms
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts = append(opts, grpc.WithChainUnaryInterceptor(remotestorage.EventsUnaryClientInterceptor(events.GlobalCollector)))
|
||||
opts := append(cfg.DialOptions, grpc.WithChainUnaryInterceptor(remotestorage.EventsUnaryClientInterceptor(events.GlobalCollector)))
|
||||
opts = append(opts, grpc.WithChainUnaryInterceptor(remotestorage.RetryingUnaryClientInterceptor))
|
||||
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
conn, err := grpc.Dial(cfg.Endpoint, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -106,6 +122,7 @@ func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.Noms
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not access dolt url '%s': %w", urlObj.String(), err)
|
||||
}
|
||||
cs = cs.WithHTTPFetcher(cfg.HTTPFetcher)
|
||||
|
||||
if _, ok := params[NoCachingParameter]; ok {
|
||||
cs = cs.WithNoopChunkCache()
|
||||
|
||||
@@ -37,9 +37,10 @@ import (
|
||||
const (
|
||||
ddbRootStructName = "dolt_db_root"
|
||||
|
||||
tablesKey = "tables"
|
||||
foreignKeyKey = "foreign_key"
|
||||
featureVersKey = "feature_ver"
|
||||
tablesKey = "tables"
|
||||
foreignKeyKey = "foreign_key"
|
||||
featureVersKey = "feature_ver"
|
||||
rootCollationKey = "root_collation_key"
|
||||
|
||||
// deprecated
|
||||
superSchemasKey = "super_schemas"
|
||||
@@ -85,9 +86,11 @@ type rvStorage interface {
|
||||
|
||||
GetTablesMap(ctx context.Context, vr types.ValueReadWriter, ns tree.NodeStore) (tableMap, error)
|
||||
GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error)
|
||||
GetCollation(ctx context.Context) (schema.Collation, error)
|
||||
|
||||
SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, m types.Value) (rvStorage, error)
|
||||
SetFeatureVersion(v FeatureVersion) (rvStorage, error)
|
||||
SetCollation(ctx context.Context, collation schema.Collation) (rvStorage, error)
|
||||
|
||||
EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error)
|
||||
|
||||
@@ -156,6 +159,17 @@ func (r nomsRvStorage) GetForeignKeys(context.Context, types.ValueReader) (types
|
||||
return v.(types.Map), true, nil
|
||||
}
|
||||
|
||||
func (r nomsRvStorage) GetCollation(ctx context.Context) (schema.Collation, error) {
|
||||
v, found, err := r.valueSt.MaybeGet(rootCollationKey)
|
||||
if err != nil {
|
||||
return schema.Collation_Unspecified, err
|
||||
}
|
||||
if !found {
|
||||
return schema.Collation_Default, nil
|
||||
}
|
||||
return schema.Collation(v.(types.Uint)), nil
|
||||
}
|
||||
|
||||
func (r nomsRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
|
||||
m, err := r.GetTablesMap(ctx, vrw, ns)
|
||||
if err != nil {
|
||||
@@ -218,6 +232,14 @@ func (r nomsRvStorage) SetFeatureVersion(v FeatureVersion) (rvStorage, error) {
|
||||
return nomsRvStorage{st}, nil
|
||||
}
|
||||
|
||||
func (r nomsRvStorage) SetCollation(ctx context.Context, collation schema.Collation) (rvStorage, error) {
|
||||
st, err := r.valueSt.Set(rootCollationKey, types.Uint(collation))
|
||||
if err != nil {
|
||||
return nomsRvStorage{}, err
|
||||
}
|
||||
return nomsRvStorage{st}, nil
|
||||
}
|
||||
|
||||
func (r nomsRvStorage) DebugString(ctx context.Context) string {
|
||||
var buf bytes.Buffer
|
||||
err := types.WriteEncodedValue(ctx, &buf, r.valueSt)
|
||||
@@ -314,6 +336,7 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter, ns tree.Node
|
||||
fkoff := builder.CreateByteVector(empty[:])
|
||||
serial.RootValueStart(builder)
|
||||
serial.RootValueAddFeatureVersion(builder, int64(DoltFeatureVersion))
|
||||
serial.RootValueAddCollation(builder, serial.Collationutf8mb4_0900_bin)
|
||||
serial.RootValueAddTables(builder, tablesoff)
|
||||
serial.RootValueAddForeignKeyAddr(builder, fkoff)
|
||||
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
|
||||
@@ -361,6 +384,18 @@ func (root *RootValue) setFeatureVersion(v FeatureVersion) (*RootValue, error) {
|
||||
return root.withStorage(newStorage), nil
|
||||
}
|
||||
|
||||
func (root *RootValue) GetCollation(ctx context.Context) (schema.Collation, error) {
|
||||
return root.st.GetCollation(ctx)
|
||||
}
|
||||
|
||||
func (root *RootValue) SetCollation(ctx context.Context, collation schema.Collation) (*RootValue, error) {
|
||||
newStorage, err := root.st.SetCollation(ctx, collation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return root.withStorage(newStorage), nil
|
||||
}
|
||||
|
||||
func (root *RootValue) HasTable(ctx context.Context, tName string) (bool, error) {
|
||||
tableMap, err := root.st.GetTablesMap(ctx, root.vrw, root.ns)
|
||||
if err != nil {
|
||||
@@ -1165,6 +1200,15 @@ func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (
|
||||
return v.(types.SerialMessage), true, nil
|
||||
}
|
||||
|
||||
func (r fbRvStorage) GetCollation(ctx context.Context) (schema.Collation, error) {
|
||||
collation := r.srv.Collation()
|
||||
// Pre-existing repositories will return invalid here
|
||||
if collation == serial.Collationinvalid {
|
||||
return schema.Collation_Default, nil
|
||||
}
|
||||
return schema.Collation(collation), nil
|
||||
}
|
||||
|
||||
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
|
||||
builder := flatbuffers.NewBuilder(80)
|
||||
|
||||
@@ -1222,6 +1266,7 @@ func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWrite
|
||||
fkoff := builder.CreateByteVector(r.srv.ForeignKeyAddrBytes())
|
||||
serial.RootValueStart(builder)
|
||||
serial.RootValueAddFeatureVersion(builder, r.srv.FeatureVersion())
|
||||
serial.RootValueAddCollation(builder, r.srv.Collation())
|
||||
serial.RootValueAddTables(builder, tablesoff)
|
||||
serial.RootValueAddForeignKeyAddr(builder, fkoff)
|
||||
|
||||
@@ -1257,6 +1302,12 @@ func (r fbRvStorage) SetFeatureVersion(v FeatureVersion) (rvStorage, error) {
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (r fbRvStorage) SetCollation(ctx context.Context, collation schema.Collation) (rvStorage, error) {
|
||||
ret := r.clone()
|
||||
ret.srv.MutateCollation(serial.Collation(collation))
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (r fbRvStorage) clone() fbRvStorage {
|
||||
bs := make([]byte, len(r.srv.Table().Bytes))
|
||||
copy(bs, r.srv.Table().Bytes)
|
||||
|
||||
go/libraries/doltcore/env/environment.go
@@ -25,7 +25,6 @@ import (
|
||||
"time"
|
||||
|
||||
ps "github.com/mitchellh/go-ps"
|
||||
"google.golang.org/grpc"
|
||||
goerrors "gopkg.in/src-d/go-errors.v1"
|
||||
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
|
||||
@@ -831,7 +830,7 @@ func (dEnv *DoltEnv) UserRPCCreds() (creds.DoltCreds, bool, error) {
|
||||
}
|
||||
|
||||
// GetGRPCDialParams implements dbfactory.GRPCDialProvider
|
||||
func (dEnv *DoltEnv) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
|
||||
func (dEnv *DoltEnv) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
|
||||
return NewGRPCDialProviderFromDoltEnv(dEnv).GetGRPCDialParams(config)
|
||||
}
|
||||
|
||||
|
||||
go/libraries/doltcore/env/grpc_dial_provider.go
@@ -16,6 +16,7 @@ package env
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
@@ -50,7 +51,7 @@ func NewGRPCDialProviderFromDoltEnv(dEnv *DoltEnv) *GRPCDialProvider {
|
||||
}
|
||||
|
||||
// GetGRPCDialParams implements dbfactory.GRPCDialProvider
|
||||
func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
|
||||
func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
|
||||
endpoint := config.Endpoint
|
||||
if strings.IndexRune(endpoint, ':') == -1 {
|
||||
if config.Insecure {
|
||||
@@ -60,8 +61,20 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
|
||||
}
|
||||
}
|
||||
|
||||
var httpfetcher grpcendpoint.HTTPFetcher = http.DefaultClient
|
||||
|
||||
var opts []grpc.DialOption
|
||||
if config.Insecure {
|
||||
if config.TLSConfig != nil {
|
||||
tc := credentials.NewTLS(config.TLSConfig)
|
||||
opts = append(opts, grpc.WithTransportCredentials(tc))
|
||||
|
||||
httpfetcher = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: config.TLSConfig,
|
||||
ForceAttemptHTTP2: true,
|
||||
},
|
||||
}
|
||||
} else if config.Insecure {
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
} else {
|
||||
tc := credentials.NewTLS(&tls.Config{})
|
||||
@@ -76,14 +89,17 @@ func (p GRPCDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
|
||||
} else if config.WithEnvCreds {
|
||||
rpcCreds, err := p.getRPCCreds()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return dbfactory.GRPCRemoteConfig{}, err
|
||||
}
|
||||
if rpcCreds != nil {
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(rpcCreds))
|
||||
}
|
||||
}
|
||||
|
||||
return endpoint, opts, nil
|
||||
return dbfactory.GRPCRemoteConfig{
|
||||
Endpoint: endpoint,
|
||||
DialOptions: opts,
|
||||
HTTPFetcher: httpfetcher,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getRPCCreds returns any RPC credentials available to this dial provider. If a DoltEnv has been configured
|
||||
|
||||
@@ -15,6 +15,9 @@
|
||||
package grpcendpoint
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
@@ -23,4 +26,12 @@ type Config struct {
Insecure bool
Creds credentials.PerRPCCredentials
WithEnvCreds bool

// If non-nil, this is used for transport level security in the dial
// options, instead of a default option based on `Insecure`.
TLSConfig *tls.Config
}

type HTTPFetcher interface {
Do(req *http.Request) (*http.Response, error)
}
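A brief sketch (assumptions: placeholder endpoint, hypothetical helper name) showing how the new fields fit together. A non-nil TLSConfig takes precedence over the Insecure-based default transport credentials, and *http.Client satisfies HTTPFetcher because it has the required Do method:

// buildExampleConfig is a hypothetical helper pairing a Config that pins TLS
// settings explicitly with an HTTPFetcher that reuses the same *tls.Config.
func buildExampleConfig() (Config, HTTPFetcher) {
	cfg := Config{
		Endpoint:  "remotes.example.internal:443", // placeholder endpoint
		TLSConfig: &tls.Config{MinVersion: tls.VersionTLS12},
	}
	fetcher := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   cfg.TLSConfig,
			ForceAttemptHTTP2: true,
		},
	}
	return cfg, fetcher
}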
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/set"
|
||||
"github.com/dolthub/dolt/go/store/chunks"
|
||||
"github.com/dolthub/dolt/go/store/datas"
|
||||
"github.com/dolthub/dolt/go/store/hash"
|
||||
@@ -277,6 +278,16 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
|
||||
return nil, err
|
||||
}
|
||||
|
||||
removedTables, err := getRemovedTableNames(ctx, oldParent, oldRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrated, err = migrated.RemoveTables(ctx, true, false, removedTables...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = oldRoot.IterTables(ctx, func(name string, oldTbl *doltdb.Table, sch schema.Schema) (bool, error) {
|
||||
ok, err := oldTbl.HasConflicts(ctx)
|
||||
if err != nil {
|
||||
@@ -345,6 +356,21 @@ func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.Root
|
||||
return migrated, nil
|
||||
}
|
||||
|
||||
// renames also get returned here
|
||||
func getRemovedTableNames(ctx context.Context, prev, curr *doltdb.RootValue) ([]string, error) {
|
||||
prevNames, err := prev.GetTableNames(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tblNameSet := set.NewStrSet(prevNames)
|
||||
currNames, err := curr.GetTableNames(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tblNameSet.Remove(currNames...)
|
||||
return tblNameSet.AsSlice(), nil
|
||||
}
|
||||
|
||||
func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTbl, newParentTbl *doltdb.Table) (*doltdb.Table, error) {
|
||||
idx, err := oldParentTbl.GetRowData(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -42,21 +42,24 @@ import (
|
||||
var ErrUnimplemented = errors.New("unimplemented")
|
||||
|
||||
type RemoteChunkStore struct {
|
||||
HttpHost string
|
||||
csCache DBCache
|
||||
bucket string
|
||||
fs filesys.Filesys
|
||||
lgr *logrus.Entry
|
||||
sealer Sealer
|
||||
HttpHost string
|
||||
httpScheme string
|
||||
|
||||
csCache DBCache
|
||||
bucket string
|
||||
fs filesys.Filesys
|
||||
lgr *logrus.Entry
|
||||
sealer Sealer
|
||||
remotesapi.UnimplementedChunkStoreServiceServer
|
||||
}
|
||||
|
||||
func NewHttpFSBackedChunkStore(lgr *logrus.Entry, httpHost string, csCache DBCache, fs filesys.Filesys, sealer Sealer) *RemoteChunkStore {
|
||||
func NewHttpFSBackedChunkStore(lgr *logrus.Entry, httpHost string, csCache DBCache, fs filesys.Filesys, scheme string, sealer Sealer) *RemoteChunkStore {
|
||||
return &RemoteChunkStore{
|
||||
HttpHost: httpHost,
|
||||
csCache: csCache,
|
||||
bucket: "",
|
||||
fs: fs,
|
||||
HttpHost: httpHost,
|
||||
httpScheme: scheme,
|
||||
csCache: csCache,
|
||||
bucket: "",
|
||||
fs: fs,
|
||||
lgr: lgr.WithFields(logrus.Fields{
|
||||
"service": "dolt.services.remotesapi.v1alpha1.ChunkStoreServiceServer",
|
||||
}),
|
||||
@@ -286,7 +289,7 @@ func (rs *RemoteChunkStore) getHost(md metadata.MD) string {
|
||||
func (rs *RemoteChunkStore) getDownloadUrl(logger *logrus.Entry, md metadata.MD, path string) (*url.URL, error) {
|
||||
host := rs.getHost(md)
|
||||
return &url.URL{
|
||||
Scheme: "http",
|
||||
Scheme: rs.httpScheme,
|
||||
Host: host,
|
||||
Path: path,
|
||||
}, nil
|
||||
@@ -359,7 +362,7 @@ func (rs *RemoteChunkStore) getUploadUrl(logger *logrus.Entry, md metadata.MD, r
|
||||
params.Add("content_length", strconv.Itoa(int(tfd.ContentLength)))
|
||||
params.Add("content_hash", base64.RawURLEncoding.EncodeToString(tfd.ContentHash))
|
||||
return &url.URL{
|
||||
Scheme: "http",
|
||||
Scheme: rs.httpScheme,
|
||||
Host: rs.getHost(md),
|
||||
Path: fmt.Sprintf("%s/%s", repoPath, fileID),
|
||||
RawQuery: params.Encode(),
|
||||
|
||||
@@ -16,6 +16,7 @@ package remotesrv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -39,6 +40,8 @@ type Server struct {
|
||||
grpcSrv *grpc.Server
|
||||
httpPort int
|
||||
httpSrv http.Server
|
||||
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
func (s *Server) GracefulStop() {
|
||||
@@ -55,6 +58,11 @@ type ServerArgs struct {
DBCache DBCache
ReadOnly bool
Options []grpc.ServerOption

// If supplied, the listener(s) returned from Listeners() will be TLS
// listeners. The scheme used in the URLs returned from the gRPC server
// will be https.
TLSConfig *tls.Config
}
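A hedged sketch of wiring the new field: the certificate paths are placeholders and the other ServerArgs fields (host, ports, DBCache, FS, and so on) are deliberately elided, so this is not a complete server setup:

// exampleTLSServerArgs builds ServerArgs whose listeners will speak TLS and
// whose upload/download URLs will therefore use the https scheme.
func exampleTLSServerArgs(lgr *logrus.Entry) (ServerArgs, error) {
	cert, err := tls.LoadX509KeyPair("/path/to/remotesrv.pem", "/path/to/remotesrv.key")
	if err != nil {
		return ServerArgs{}, err
	}
	return ServerArgs{
		Logger:    lgr,
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		// remaining fields (HttpHost, ports, DBCache, FS, ...) omitted in this sketch
	}, nil
}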
|
||||
|
||||
func NewServer(args ServerArgs) (*Server, error) {
|
||||
@@ -70,10 +78,16 @@ func NewServer(args ServerArgs) (*Server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scheme := "http"
|
||||
if args.TLSConfig != nil {
|
||||
scheme = "https"
|
||||
}
|
||||
s.tlsConfig = args.TLSConfig
|
||||
|
||||
s.wg.Add(2)
|
||||
s.grpcPort = args.GrpcPort
|
||||
s.grpcSrv = grpc.NewServer(append([]grpc.ServerOption{grpc.MaxRecvMsgSize(128 * 1024 * 1024)}, args.Options...)...)
|
||||
var chnkSt remotesapi.ChunkStoreServiceServer = NewHttpFSBackedChunkStore(args.Logger, args.HttpHost, args.DBCache, args.FS, sealer)
|
||||
var chnkSt remotesapi.ChunkStoreServiceServer = NewHttpFSBackedChunkStore(args.Logger, args.HttpHost, args.DBCache, args.FS, scheme, sealer)
|
||||
if args.ReadOnly {
|
||||
chnkSt = ReadOnlyChunkStore{chnkSt}
|
||||
}
|
||||
@@ -113,14 +127,25 @@ type Listeners struct {
|
||||
}
|
||||
|
||||
func (s *Server) Listeners() (Listeners, error) {
|
||||
httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.httpPort))
|
||||
var httpListener net.Listener
|
||||
var grpcListener net.Listener
|
||||
var err error
|
||||
if s.tlsConfig != nil {
|
||||
httpListener, err = tls.Listen("tcp", fmt.Sprintf(":%d", s.httpPort), s.tlsConfig)
|
||||
} else {
|
||||
httpListener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.httpPort))
|
||||
}
|
||||
if err != nil {
|
||||
return Listeners{}, err
|
||||
}
|
||||
if s.httpPort == s.grpcPort {
|
||||
return Listeners{http: httpListener}, nil
|
||||
}
|
||||
grpcListener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort))
|
||||
if s.tlsConfig != nil {
|
||||
grpcListener, err = tls.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort), s.tlsConfig)
|
||||
} else {
|
||||
grpcListener, err = net.Listen("tcp", fmt.Sprintf(":%d", s.grpcPort))
|
||||
}
|
||||
if err != nil {
|
||||
httpListener.Close()
|
||||
return Listeners{}, err
|
||||
|
||||
@@ -293,6 +293,6 @@ const (
|
||||
Collation_utf8mb4_zh_0900_as_cs = Collation(serial.Collationutf8mb4_zh_0900_as_cs)
|
||||
Collation_utf8mb4_0900_bin = Collation(serial.Collationutf8mb4_0900_bin)
|
||||
|
||||
Collation_Default = Collation_utf8mb4_0900_bin
|
||||
Collation_Invalid = Collation(serial.Collationinvalid)
|
||||
Collation_Default = Collation_utf8mb4_0900_bin
|
||||
Collation_Unspecified = Collation(serial.Collationinvalid)
|
||||
)
|
||||
|
||||
@@ -77,8 +77,8 @@ func (c CollationTupleComparator) Validated(types []val.Type) val.TupleComparato
|
||||
}
|
||||
i := 0
|
||||
for ; i < len(c.Collations); i++ {
|
||||
if types[i].Enc == val.StringEnc && c.Collations[i] == sql.Collation_Invalid {
|
||||
panic("string type encoding is missing its collation")
|
||||
if types[i].Enc == val.StringEnc && c.Collations[i] == sql.Collation_Unspecified {
|
||||
c.Collations[i] = sql.Collation_Default
|
||||
}
|
||||
}
|
||||
if len(c.Collations) == len(types) {
|
||||
@@ -90,7 +90,7 @@ func (c CollationTupleComparator) Validated(types []val.Type) val.TupleComparato
|
||||
if types[i].Enc == val.StringEnc {
|
||||
panic("string type encoding is missing its collation")
|
||||
}
|
||||
newCollations[i] = sql.Collation_Invalid
|
||||
newCollations[i] = sql.Collation_Unspecified
|
||||
}
|
||||
return CollationTupleComparator{Collations: newCollations}
|
||||
}
|
||||
|
||||
@@ -375,7 +375,7 @@ func (si *schemaImpl) GetKeyDescriptor() val.TupleDesc {
|
||||
useCollations = true
|
||||
collations = append(collations, sqlType.(sql.StringType).Collation())
|
||||
} else {
|
||||
collations = append(collations, sql.Collation_Invalid)
|
||||
collations = append(collations, sql.Collation_Unspecified)
|
||||
}
|
||||
return
|
||||
})
|
||||
@@ -397,7 +397,7 @@ func (si *schemaImpl) GetValueDescriptor() val.TupleDesc {
|
||||
var collations []sql.CollationID
|
||||
if IsKeyless(si) {
|
||||
tt = []val.Type{val.KeylessCardType}
|
||||
collations = []sql.CollationID{sql.Collation_Invalid}
|
||||
collations = []sql.CollationID{sql.Collation_Unspecified}
|
||||
}
|
||||
|
||||
useCollations := false // We only use collations if a string exists
|
||||
@@ -412,7 +412,7 @@ func (si *schemaImpl) GetValueDescriptor() val.TupleDesc {
|
||||
useCollations = true
|
||||
collations = append(collations, sqlType.(sql.StringType).Collation())
|
||||
} else {
|
||||
collations = append(collations, sql.Collation_Invalid)
|
||||
collations = append(collations, sql.Collation_Unspecified)
|
||||
}
|
||||
return
|
||||
})
|
||||
@@ -430,19 +430,19 @@ func (si *schemaImpl) GetValueDescriptor() val.TupleDesc {
|
||||
|
||||
// GetCollation implements the Schema interface.
|
||||
func (si *schemaImpl) GetCollation() Collation {
|
||||
// Schemas made before this change (and invalid schemas) will contain invalid, so we'll use the default collation
// Schemas made before this change (and invalid schemas) will contain unspecified, so we'll use the inherent collation
|
||||
// instead (as that matches their behavior).
|
||||
if si.collation == Collation_Invalid {
|
||||
return Collation_Default
|
||||
if si.collation == Collation_Unspecified {
|
||||
return Collation_utf8mb4_0900_bin
|
||||
}
|
||||
return si.collation
|
||||
}
|
||||
|
||||
// SetCollation implements the Schema interface.
|
||||
func (si *schemaImpl) SetCollation(collation Collation) {
|
||||
// Schemas made before this change may try to set this to invalid, so we'll set it to the default collation.
|
||||
if collation == Collation_Invalid {
|
||||
si.collation = Collation_Default
|
||||
// Schemas made before this change may try to set this to unspecified, so we'll set it to the inherent collation.
|
||||
if collation == Collation_Unspecified {
|
||||
si.collation = Collation_utf8mb4_0900_bin
|
||||
} else {
|
||||
si.collation = collation
|
||||
}
|
||||
|
||||
@@ -23,6 +23,11 @@ type Config interface {
|
||||
|
||||
type RemotesAPIConfig interface {
|
||||
Port() int
|
||||
TLSKey() string
|
||||
TLSCert() string
|
||||
TLSCA() string
|
||||
ServerNameURLMatches() []string
|
||||
ServerNameDNSMatches() []string
|
||||
}
|
||||
|
||||
type StandbyRemoteConfig interface {
|
||||
|
||||
@@ -193,7 +193,7 @@ func (c *Controller) applyCommitHooks(ctx context.Context, name string, bt *sql.
|
||||
}
|
||||
|
||||
func (c *Controller) gRPCDialProvider(denv *env.DoltEnv) dbfactory.GRPCDialProvider {
|
||||
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor}
|
||||
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor, c.cfg}
|
||||
}
|
||||
|
||||
func (c *Controller) RegisterStoredProcedures(store procedurestore) {
|
||||
|
||||
@@ -15,6 +15,10 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
@@ -32,16 +36,22 @@ import (
|
||||
type grpcDialProvider struct {
|
||||
orig dbfactory.GRPCDialProvider
|
||||
ci *clientinterceptor
|
||||
cfg Config
|
||||
}
|
||||
|
||||
func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string, []grpc.DialOption, error) {
|
||||
config.WithEnvCreds = false
|
||||
endpoint, opts, err := p.orig.GetGRPCDialParams(config)
|
||||
func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
|
||||
tlsConfig, err := p.tlsConfig()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return dbfactory.GRPCRemoteConfig{}, err
|
||||
}
|
||||
opts = append(opts, p.ci.Options()...)
|
||||
opts = append(opts, grpc.WithConnectParams(grpc.ConnectParams{
|
||||
config.TLSConfig = tlsConfig
|
||||
config.WithEnvCreds = false
|
||||
cfg, err := p.orig.GetGRPCDialParams(config)
|
||||
if err != nil {
|
||||
return dbfactory.GRPCRemoteConfig{}, err
|
||||
}
|
||||
cfg.DialOptions = append(cfg.DialOptions, p.ci.Options()...)
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(grpc.ConnectParams{
|
||||
Backoff: backoff.Config{
|
||||
BaseDelay: 250 * time.Millisecond,
|
||||
Multiplier: 1.6,
|
||||
@@ -50,5 +60,114 @@ func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (string,
|
||||
},
|
||||
MinConnectTimeout: 250 * time.Millisecond,
|
||||
}))
|
||||
return endpoint, opts, nil
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// Within a cluster, if remotesapi is configured with a tls_ca, we take the
// following semantics:
// * The configured tls_ca file holds a set of PEM encoded x509 certificates,
// all of which are trusted roots for the outbound connections the
// remotestorage client establishes.
// * The certificate chain presented by the server must validate to a root
// which was present in tls_ca. In particular, every certificate in the chain
// must be within its validity window, the signatures must be valid, key usage
// and isCa must be correctly set for the roots and the intermediates, and the
// leaf must have extended key usage server auth.
// * On the other hand, no verification is done against the SAN or the Subject
// of the certificate.
//
// We use these TLS semantics for both connections to the gRPC endpoint which
// is the actual remotesapi, and for connections to any HTTPS endpoints to
// which the gRPC service returns URLs. For now, this works perfectly for our
// use case, but it's tightly coupled to `cluster:` deployment topologies and
// the like.
//
// If tls_ca is not set then default TLS handling is performed. In particular,
// if the remotesapi endpoint is HTTPS, then the system roots are used and
// ServerName is verified against the presented URL SANs of the certificates.
func (p grpcDialProvider) tlsConfig() (*tls.Config, error) {
|
||||
tlsCA := p.cfg.RemotesAPIConfig().TLSCA()
|
||||
if tlsCA == "" {
|
||||
return nil, nil
|
||||
}
|
||||
urlmatches := p.cfg.RemotesAPIConfig().ServerNameURLMatches()
|
||||
dnsmatches := p.cfg.RemotesAPIConfig().ServerNameDNSMatches()
|
||||
pem, err := ioutil.ReadFile(tlsCA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roots := x509.NewCertPool()
|
||||
if ok := roots.AppendCertsFromPEM(pem); !ok {
|
||||
return nil, errors.New("error loading ca roots from " + tlsCA)
|
||||
}
|
||||
verifyFunc := func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
|
||||
certs := make([]*x509.Certificate, len(rawCerts))
|
||||
var err error
|
||||
for i, asn1Data := range rawCerts {
|
||||
certs[i], err = x509.ParseCertificate(asn1Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
keyUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: roots,
|
||||
CurrentTime: time.Now(),
|
||||
Intermediates: x509.NewCertPool(),
|
||||
KeyUsages: keyUsages,
|
||||
}
|
||||
for _, cert := range certs[1:] {
|
||||
opts.Intermediates.AddCert(cert)
|
||||
}
|
||||
_, err = certs[0].Verify(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(urlmatches) > 0 {
|
||||
found := false
|
||||
for _, n := range urlmatches {
|
||||
for _, cn := range certs[0].URIs {
|
||||
if n == cn.String() {
|
||||
found = true
|
||||
}
|
||||
break
|
||||
}
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return errors.New("expected certificate to match something in server_name_urls, but it did not")
|
||||
}
|
||||
}
|
||||
if len(dnsmatches) > 0 {
|
||||
found := false
|
||||
for _, n := range dnsmatches {
|
||||
for _, cn := range certs[0].DNSNames {
|
||||
if n == cn {
|
||||
found = true
|
||||
}
|
||||
break
|
||||
}
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return errors.New("expected certificate to match something in server_name_dns, but it did not")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return &tls.Config{
|
||||
// We have to InsecureSkipVerify because ServerName is always
|
||||
// set by the grpc dial provider and golang tls.Config does not
|
||||
// have good support for performing certificate validation
|
||||
// without server name validation.
|
||||
InsecureSkipVerify: true,
|
||||
|
||||
VerifyPeerCertificate: verifyFunc,
|
||||
|
||||
NextProtos: []string{"h2"},
|
||||
}, nil
|
||||
}
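
For context on the dial-path change above: the cluster provider now hands its custom *tls.Config to the underlying dial provider through grpcendpoint.Config.TLSConfig rather than building transport credentials itself. The standalone sketch below shows the conventional way such a config becomes a gRPC dial option; the package name and the dialWithClusterTLS helper are hypothetical and are not part of this patch.

package example // hypothetical illustration only, not part of this change

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialWithClusterTLS opens a client connection whose transport credentials are
// built from a *tls.Config like the one produced by tlsConfig() above.
func dialWithClusterTLS(endpoint string, tlsCfg *tls.Config, extra ...grpc.DialOption) (*grpc.ClientConn, error) {
	opts := append([]grpc.DialOption{}, extra...)
	if tlsCfg != nil {
		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg)))
	}
	return grpc.Dial(endpoint, opts...)
}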

@@ -30,6 +30,15 @@ import (
const clusterRoleHeader = "x-dolt-cluster-role"
const clusterRoleEpochHeader = "x-dolt-cluster-role-epoch"

var writeEndpoints map[string]bool

func init() {
writeEndpoints = make(map[string]bool)
writeEndpoints["/dolt.services.remotesapi.v1alpha1.ChunkStoreService/Commit"] = true
writeEndpoints["/dolt.services.remotesapi.v1alpha1.ChunkStoreService/AddTableFiles"] = true
writeEndpoints["/dolt.services.remotesapi.v1alpha1.ChunkStoreService/GetUploadLocations"] = true
}

// clientinterceptor is installed as a Unary and Stream client interceptor on
// the client conns that are used to communicate with standby remotes. The
// cluster.Controller sets this server's current Role and role epoch on the

@@ -134,17 +143,21 @@ func (ci *clientinterceptor) Options() []grpc.DialOption {
// serverinterceptor is installed as a Unary and Stream interceptor on a
// ChunkStoreServer which is serving a SQL database as a standby remote. The
// cluster.Controller sets this server's current Role and role epoch on the
// interceptor anytime it changes. In turn, this interceptor:
// * adds the server's current role and epoch to the response headers for every
// request.
// * fails all incoming requests immediately with codes.FailedPrecondition if the
// current role != RoleStandby, since nothing should be replicating to us in
// that state.
// interceptor anytime it changes. In turn, this interceptor has the following
// behavior:
// * for any incoming standby traffic, it will add the server's current role
// and epoch to the response headers for every request.
// * for any incoming standby traffic, it will fail incoming requests
// immediately with codes.FailedPrecondition if the current role !=
// RoleStandby, since nothing should be replicating to us in that state.
// * watches incoming request headers for a situation which causes this server
// to force downgrade from primary to standby. In particular, when an incoming
// request asserts that the client is the current primary at an epoch higher
// than our current epoch, this interceptor coordinates with the Controller to
// immediately transition to standby and allow replication requests through.
// * for incoming requests which are not standby, it will currently fail the
// requests with codes.Unauthenticated. Eventually, it will allow read-only
// traffic through which is authenticated and authorized.
type serverinterceptor struct {
lgr *logrus.Entry
role Role

@@ -154,71 +167,90 @@ type serverinterceptor struct {
}

func (si *serverinterceptor) Stream() grpc.StreamServerInterceptor {
return func(srv interface{}, ss grpc.ServerStream, into *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
fromStandby := false
if md, ok := metadata.FromIncomingContext(ss.Context()); ok {
role, epoch := si.getRole()
si.handleRequestHeaders(md, role, epoch)
fromStandby = si.handleRequestHeaders(md, role, epoch)
}
// After handleRequestHeaders, our role may have changed, so we fetch it again here.
role, epoch := si.getRole()
if err := grpc.SetHeader(ss.Context(), metadata.Pairs(clusterRoleHeader, string(role), clusterRoleEpochHeader, strconv.Itoa(epoch))); err != nil {
return err
if fromStandby {
// After handleRequestHeaders, our role may have changed, so we fetch it again here.
role, epoch := si.getRole()
if err := grpc.SetHeader(ss.Context(), metadata.Pairs(clusterRoleHeader, string(role), clusterRoleEpochHeader, strconv.Itoa(epoch))); err != nil {
return err
}
if role == RolePrimary {
// As a primary, we do not accept replication requests.
return status.Error(codes.FailedPrecondition, "this server is a primary and is not currently accepting replication")
}
if role == RoleDetectedBrokenConfig {
// As a primary, we do not accept replication requests.
return status.Error(codes.FailedPrecondition, "this server is currently in detected_broken_config and is not currently accepting replication")
}
return handler(srv, ss)
} else if isWrite := writeEndpoints[info.FullMethod]; isWrite {
return status.Error(codes.Unimplemented, "unimplemented")
} else {
return status.Error(codes.Unauthenticated, "unauthenticated")
}
if role == RolePrimary {
// As a primary, we do not accept replication requests.
return status.Error(codes.FailedPrecondition, "this server is a primary and is not currently accepting replication")
}
if role == RoleDetectedBrokenConfig {
// As a primary, we do not accept replication requests.
return status.Error(codes.FailedPrecondition, "this server is currently in detected_broken_config and is not currently accepting replication")
}
return handler(srv, ss)
}
}

func (si *serverinterceptor) Unary() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
fromStandby := false
if md, ok := metadata.FromIncomingContext(ctx); ok {
role, epoch := si.getRole()
si.handleRequestHeaders(md, role, epoch)
fromStandby = si.handleRequestHeaders(md, role, epoch)
}
// After handleRequestHeaders, our role may have changed, so we fetch it again here.
role, epoch := si.getRole()
if err := grpc.SetHeader(ctx, metadata.Pairs(clusterRoleHeader, string(role), clusterRoleEpochHeader, strconv.Itoa(epoch))); err != nil {
return nil, err
if fromStandby {
// After handleRequestHeaders, our role may have changed, so we fetch it again here.
role, epoch := si.getRole()
if err := grpc.SetHeader(ctx, metadata.Pairs(clusterRoleHeader, string(role), clusterRoleEpochHeader, strconv.Itoa(epoch))); err != nil {
return nil, err
}
if role == RolePrimary {
// As a primary, we do not accept replication requests.
return nil, status.Error(codes.FailedPrecondition, "this server is a primary and is not currently accepting replication")
}
if role == RoleDetectedBrokenConfig {
// As a primary, we do not accept replication requests.
return nil, status.Error(codes.FailedPrecondition, "this server is currently in detected_broken_config and is not currently accepting replication")
}
return handler(ctx, req)
} else if isWrite := writeEndpoints[info.FullMethod]; isWrite {
return nil, status.Error(codes.Unimplemented, "unimplemented")
} else {
return nil, status.Error(codes.Unauthenticated, "unauthenticated")
}
if role == RolePrimary {
// As a primary, we do not accept replication requests.
return nil, status.Error(codes.FailedPrecondition, "this server is a primary and is not currently accepting replication")
}
if role == RoleDetectedBrokenConfig {
// As a primary, we do not accept replication requests.
return nil, status.Error(codes.FailedPrecondition, "this server is currently in detected_broken_config and is not currently accepting replication")
}
return handler(ctx, req)
}
}

func (si *serverinterceptor) handleRequestHeaders(header metadata.MD, role Role, epoch int) {
func (si *serverinterceptor) handleRequestHeaders(header metadata.MD, role Role, epoch int) bool {
epochs := header.Get(clusterRoleEpochHeader)
roles := header.Get(clusterRoleHeader)
if len(epochs) > 0 && len(roles) > 0 && roles[0] == string(RolePrimary) && role == RolePrimary {
if reqepoch, err := strconv.Atoi(epochs[0]); err == nil {
if reqepoch == epoch {
// Misconfiguration in the cluster means this
// server and its standby are marked as Primary
// at the same epoch. We will become standby
// and our peer will become standby. An
// operator will need to get involved.
si.lgr.Errorf("cluster: serverinterceptor: this server and its standby replica are both primary at the same epoch. force transitioning to detected_broken_config.")
si.roleSetter(string(RoleDetectedBrokenConfig), reqepoch)
} else if reqepoch > epoch {
// The client replicating to us thinks it is the primary at a higher epoch than us.
si.lgr.Warnf("cluster: serverinterceptor: this server is primary at epoch %d. the server replicating to it is primary at epoch %d. force transitioning to standby.", epoch, reqepoch)
si.roleSetter(string(RoleStandby), reqepoch)
if len(epochs) > 0 && len(roles) > 0 {
if roles[0] == string(RolePrimary) && role == RolePrimary {
if reqepoch, err := strconv.Atoi(epochs[0]); err == nil {
if reqepoch == epoch {
// Misconfiguration in the cluster means this
// server and its standby are marked as Primary
// at the same epoch. We will become standby
// and our peer will become standby. An
// operator will need to get involved.
si.lgr.Errorf("cluster: serverinterceptor: this server and its standby replica are both primary at the same epoch. force transitioning to detected_broken_config.")
si.roleSetter(string(RoleDetectedBrokenConfig), reqepoch)
} else if reqepoch > epoch {
// The client replicating to us thinks it is the primary at a higher epoch than us.
si.lgr.Warnf("cluster: serverinterceptor: this server is primary at epoch %d. the server replicating to it is primary at epoch %d. force transitioning to standby.", epoch, reqepoch)
si.roleSetter(string(RoleStandby), reqepoch)
}
}
}
// returns true if the request was from a standby replica, false otherwise
return true
}
return false
}
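
To make the header handshake above easier to follow, here is a minimal, self-contained sketch of the client side: a replicating server stamps each outbound request with its role and epoch, which is what lets handleRequestHeaders classify the traffic as standby replication and, when the epochs disagree, trigger the forced role transitions described above. The package name and the withRoleMetadata helper are hypothetical; the header keys mirror the constants defined earlier in this file, and the same pattern appears in the tests' outboundCtx helper further down.

package example // hypothetical illustration only, not part of this change

import (
	"context"
	"strconv"

	"google.golang.org/grpc/metadata"
)

// withRoleMetadata attaches the caller's cluster role and epoch to the outgoing
// request metadata, the same headers the serverinterceptor inspects.
func withRoleMetadata(ctx context.Context, role string, epoch int) context.Context {
	return metadata.AppendToOutgoingContext(ctx,
		"x-dolt-cluster-role", role,
		"x-dolt-cluster-role-epoch", strconv.Itoa(epoch))
}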

func (si *serverinterceptor) Options() []grpc.ServerOption {

@@ -17,6 +17,7 @@ package cluster
import (
"context"
"net"
"strconv"
"sync"
"testing"

@@ -89,6 +90,47 @@ func withClient(t *testing.T, cb func(*testing.T, grpc_health_v1.HealthClient),
return hs
}

func outboundCtx(vals ...interface{}) context.Context {
ctx := context.Background()
if len(vals) == 0 {
return ctx
}
if len(vals) == 2 {
return metadata.AppendToOutgoingContext(ctx,
clusterRoleHeader, string(vals[0].(Role)),
clusterRoleEpochHeader, strconv.Itoa(vals[1].(int)))
}
panic("bad test --- outboundCtx must take 0 or 2 values")
}

func TestServerInterceptorUnauthenticatedWithoutClientHeaders(t *testing.T) {
var si serverinterceptor
si.roleSetter = noopSetRole
si.lgr = lgr
si.setRole(RoleStandby, 10)
t.Run("Standby", func(t *testing.T) {
withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
_, err := client.Check(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.Unauthenticated, status.Code(err))
srv, err := client.Watch(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.NoError(t, err)
_, err = srv.Recv()
assert.Equal(t, codes.Unauthenticated, status.Code(err))
}, si.Options(), nil)
})
si.setRole(RolePrimary, 10)
t.Run("Primary", func(t *testing.T) {
withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
_, err := client.Check(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.Unauthenticated, status.Code(err))
srv, err := client.Watch(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.NoError(t, err)
_, err = srv.Recv()
assert.Equal(t, codes.Unauthenticated, status.Code(err))
}, si.Options(), nil)
})
}

func TestServerInterceptorAddsUnaryResponseHeaders(t *testing.T) {
var si serverinterceptor
si.setRole(RoleStandby, 10)

@@ -96,7 +138,7 @@ func TestServerInterceptorAddsUnaryResponseHeaders(t *testing.T) {
si.lgr = lgr
withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
var md metadata.MD
_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}, grpc.Header(&md))
_, err := client.Check(outboundCtx(RolePrimary, 10), &grpc_health_v1.HealthCheckRequest{}, grpc.Header(&md))
assert.Equal(t, codes.Unimplemented, status.Code(err))
if assert.Len(t, md.Get(clusterRoleHeader), 1) {
assert.Equal(t, "standby", md.Get(clusterRoleHeader)[0])

@@ -114,7 +156,7 @@ func TestServerInterceptorAddsStreamResponseHeaders(t *testing.T) {
si.lgr = lgr
withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
var md metadata.MD
srv, err := client.Watch(context.Background(), &grpc_health_v1.HealthCheckRequest{}, grpc.Header(&md))
srv, err := client.Watch(outboundCtx(RolePrimary, 10), &grpc_health_v1.HealthCheckRequest{}, grpc.Header(&md))
require.NoError(t, err)
_, err = srv.Recv()
assert.Equal(t, codes.Unimplemented, status.Code(err))

@@ -133,10 +175,10 @@ func TestServerInterceptorAsPrimaryDoesNotSendRequest(t *testing.T) {
si.roleSetter = noopSetRole
si.lgr = lgr
srv := withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
ctx := metadata.AppendToOutgoingContext(context.Background(), "test-header", "test-header-value")
ctx := metadata.AppendToOutgoingContext(outboundCtx(RoleStandby, 10), "test-header", "test-header-value")
_, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.FailedPrecondition, status.Code(err))
ctx = metadata.AppendToOutgoingContext(context.Background(), "test-header", "test-header-value")
ctx = metadata.AppendToOutgoingContext(outboundCtx(RoleStandby, 10), "test-header", "test-header-value")
ss, err := client.Watch(ctx, &grpc_health_v1.HealthCheckRequest{})
assert.NoError(t, err)
_, err = ss.Recv()

@@ -151,7 +193,7 @@ func TestClientInterceptorAddsUnaryRequestHeaders(t *testing.T) {
ci.roleSetter = noopSetRole
ci.lgr = lgr
srv := withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{})
_, err := client.Check(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.Unimplemented, status.Code(err))
}, nil, ci.Options())
if assert.Len(t, srv.md.Get(clusterRoleHeader), 1) {

@@ -168,7 +210,7 @@ func TestClientInterceptorAddsStreamRequestHeaders(t *testing.T) {
ci.roleSetter = noopSetRole
ci.lgr = lgr
srv := withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
srv, err := client.Watch(context.Background(), &grpc_health_v1.HealthCheckRequest{})
srv, err := client.Watch(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
require.NoError(t, err)
_, err = srv.Recv()
assert.Equal(t, codes.Unimplemented, status.Code(err))

@@ -187,12 +229,12 @@ func TestClientInterceptorAsStandbyDoesNotSendRequest(t *testing.T) {
ci.roleSetter = noopSetRole
ci.lgr = lgr
srv := withClient(t, func(t *testing.T, client grpc_health_v1.HealthClient) {
_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{})
_, err := client.Check(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.Unimplemented, status.Code(err))
ci.setRole(RoleStandby, 11)
_, err = client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{})
_, err = client.Check(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.FailedPrecondition, status.Code(err))
_, err = client.Watch(context.Background(), &grpc_health_v1.HealthCheckRequest{})
_, err = client.Watch(outboundCtx(), &grpc_health_v1.HealthCheckRequest{})
assert.Equal(t, codes.FailedPrecondition, status.Code(err))
}, nil, ci.Options())
if assert.Len(t, srv.md.Get(clusterRoleHeader), 1) {
@@ -91,12 +91,21 @@ type Database struct {
revision string
}

var _ SqlDatabase = Database{}
var _ dsess.RevisionDatabase = Database{}
var _ globalstate.StateProvider = Database{}
var _ sql.CollatedDatabase = Database{}
var _ sql.Database = Database{}
var _ sql.StoredProcedureDatabase = Database{}
var _ sql.TableCreator = Database{}
var _ sql.ViewDatabase = Database{}
var _ sql.TableDropper = Database{}
var _ sql.TableRenamer = Database{}
var _ sql.TemporaryTableCreator = Database{}
var _ sql.TemporaryTableDatabase = Database{}
var _ dsess.RevisionDatabase = Database{}
var _ sql.TransactionDatabase = Database{}
var _ sql.TriggerDatabase = Database{}
var _ sql.VersionedDatabase = Database{}
var _ sql.ViewDatabase = Database{}

type ReadOnlyDatabase struct {
Database

@@ -160,17 +169,6 @@ func (db Database) EditOptions() editor.Options {
return db.editOpts
}

var _ SqlDatabase = Database{}
var _ sql.VersionedDatabase = Database{}
var _ sql.TableDropper = Database{}
var _ sql.TableCreator = Database{}
var _ sql.TemporaryTableCreator = Database{}
var _ sql.TableRenamer = Database{}
var _ sql.TriggerDatabase = Database{}
var _ sql.StoredProcedureDatabase = Database{}
var _ sql.TransactionDatabase = Database{}
var _ globalstate.StateProvider = Database{}

// NewDatabase returns a new dolt database to use in queries.
func NewDatabase(ctx context.Context, name string, dbData env.DbData, editOpts editor.Options) (Database, error) {
globalState, err := globalstate.NewGlobalStateStoreForDb(ctx, dbData.Ddb)

@@ -1280,6 +1278,38 @@ func (db Database) GetAllTemporaryTables(ctx *sql.Context) ([]sql.Table, error)
return sess.GetAllTemporaryTables(ctx, db.Name())
}

// GetCollation implements the interface sql.CollatedDatabase.
func (db Database) GetCollation(ctx *sql.Context) sql.CollationID {
root, err := db.GetRoot(ctx)
if err != nil {
return sql.Collation_Default
}
collation, err := root.GetCollation(ctx)
if err != nil {
return sql.Collation_Default
}
return sql.CollationID(collation)
}

// SetCollation implements the interface sql.CollatedDatabase.
func (db Database) SetCollation(ctx *sql.Context, collation sql.CollationID) error {
if err := branch_control.CheckAccess(ctx, branch_control.Permissions_Write); err != nil {
return err
}
if collation == sql.Collation_Unspecified {
collation = sql.Collation_Default
}
root, err := db.GetRoot(ctx)
if err != nil {
return err
}
newRoot, err := root.SetCollation(ctx, schema.Collation(collation))
if err != nil {
return err
}
return db.SetRoot(ctx, newRoot)
}

// TODO: this is a hack to make user space DBs appear to the analyzer as full DBs with state etc., but the state is
// really skeletal. We need to reexamine the DB / session initialization to make this simpler -- most of these things
// aren't needed at initialization time and for most code paths.

@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dprocedures"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"

@@ -60,6 +61,7 @@ type DoltDatabaseProvider struct {
var _ sql.DatabaseProvider = (*DoltDatabaseProvider)(nil)
var _ sql.FunctionProvider = (*DoltDatabaseProvider)(nil)
var _ sql.MutableDatabaseProvider = (*DoltDatabaseProvider)(nil)
var _ sql.CollatedDatabaseProvider = (*DoltDatabaseProvider)(nil)
var _ sql.ExternalStoredProcedureProvider = (*DoltDatabaseProvider)(nil)
var _ sql.TableFunctionProvider = (*DoltDatabaseProvider)(nil)
var _ dsess.DoltDatabaseProvider = (*DoltDatabaseProvider)(nil)

@@ -298,6 +300,10 @@ func (p DoltDatabaseProvider) GetRemoteDB(ctx *sql.Context, srcDB *doltdb.DoltDB
}

func (p DoltDatabaseProvider) CreateDatabase(ctx *sql.Context, name string) error {
return p.CreateCollatedDatabase(ctx, name, sql.Collation_Default)
}

func (p DoltDatabaseProvider) CreateCollatedDatabase(ctx *sql.Context, name string, collation sql.CollationID) error {
p.mu.Lock()
defer p.mu.Unlock()

@@ -350,6 +356,25 @@ func (p DoltDatabaseProvider) CreateDatabase(ctx *sql.Context, name string) erro
return err
}

// Set the collation
if collation != sql.Collation_Default {
workingRoot, err := newEnv.WorkingRoot(ctx)
if err != nil {
return err
}
newRoot, err := workingRoot.SetCollation(ctx, schema.Collation(collation))
if err != nil {
return err
}
// As this is a newly created database, we set both the working and staged roots to the same root value
if err = newEnv.UpdateWorkingRoot(ctx, newRoot); err != nil {
return err
}
if err = newEnv.UpdateStagedRoot(ctx, newRoot); err != nil {
return err
}
}

// if calling process has a lockfile, also create one for new database
if env.FsIsLocked(p.fs) {
err := newEnv.Lock()

@@ -18,6 +18,7 @@ import (
"fmt"
"io"
"math"
"strings"

"github.com/dolthub/go-mysql-server/sql"

@@ -34,6 +35,7 @@ type DiffSummaryTableFunction struct {

fromCommitExpr sql.Expression
toCommitExpr sql.Expression
dotCommitExpr sql.Expression
tableNameExpr sql.Expression
database sql.Database
}

@@ -84,16 +86,29 @@ func (ds *DiffSummaryTableFunction) FunctionName() string {
return "dolt_diff_summary"
}

// Resolved implements the sql.Resolvable interface
func (ds *DiffSummaryTableFunction) Resolved() bool {
if ds.tableNameExpr != nil {
return ds.fromCommitExpr.Resolved() && ds.toCommitExpr.Resolved() && ds.tableNameExpr.Resolved()
func (ds *DiffSummaryTableFunction) commitsResolved() bool {
if ds.dotCommitExpr != nil {
return ds.dotCommitExpr.Resolved()
}
return ds.fromCommitExpr.Resolved() && ds.toCommitExpr.Resolved()
}

// Resolved implements the sql.Resolvable interface
func (ds *DiffSummaryTableFunction) Resolved() bool {
if ds.tableNameExpr != nil {
return ds.commitsResolved() && ds.tableNameExpr.Resolved()
}
return ds.commitsResolved()
}

// String implements the Stringer interface
func (ds *DiffSummaryTableFunction) String() string {
if ds.dotCommitExpr != nil {
if ds.tableNameExpr != nil {
return fmt.Sprintf("DOLT_DIFF_SUMMARY(%s, %s)", ds.dotCommitExpr.String(), ds.tableNameExpr.String())
}
return fmt.Sprintf("DOLT_DIFF_SUMMARY(%s)", ds.dotCommitExpr.String())
}
if ds.tableNameExpr != nil {
return fmt.Sprintf("DOLT_DIFF_SUMMARY(%s, %s, %s)", ds.fromCommitExpr.String(), ds.toCommitExpr.String(), ds.tableNameExpr.String())
}

@@ -154,7 +169,12 @@ func (ds *DiffSummaryTableFunction) CheckPrivileges(ctx *sql.Context, opChecker

// Expressions implements the sql.Expressioner interface.
func (ds *DiffSummaryTableFunction) Expressions() []sql.Expression {
exprs := []sql.Expression{ds.fromCommitExpr, ds.toCommitExpr}
exprs := []sql.Expression{}
if ds.dotCommitExpr != nil {
exprs = append(exprs, ds.dotCommitExpr)
} else {
exprs = append(exprs, ds.fromCommitExpr, ds.toCommitExpr)
}
if ds.tableNameExpr != nil {
exprs = append(exprs, ds.tableNameExpr)
}

@@ -163,8 +183,8 @@ func (ds *DiffSummaryTableFunction) Expressions() []sql.Expression {

// WithExpressions implements the sql.Expressioner interface.
func (ds *DiffSummaryTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
if len(expression) < 2 || len(expression) > 3 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "2 or 3", len(expression))
if len(expression) < 1 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "1 to 3", len(expression))
}

for _, expr := range expression {

@@ -173,19 +193,37 @@ func (ds *DiffSummaryTableFunction) WithExpressions(expression ...sql.Expression
}
}

ds.fromCommitExpr = expression[0]
ds.toCommitExpr = expression[1]
if len(expression) == 3 {
ds.tableNameExpr = expression[2]
if strings.Contains(expression[0].String(), "..") {
if len(expression) < 1 || len(expression) > 2 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "1 or 2", len(expression))
}
ds.dotCommitExpr = expression[0]
if len(expression) == 2 {
ds.tableNameExpr = expression[1]
}
} else {
if len(expression) < 2 || len(expression) > 3 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "2 or 3", len(expression))
}
ds.fromCommitExpr = expression[0]
ds.toCommitExpr = expression[1]
if len(expression) == 3 {
ds.tableNameExpr = expression[2]
}
}

// validate the expressions
if !sql.IsText(ds.fromCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.fromCommitExpr.String())
}

if !sql.IsText(ds.toCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.toCommitExpr.String())
if ds.dotCommitExpr != nil {
if !sql.IsText(ds.dotCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.dotCommitExpr.String())
}
} else {
if !sql.IsText(ds.fromCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.fromCommitExpr.String())
}
if !sql.IsText(ds.toCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.toCommitExpr.String())
}
}

if ds.tableNameExpr != nil {

@@ -199,7 +237,7 @@ func (ds *DiffSummaryTableFunction) WithExpressions(expression ...sql.Expression

// RowIter implements the sql.Node interface
func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error) {
fromCommitVal, toCommitVal, tableName, err := ds.evaluateArguments()
fromCommitVal, toCommitVal, dotCommitVal, tableName, err := ds.evaluateArguments()
if err != nil {
return nil, err
}

@@ -209,13 +247,18 @@ func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.
return nil, fmt.Errorf("unexpected database type: %T", ds.database)
}

sess := dsess.DSessFromSess(ctx.Session)
fromRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), fromCommitVal)
fromCommitStr, toCommitStr, err := loadCommitStrings(ctx, fromCommitVal, toCommitVal, dotCommitVal, sqledb)
if err != nil {
return nil, err
}

toRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), toCommitVal)
sess := dsess.DSessFromSess(ctx.Session)
fromRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), fromCommitStr)
if err != nil {
return nil, err
}

toRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), toCommitStr)
if err != nil {
return nil, err
}

@@ -256,42 +299,43 @@ func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.
return NewDiffSummaryTableFunctionRowIter(diffSummaries), nil
}

// evaluateArguments returns fromCommitValStr, toCommitValStr and tableName.
// It evaluates the argument expressions to turn them into values this DiffTableFunction
// evaluateArguments returns fromCommitVal, toCommitVal, dotCommitVal, and tableName.
// It evaluates the argument expressions to turn them into values this DiffSummaryTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
func (ds *DiffSummaryTableFunction) evaluateArguments() (string, string, string, error) {
func (ds *DiffSummaryTableFunction) evaluateArguments() (interface{}, interface{}, interface{}, string, error) {
var tableName string
if ds.tableNameExpr != nil {
tableNameVal, err := ds.tableNameExpr.Eval(ds.ctx, nil)
if err != nil {
return "", "", "", err
return nil, nil, nil, "", err
}
tn, ok := tableNameVal.(string)
if !ok {
return "", "", "", ErrInvalidTableName.New(ds.tableNameExpr.String())
return nil, nil, nil, "", ErrInvalidTableName.New(ds.tableNameExpr.String())
}
tableName = tn
}

if ds.dotCommitExpr != nil {
dotCommitVal, err := ds.dotCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}

return nil, nil, dotCommitVal, tableName, nil
}

fromCommitVal, err := ds.fromCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return "", "", "", err
}
fromCommitValStr, ok := fromCommitVal.(string)
if !ok {
return "", "", "", fmt.Errorf("received '%v' when expecting commit hash string", fromCommitVal)
return nil, nil, nil, "", err
}

toCommitVal, err := ds.toCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return "", "", "", err
}
toCommitValStr, ok := toCommitVal.(string)
if !ok {
return "", "", "", fmt.Errorf("received '%v' when expecting commit hash string", toCommitVal)
return nil, nil, nil, "", err
}

return fromCommitValStr, toCommitValStr, tableName, nil
return fromCommitVal, toCommitVal, nil, tableName, nil
}

// getDiffSummaryNodeFromDelta returns diffSummaryNode object and whether there is data diff or not. It gets tables

@@ -21,6 +21,8 @@ import (

"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/rowconv"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"

@@ -40,6 +42,7 @@ type DiffTableFunction struct {
ctx *sql.Context
fromCommitExpr sql.Expression
toCommitExpr sql.Expression
dotCommitExpr sql.Expression
tableNameExpr sql.Expression
database sql.Database
sqlSch sql.Schema

@@ -79,6 +82,11 @@ func (dtf *DiffTableFunction) WithDatabase(database sql.Database) (sql.Node, err

// Expressions implements the sql.Expressioner interface
func (dtf *DiffTableFunction) Expressions() []sql.Expression {
if dtf.dotCommitExpr != nil {
return []sql.Expression{
dtf.dotCommitExpr, dtf.tableNameExpr,
}
}
return []sql.Expression{
dtf.fromCommitExpr, dtf.toCommitExpr, dtf.tableNameExpr,
}

@@ -86,8 +94,8 @@ func (dtf *DiffTableFunction) Expressions() []sql.Expression {

// WithExpressions implements the sql.Expressioner interface
func (dtf *DiffTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
if len(expression) != 3 {
return nil, sql.ErrInvalidArgumentNumber.New(dtf.FunctionName(), 3, len(expression))
if len(expression) < 2 {
return nil, sql.ErrInvalidArgumentNumber.New(dtf.FunctionName(), "2 to 3", len(expression))
}

// TODO: For now, we will only support literal / fully-resolved arguments to the

@@ -99,16 +107,27 @@ func (dtf *DiffTableFunction) WithExpressions(expression ...sql.Expression) (sql
}
}

dtf.fromCommitExpr = expression[0]
dtf.toCommitExpr = expression[1]
dtf.tableNameExpr = expression[2]
if strings.Contains(expression[0].String(), "..") {
if len(expression) != 2 {
return nil, sql.ErrInvalidArgumentNumber.New(fmt.Sprintf("%v with .. or ...", dtf.FunctionName()), 2, len(expression))
}
dtf.dotCommitExpr = expression[0]
dtf.tableNameExpr = expression[1]
} else {
if len(expression) != 3 {
return nil, sql.ErrInvalidArgumentNumber.New(dtf.FunctionName(), 3, len(expression))
}
dtf.fromCommitExpr = expression[0]
dtf.toCommitExpr = expression[1]
dtf.tableNameExpr = expression[2]
}

fromCommitVal, toCommitVal, tableName, err := dtf.evaluateArguments()
fromCommitVal, toCommitVal, dotCommitVal, tableName, err := dtf.evaluateArguments()
if err != nil {
return nil, err
}

err = dtf.generateSchema(dtf.ctx, fromCommitVal, toCommitVal, tableName)
err = dtf.generateSchema(dtf.ctx, fromCommitVal, toCommitVal, dotCommitVal, tableName)
if err != nil {
return nil, err
}

@@ -127,29 +146,29 @@ func (dtf *DiffTableFunction) RowIter(ctx *sql.Context, _ sql.Row) (sql.RowIter,
// TODO: When we add support for joining on table functions, we'll need to evaluate this against the
// specified row. That row is what has the left_table context in a join query.
// This will expand the test cases we need to cover significantly.
fromCommit, toCommit, _, err := dtf.evaluateArguments()
fromCommitVal, toCommitVal, dotCommitVal, _, err := dtf.evaluateArguments()
if err != nil {
return nil, err
}

fromHash, fromOk := fromCommit.(string)
toHash, toOk := toCommit.(string)
if !fromOk || !toOk {
return nil, fmt.Errorf("expected strings for from and to revisions, got: %v, %v", fromHash, toHash)
}

sqledb, ok := dtf.database.(Database)
if !ok {
panic("unable to get dolt database")
return nil, fmt.Errorf("unable to get dolt database")
}
ddb := sqledb.GetDoltDB()

dp := dtables.NewDiffPartition(dtf.tableDelta.ToTable, dtf.tableDelta.FromTable, toHash, fromHash, dtf.toDate, dtf.fromDate, dtf.tableDelta.ToSch, dtf.tableDelta.FromSch)
fromCommitStr, toCommitStr, err := loadCommitStrings(ctx, fromCommitVal, toCommitVal, dotCommitVal, sqledb)
if err != nil {
return nil, err
}

ddb := sqledb.GetDoltDB()
dp := dtables.NewDiffPartition(dtf.tableDelta.ToTable, dtf.tableDelta.FromTable, toCommitStr, fromCommitStr, dtf.toDate, dtf.fromDate, dtf.tableDelta.ToSch, dtf.tableDelta.FromSch)

return NewDiffTableFunctionRowIterForSinglePartition(*dp, ddb, dtf.joiner), nil
}

// findMatchingDelta returns the best matching table delta for the table name given, taking renames into account
// findMatchingDelta returns the best matching table delta for the table name
// given, taking renames into account
func findMatchingDelta(deltas []diff.TableDelta, tableName string) diff.TableDelta {
tableName = strings.ToLower(tableName)
for _, d := range deltas {

@@ -168,25 +187,132 @@ func findMatchingDelta(deltas []diff.TableDelta, tableName string) diff.TableDel
return diff.TableDelta{}
}

// loadDetailsForRef loads the root, hash, and timestamp for the specified ref value
func loadDetailsForRef(
ctx *sql.Context,
ref interface{},
ddb Database,
) (*doltdb.RootValue, string, *types.Timestamp, error) {
hashStr, ok := ref.(string)
if !ok {
return nil, "", nil, fmt.Errorf("received '%v' when expecting commit hash string", ref)
type refDetails struct {
root *doltdb.RootValue
hashStr string
commitTime *types.Timestamp
}

// loadDetailsForRef loads the root, hash, and timestamp for the specified from
// and to ref values
func loadDetailsForRefs(ctx *sql.Context, fromRef, toRef, dotRef interface{}, db Database) (*refDetails, *refDetails, error) {
fromCommitStr, toCommitStr, err := loadCommitStrings(ctx, fromRef, toRef, dotRef, db)
if err != nil {
return nil, nil, err
}

sess := dsess.DSessFromSess(ctx.Session)

root, commitTime, err := sess.ResolveRootForRef(ctx, ddb.Name(), hashStr)
fromDetails, err := resolveRoot(ctx, sess, db.Name(), fromCommitStr)
if err != nil {
return nil, "", nil, err
return nil, nil, err
}

return root, hashStr, commitTime, nil
toDetails, err := resolveRoot(ctx, sess, db.Name(), toCommitStr)
if err != nil {
return nil, nil, err
}

return fromDetails, toDetails, nil
}

func resolveCommitStrings(ctx *sql.Context, fromRef, toRef, dotRef interface{}, db Database) (string, string, error) {
if dotRef != nil {
dotStr, err := interfaceToString(dotRef)
if err != nil {
return "", "", err
}

sess := dsess.DSessFromSess(ctx.Session)

if strings.Contains(dotStr, "...") {
refs := strings.Split(dotStr, "...")

headRef, err := sess.CWBHeadRef(ctx, db.Name())
if err != nil {
return "", "", err
}

rightCm, err := resolveCommit(ctx, db.ddb, headRef, refs[0])
if err != nil {
return "", "", err
}

leftCm, err := resolveCommit(ctx, db.ddb, headRef, refs[1])
if err != nil {
return "", "", err
}

mergeBase, err := merge.MergeBase(ctx, rightCm, leftCm)
if err != nil {
return "", "", err
}

return mergeBase.String(), refs[1], nil
} else {
refs := strings.Split(dotStr, "..")
return refs[0], refs[1], nil
}
}

fromStr, err := interfaceToString(fromRef)
if err != nil {
return "", "", err
}

toStr, err := interfaceToString(toRef)
if err != nil {
return "", "", err
}

return fromStr, toStr, nil
}

// loadCommitStrings gets the to and from commit strings, using the common
// ancestor as the from commit string for three dot diff
func loadCommitStrings(ctx *sql.Context, fromRef, toRef, dotRef interface{}, db Database) (string, string, error) {
fromStr, toStr, err := resolveCommitStrings(ctx, fromRef, toRef, dotRef, db)
if err != nil {
return "", "", err
}

if len(fromStr) == 0 || len(toStr) == 0 {
return "", "", fmt.Errorf("expected strings for from and to revisions, got: %v, %v", fromStr, toStr)
}

return fromStr, toStr, nil
}

// interfaceToString converts an interface to a string
func interfaceToString(r interface{}) (string, error) {
str, ok := r.(string)
if !ok {
return "", fmt.Errorf("received '%v' when expecting commit hash string", str)
}
return str, nil
}

func resolveRoot(ctx *sql.Context, sess *dsess.DoltSession, dbName, hashStr string) (*refDetails, error) {
root, commitTime, err := sess.ResolveRootForRef(ctx, dbName, hashStr)
if err != nil {
return nil, err
}

return &refDetails{root, hashStr, commitTime}, nil
}

func resolveCommit(ctx *sql.Context, ddb *doltdb.DoltDB, headRef ref.DoltRef, cSpecStr string) (*doltdb.Commit, error) {
rightCs, err := doltdb.NewCommitSpec(cSpecStr)
if err != nil {
return nil, err
}

rightCm, err := ddb.Resolve(ctx, rightCs, headRef)
if err != nil {
return nil, err
}

return rightCm, nil
}
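
As a quick reference for the revision-string handling above: a two-dot argument such as from..to diffs the two named revisions directly, while a three-dot argument such as from...to diffs the merge base of the two revisions against to. The standalone sketch below isolates just that string-splitting decision; the package name and the splitDotRevisions helper are hypothetical, and the merge-base lookup (merge.MergeBase in the real code) is elided.

package example // hypothetical illustration only, not part of this change

import "strings"

// splitDotRevisions separates a dotted revision argument into its from/to parts
// and reports whether the three-dot (merge base) form was used.
func splitDotRevisions(dotStr string) (from, to string, threeDot bool) {
	if strings.Contains(dotStr, "...") {
		parts := strings.SplitN(dotStr, "...", 2)
		// callers are expected to replace parts[0] with merge-base(parts[0], parts[1])
		return parts[0], parts[1], true
	}
	parts := strings.SplitN(dotStr, "..", 2)
	return parts[0], parts[1], false
}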

// WithChildren implements the sql.Node interface

@@ -199,7 +325,7 @@ func (dtf *DiffTableFunction) WithChildren(node ...sql.Node) (sql.Node, error) {

// CheckPrivileges implements the sql.Node interface
func (dtf *DiffTableFunction) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
_, _, tableName, err := dtf.evaluateArguments()
_, _, _, tableName, err := dtf.evaluateArguments()
if err != nil {
return false
}

@@ -211,46 +337,58 @@ func (dtf *DiffTableFunction) CheckPrivileges(ctx *sql.Context, opChecker sql.Pr

// evaluateArguments evaluates the argument expressions to turn them into values this DiffTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
func (dtf *DiffTableFunction) evaluateArguments() (interface{}, interface{}, string, error) {
func (dtf *DiffTableFunction) evaluateArguments() (interface{}, interface{}, interface{}, string, error) {
if !dtf.Resolved() {
return nil, nil, "", nil
return nil, nil, nil, "", nil
}

if !sql.IsText(dtf.tableNameExpr.Type()) {
return nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.tableNameExpr.String())
}

if !sql.IsText(dtf.fromCommitExpr.Type()) {
return nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.fromCommitExpr.String())
}

if !sql.IsText(dtf.toCommitExpr.Type()) {
return nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.toCommitExpr.String())
return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.tableNameExpr.String())
}

tableNameVal, err := dtf.tableNameExpr.Eval(dtf.ctx, nil)
if err != nil {
return nil, nil, "", err
return nil, nil, nil, "", err
}

tableName, ok := tableNameVal.(string)
if !ok {
return nil, nil, "", ErrInvalidTableName.New(dtf.tableNameExpr.String())
return nil, nil, nil, "", ErrInvalidTableName.New(dtf.tableNameExpr.String())
}

if dtf.dotCommitExpr != nil {
if !sql.IsText(dtf.dotCommitExpr.Type()) {
return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.dotCommitExpr.String())
}

dotCommitVal, err := dtf.dotCommitExpr.Eval(dtf.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}

return nil, nil, dotCommitVal, tableName, nil
}

if !sql.IsText(dtf.fromCommitExpr.Type()) {
return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.fromCommitExpr.String())
}
if !sql.IsText(dtf.toCommitExpr.Type()) {
return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.toCommitExpr.String())
}

fromCommitVal, err := dtf.fromCommitExpr.Eval(dtf.ctx, nil)
if err != nil {
return nil, nil, "", err
return nil, nil, nil, "", err
}

toCommitVal, err := dtf.toCommitExpr.Eval(dtf.ctx, nil)
if err != nil {
return nil, nil, "", err
return nil, nil, nil, "", err
}

return fromCommitVal, toCommitVal, tableName, nil
return fromCommitVal, toCommitVal, nil, tableName, nil
}

func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, toCommitVal interface{}, tableName string) error {
func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, toCommitVal, dotCommitVal interface{}, tableName string) error {
if !dtf.Resolved() {
return nil
}

@@ -260,7 +398,7 @@ func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, to
return fmt.Errorf("unexpected database type: %T", dtf.database)
}

delta, err := dtf.cacheTableDelta(ctx, fromCommitVal, toCommitVal, tableName, sqledb)
delta, err := dtf.cacheTableDelta(ctx, fromCommitVal, toCommitVal, dotCommitVal, tableName, sqledb)
if err != nil {
return err
}

@@ -308,22 +446,18 @@ func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, to

// cacheTableDelta caches and returns an appropriate table delta for the table name given, taking renames into
// consideration. Returns a sql.ErrTableNotFound if the given table name cannot be found in either revision.
func (dtf *DiffTableFunction) cacheTableDelta(ctx *sql.Context, fromCommitVal interface{}, toCommitVal interface{}, tableName string, db Database) (diff.TableDelta, error) {
fromRoot, _, fromDate, err := loadDetailsForRef(ctx, fromCommitVal, db)
func (dtf *DiffTableFunction) cacheTableDelta(ctx *sql.Context, fromCommitVal, toCommitVal, dotCommitVal interface{}, tableName string, db Database) (diff.TableDelta, error) {
fromRefDetails, toRefDetails, err := loadDetailsForRefs(ctx, fromCommitVal, toCommitVal, dotCommitVal, db)
if err != nil {
return diff.TableDelta{}, err
}

toRoot, _, toDate, err := loadDetailsForRef(ctx, toCommitVal, db)
fromTable, _, fromTableExists, err := fromRefDetails.root.GetTableInsensitive(ctx, tableName)
if err != nil {
return diff.TableDelta{}, err
}

fromTable, _, fromTableExists, err := fromRoot.GetTableInsensitive(ctx, tableName)
if err != nil {
return diff.TableDelta{}, err
}
toTable, _, toTableExists, err := toRoot.GetTableInsensitive(ctx, tableName)
toTable, _, toTableExists, err := toRefDetails.root.GetTableInsensitive(ctx, tableName)
if err != nil {
return diff.TableDelta{}, err
}

@@ -333,13 +467,13 @@ func (dtf *DiffTableFunction) cacheTableDelta(ctx *sql.Context, fromCommitVal in
}

// TODO: it would be nice to limit this to just the table under consideration, not all tables with a diff
deltas, err := diff.GetTableDeltas(ctx, fromRoot, toRoot)
deltas, err := diff.GetTableDeltas(ctx, fromRefDetails.root, toRefDetails.root)
if err != nil {
return diff.TableDelta{}, err
}

dtf.fromDate = fromDate
dtf.toDate = toDate
dtf.fromDate = fromRefDetails.commitTime
dtf.toDate = toRefDetails.commitTime

delta := findMatchingDelta(deltas, tableName)

@@ -389,11 +523,19 @@ func (dtf *DiffTableFunction) Schema() sql.Schema {

// Resolved implements the sql.Resolvable interface
func (dtf *DiffTableFunction) Resolved() bool {
if dtf.dotCommitExpr != nil {
return dtf.tableNameExpr.Resolved() && dtf.dotCommitExpr.Resolved()
}
return dtf.tableNameExpr.Resolved() && dtf.fromCommitExpr.Resolved() && dtf.toCommitExpr.Resolved()
}

// String implements the Stringer interface
func (dtf *DiffTableFunction) String() string {
if dtf.dotCommitExpr != nil {
return fmt.Sprintf("DOLT_DIFF(%s, %s)",
dtf.dotCommitExpr.String(),
dtf.tableNameExpr.String())
}
return fmt.Sprintf("DOLT_DIFF(%s, %s, %s)",
dtf.fromCommitExpr.String(),
dtf.toCommitExpr.String(),

@@ -46,7 +46,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 81
const SkipPreparedsCount = 82

const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"

@@ -1426,6 +1426,12 @@ func TestCharsetCollationWire(t *testing.T) {
enginetest.TestCharsetCollationWire(t, harness, newSessionBuilder(harness))
}

func TestDatabaseCollationWire(t *testing.T) {
skipOldFormat(t)
harness := newDoltHarness(t)
enginetest.TestDatabaseCollationWire(t, harness, newSessionBuilder(harness))
}

func TestAddDropPrimaryKeys(t *testing.T) {
t.Run("adding and dropping primary keys does not result in duplicate NOT NULL constraints", func(t *testing.T) {
harness := newDoltHarness(t)

@@ -767,6 +767,13 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff with dots should fail with a database access error
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_summary should fail with a database access error
User: "tester",

@@ -774,6 +781,13 @@
Query: "SELECT * FROM dolt_diff_summary('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_summary with dots should fail with a database access error
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_log should fail with a database access error
User: "tester",

@@ -795,6 +809,13 @@
Query: "SELECT COUNT(*) FROM dolt_diff('main~', 'main', 'test');",
Expected: []sql.Row{{1}},
},
{
// After granting access to mydb.test, dolt_diff with dots should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff('main~..main', 'test');",
Expected: []sql.Row{{1}},
},
{
// With access to the db, but not the table, dolt_diff should fail
User: "tester",

@@ -802,6 +823,13 @@
Query: "SELECT * FROM dolt_diff('main~', 'main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff('main~..main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_summary should fail
User: "tester",

@@ -809,6 +837,13 @@
Query: "SELECT * FROM dolt_diff_summary('main~', 'main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_summary with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~...main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, dolt_diff_summary should fail for all tables if no access any of tables
User: "tester",

@@ -816,6 +851,13 @@
Query: "SELECT * FROM dolt_diff_summary('main~', 'main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, dolt_diff_summary with dots should fail for all tables if no access any of tables
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~...main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// Revoke select on mydb.test
User: "root",

@@ -830,6 +872,13 @@
Query: "SELECT * FROM dolt_diff('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Grant multi-table access for all of mydb
User: "root",

@@ -844,6 +893,13 @@
Query: "SELECT COUNT(*) FROM dolt_diff('main~', 'main', 'test');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff('main~..main', 'test');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_summary should work
User: "tester",

@@ -851,6 +907,13 @@
Query: "SELECT COUNT(*) FROM dolt_diff_summary('main~', 'main');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_summary with dots should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff_summary('main~...main');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_log should work
User: "tester",

@@ -872,6 +935,13 @@
Query: "SELECT * FROM dolt_diff('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff('main~...main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff_summary should fail
User: "tester",

@@ -900,6 +970,13 @@
Query: "SELECT COUNT(*) FROM dolt_diff('main~', 'main', 'test');",
Expected: []sql.Row{{1}},
},
{
// After granting global access to *.*, dolt_diff should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff('main~...main', 'test');",
Expected: []sql.Row{{1}},
},
{
// Revoke global access
User: "root",

@@ -914,6 +991,13 @@
Query: "SELECT * FROM dolt_diff('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking global access, dolt_diff with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
},
},
}

@@ -4418,6 +4502,10 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
"set @Commit2 = dolt_commit('-am', 'inserting into t');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff();",
ExpectedErr: sql.ErrInvalidArgumentNumber,
},
{
Query: "SELECT * from dolt_diff('t');",
ExpectedErr: sql.ErrInvalidArgumentNumber,
|
||||
@@ -4470,6 +4558,43 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT * from dolt_diff(hashof('main'), @Commit2, LOWER('T'));",
|
||||
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
|
||||
},
|
||||
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..main~');",
|
||||
ExpectedErr: sql.ErrInvalidArgumentNumber,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..main~', 'extra', 't');",
|
||||
ExpectedErr: sql.ErrInvalidArgumentNumber,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..main^', 123);",
|
||||
ExpectedErr: sql.ErrInvalidArgumentDetails,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..main~', 'doesnotexist');",
|
||||
ExpectedErr: sql.ErrTableNotFound,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('fakefakefakefakefakefakefakefake..main', 't');",
|
||||
ExpectedErrStr: "target commit not found",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..fakefakefakefakefakefakefakefake', 't');",
|
||||
ExpectedErrStr: "target commit not found",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('fakefakefakefakefakefakefakefake...main', 't');",
|
||||
ExpectedErrStr: "target commit not found",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main...fakefakefakefakefakefakefakefake', 't');",
|
||||
ExpectedErrStr: "target commit not found",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff('main..main~', LOWER('T'));",
|
||||
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -4573,6 +4698,14 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
{nil, nil, nil, 3, "five", "six", "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT from_pk, from_c1, from_c2, to_pk, to_c1, to_c2, diff_type from dolt_diff('STAGED..WORKING', 't') order by coalesce(from_pk, to_pk);",
|
||||
Expected: []sql.Row{
|
||||
{1, "one", "two", 1, "one", "100", "modified"},
|
||||
{2, "three", "four", nil, nil, nil, "removed"},
|
||||
{nil, nil, nil, 3, "five", "six", "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT from_pk, from_c1, from_c2, to_pk, to_c1, to_c2, diff_type from dolt_diff('WORKING', 'STAGED', 't') order by coalesce(from_pk, to_pk);",
|
||||
Expected: []sql.Row{
|
||||
@@ -4585,6 +4718,10 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT from_pk, from_c1, from_c2, to_pk, to_c1, to_c2, diff_type from dolt_diff('WORKING', 'WORKING', 't') order by coalesce(from_pk, to_pk);",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "SELECT from_pk, from_c1, from_c2, to_pk, to_c1, to_c2, diff_type from dolt_diff('WORKING..WORKING', 't') order by coalesce(from_pk, to_pk);",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "SELECT from_pk, from_c1, from_c2, to_pk, to_c1, to_c2, diff_type from dolt_diff('STAGED', 'STAGED', 't') order by coalesce(from_pk, to_pk);",
|
||||
Expected: []sql.Row{},
|
||||
@@ -4639,6 +4776,13 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
{2, "two", 2, "two", "three", "modified"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main..branch1', 't');",
|
||||
Expected: []sql.Row{
|
||||
{nil, nil, 1, "one", "two", "removed"},
|
||||
{2, "two", 2, "two", "three", "modified"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, to_c2, from_pk, from_c1, diff_type from dolt_diff('branch1', 'main', 't');",
|
||||
Expected: []sql.Row{
|
||||
@@ -4646,6 +4790,13 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
{2, "two", "three", 2, "two", "modified"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, to_c2, from_pk, from_c1, diff_type from dolt_diff('branch1..main', 't');",
|
||||
Expected: []sql.Row{
|
||||
{1, "one", "two", nil, nil, "added"},
|
||||
{2, "two", "three", 2, "two", "modified"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main~', 'branch1', 't');",
|
||||
Expected: []sql.Row{
|
||||
@@ -4653,6 +4804,41 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
{2, "two", nil, nil, nil, "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main~..branch1', 't');",
|
||||
Expected: []sql.Row{
|
||||
{nil, nil, 1, "one", "two", "removed"},
|
||||
{2, "two", nil, nil, nil, "added"},
|
||||
},
|
||||
},
|
||||
|
||||
// Three dot
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main...branch1', 't');",
|
||||
Expected: []sql.Row{
|
||||
{nil, nil, 1, "one", "two", "removed"},
|
||||
{2, "two", nil, nil, nil, "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, to_c2, from_pk, from_c1, diff_type from dolt_diff('branch1...main', 't');",
|
||||
Expected: []sql.Row{
|
||||
{2, "two", "three", nil, nil, "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main~...branch1', 't');",
|
||||
Expected: []sql.Row{
|
||||
{nil, nil, 1, "one", "two", "removed"},
|
||||
{2, "two", nil, nil, nil, "added"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT to_pk, to_c1, from_pk, from_c1, from_c2, diff_type from dolt_diff('main...branch1~', 't');",
|
||||
Expected: []sql.Row{
|
||||
{nil, nil, 1, "one", "two", "removed"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -4880,6 +5066,10 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "select from_a, from_b, from_commit, to_commit, diff_type from dolt_diff('HEAD~', 'HEAD', 't1')",
|
||||
Expected: []sql.Row{{1, 2, "HEAD~", "HEAD", "removed"}},
|
||||
},
|
||||
{
|
||||
Query: "select from_a, from_b, from_commit, to_commit, diff_type from dolt_diff('HEAD~..HEAD', 't1')",
|
||||
Expected: []sql.Row{{1, 2, "HEAD~", "HEAD", "removed"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -4899,6 +5089,10 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "select to_a, to_b, from_commit, to_commit, diff_type from dolt_diff('HEAD~', 'HEAD', 't2')",
|
||||
Expected: []sql.Row{{3, 4, "HEAD~", "HEAD", "added"}},
|
||||
},
|
||||
{
|
||||
Query: "select to_a, to_b, from_commit, to_commit, diff_type from dolt_diff('HEAD~..HEAD', 't2')",
|
||||
Expected: []sql.Row{{3, 4, "HEAD~", "HEAD", "added"}},
|
||||
},
|
||||
{
|
||||
// Maybe confusing? We match the old table name as well
|
||||
Query: "select to_a, to_b, from_commit, to_commit, diff_type from dolt_diff('HEAD~', 'HEAD', 't1')",
|
||||
@@ -4928,10 +5122,18 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "select to_pk2, to_col1, from_pk, from_col1, diff_type from dolt_diff('HEAD~', 'HEAD', 't1')",
|
||||
Expected: []sql.Row{{1, 100, 1, 1, "modified"}},
|
||||
},
|
||||
{
|
||||
Query: "select to_pk2, to_col1, from_pk, from_col1, diff_type from dolt_diff('HEAD~..HEAD', 't1')",
|
||||
Expected: []sql.Row{{1, 100, 1, 1, "modified"}},
|
||||
},
|
||||
{
|
||||
Query: "select to_pk2a, to_pk2b, to_col1, from_pk1a, from_pk1b, from_col1, diff_type from dolt_diff('HEAD~', 'HEAD', 't2');",
|
||||
Expected: []sql.Row{{1, 1, 100, 1, 1, 1, "modified"}},
|
||||
},
|
||||
{
|
||||
Query: "select to_pk2a, to_pk2b, to_col1, from_pk1a, from_pk1b, from_col1, diff_type from dolt_diff('HEAD~..HEAD', 't2');",
|
||||
Expected: []sql.Row{{1, 1, 100, 1, 1, 1, "modified"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -5451,6 +5653,10 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
"set @Commit2 = dolt_commit('-am', 'inserting into t');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary();",
|
||||
ExpectedErr: sql.ErrInvalidArgumentNumber,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('t');",
|
||||
ExpectedErr: sql.ErrInvalidArgumentNumber,
|
||||
@@ -5479,14 +5685,26 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT * from dolt_diff_summary('fake-branch', @Commit2, 't');",
|
||||
ExpectedErrStr: "branch not found: fake-branch",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('fake-branch..main', 't');",
|
||||
ExpectedErrStr: "branch not found: fake-branch",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary(@Commit1, 'fake-branch', 't');",
|
||||
ExpectedErrStr: "branch not found: fake-branch",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main..fake-branch', 't');",
|
||||
ExpectedErrStr: "branch not found: fake-branch",
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 'doesnotexist');",
|
||||
ExpectedErr: sql.ErrTableNotFound,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main^..main', 'doesnotexist');",
|
||||
ExpectedErr: sql.ErrTableNotFound,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary(@Commit1, concat('fake', '-', 'branch'), 't');",
|
||||
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
|
||||
@@ -5499,6 +5717,10 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, LOWER('T'));",
|
||||
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main..main~', LOWER('T'));",
|
||||
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -5702,6 +5924,10 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT * from dolt_diff_summary('STAGED', 'WORKING', 't')",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('STAGED..WORKING', 't')",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('WORKING', 'STAGED', 't')",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
|
||||
@@ -5710,6 +5936,10 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "SELECT * from dolt_diff_summary('WORKING', 'WORKING', 't')",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('WORKING..WORKING', 't')",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('STAGED', 'STAGED', 't')",
|
||||
Expected: []sql.Row{},
|
||||
@@ -5751,20 +5981,83 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
"select dolt_checkout('main');",
|
||||
"insert into t values (2, 'two', 'three');",
|
||||
"set @Commit6 = dolt_commit('-am', 'inserting row 2 in main');",
|
||||
|
||||
"create table newtable (pk int primary key);",
|
||||
"insert into newtable values (1), (2);",
|
||||
"set @Commit7 = dolt_commit('-Am', 'new table newtable');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main', 'branch1', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 0, 1, 1, 0, 4, 0, 2, 1, 6, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main..branch1', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 0, 1, 1, 0, 4, 0, 2, 1, 6, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main', 'branch1');",
|
||||
Expected: []sql.Row{
|
||||
{"t", 0, 0, 1, 1, 0, 4, 0, 2, 1, 6, 2},
|
||||
{"newtable", 0, 0, 2, 0, 0, 2, 0, 2, 0, 2, 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main..branch1');",
|
||||
Expected: []sql.Row{
|
||||
{"t", 0, 0, 1, 1, 0, 4, 0, 2, 1, 6, 2},
|
||||
{"newtable", 0, 0, 2, 0, 0, 2, 0, 2, 0, 2, 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('branch1', 'main', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 0, 1, 4, 0, 1, 1, 2, 2, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main~', 'branch1', 't');",
|
||||
Query: "SELECT * from dolt_diff_summary('branch1..main', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 0, 1, 4, 0, 1, 1, 2, 2, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main~2', 'branch1', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 0, 2, 3, 0, 1, 1, 3, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main~2..branch1', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 0, 2, 3, 0, 1, 1, 3, 2}},
|
||||
},
|
||||
|
||||
// Three dot
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main...branch1', 't');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 0, 2, 3, 0, 1, 1, 3, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main...branch1');",
|
||||
Expected: []sql.Row{{"t", 0, 1, 1, 0, 2, 3, 0, 1, 1, 3, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('branch1...main', 't');",
|
||||
Expected: []sql.Row{{"t", 1, 1, 0, 0, 3, 0, 0, 1, 2, 3, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('branch1...main');",
|
||||
Expected: []sql.Row{
|
||||
{"t", 1, 1, 0, 0, 3, 0, 0, 1, 2, 3, 6},
|
||||
{"newtable", 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 2},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('branch1...main^');",
|
||||
Expected: []sql.Row{{"t", 1, 1, 0, 0, 3, 0, 0, 1, 2, 3, 6}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('branch1...main', 'newtable');",
|
||||
Expected: []sql.Row{{"newtable", 0, 2, 0, 0, 2, 0, 0, 0, 2, 0, 2}},
|
||||
},
|
||||
{
|
||||
Query: "SELECT * from dolt_diff_summary('main...main', 'newtable');",
|
||||
Expected: []sql.Row{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -5922,11 +6215,20 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
|
||||
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't2')",
|
||||
Expected: []sql.Row{{"t2", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
|
||||
},
|
||||
{
|
||||
Query: "select * from dolt_diff_summary('HEAD~..HEAD', 't2')",
|
||||
Expected: []sql.Row{{"t2", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
|
||||
},
|
||||
{
|
||||
// Old table name can be matched as well
|
||||
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't1')",
|
||||
Expected: []sql.Row{{"t1", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
|
||||
},
|
||||
{
|
||||
// Old table name can be matched as well
|
||||
Query: "select * from dolt_diff_summary('HEAD~..HEAD', 't1')",
|
||||
Expected: []sql.Row{{"t1", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -88,9 +88,9 @@ func BasicSelectTests() []SelectTest {
var headCommitHash string
switch types.Format_Default {
case types.Format_DOLT:
headCommitHash = "4ej7hfduufg4o2837g3gc4p5uolrlmv9"
headCommitHash = "a0gt4vif0b0bf19g89k87gs55qqlqpod"
case types.Format_DOLT_DEV:
headCommitHash = "4ej7hfduufg4o2837g3gc4p5uolrlmv9"
headCommitHash = "a0gt4vif0b0bf19g89k87gs55qqlqpod"
case types.Format_LD_1:
headCommitHash = "73hc2robs4v0kt9taoe3m5hd49dmrgun"
}

@@ -37,6 +37,14 @@ func ParseCreateTableStatement(ctx context.Context, root *doltdb.RootValue, quer

ts := ddl.(*sqlparser.DDL).TableSpec
s, collation, err := parse.TableSpecToSchema(sql.NewContext(ctx), ts, false)
for _, col := range s.Schema {
if collatedType, ok := col.Type.(sql.TypeWithCollation); ok {
col.Type, err = collatedType.WithNewCollation(sql.Collation_Default)
if err != nil {
return "", nil, err
}
}
}

if err != nil {
return "", nil, err

@@ -13,6 +13,7 @@
// limitations under the License.

include "prolly.fbs";
include "collation.fbs";

namespace serial;

@@ -22,6 +23,8 @@ table RootValue {
tables:[ubyte]; // Serialized AddressMap.

foreign_key_addr:[ubyte];

collation:Collation;
}

// KEEP THIS IN SYNC WITH fileidentifiers.go

@@ -26,10 +26,9 @@ import (
"context"
"io"

"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nbs"

"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)

@@ -39,6 +39,10 @@ type database struct {
ns tree.NodeStore
}

const (
databaseCollation = "db_collation"
)

var (
ErrOptimisticLockFailed = errors.New("optimistic lock failed on database Root update")
ErrMergeNeeded = errors.New("dataset head is not ancestor of commit")

@@ -190,10 +190,12 @@ teardown() {

start_sql_server

server_query "" 1 dolt "" "create database testdb" ""
server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql\ntestdb" ""
server_query "testdb" 1 dolt "" "create table a(x int)" ""
server_query "testdb" 1 dolt "" "insert into a values (1), (2)" ""
dolt sql-client --use-db '' -u dolt -P $PORT -q "create database testdb"
run dolt sql-client --use-db '' -u dolt -P $PORT -r csv -q "show databases"
[ $status -eq 0 ]
[[ "$output" =~ "testdb" ]] || false
dolt sql-client --use-db testdb -u dolt -P $PORT -q "create table a(x int)"
dolt sql-client --use-db testdb -u dolt -P $PORT -q "insert into a values (1), (2)"

[ -d "testdb" ]
cd testdb

@@ -65,11 +65,14 @@ make_it() {

start_sql_server "dolt_repo_$$"

server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'"

server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep')"
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_branch('-D', 'main');"

server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "id\n" ""
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "describe test"
[ $status -eq 0 ]
[[ "$output" =~ "id" ]] || false
}

@test "deleted-branches: can SQL connect with existing branch revision specifier when checked out branch is deleted" {
@@ -77,14 +80,14 @@ make_it() {

start_sql_server "dolt_repo_$$"

server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""

dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");'

# Against the default branch it fails
run server_query "dolt_repo_$$" 1 "" dolt "" "SELECT * FROM test" "id\n" ""
[ "$status" -eq 1 ] || fail "expected query against the default branch, which was deleted, to fail"
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
[ $status -ne 0 ]

# Against to_keep it succeeds
server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test" "id\n" ""
dolt sql-client --use-db "dolt_repo_$$/to_keep" -u dolt -P $PORT -q "SELECT * FROM test"
}

@test "deleted-branches: can SQL connect with existing branch revision specifier when dolt_default_branch is invalid" {
|
||||
@@ -92,13 +95,14 @@ make_it() {
|
||||
|
||||
start_sql_server "dolt_repo_$$"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'"
|
||||
|
||||
# Against the default branch it fails
|
||||
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" "" 1
|
||||
run dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test" ""
|
||||
[ $status -ne 0 ]
|
||||
|
||||
# Against main, which exists it succeeds
|
||||
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test" "id\n" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$/main" -u dolt -P $PORT -q "SELECT * FROM test"
|
||||
}
|
||||
|
||||
@test "deleted-branches: calling DOLT_CHECKOUT on SQL connection with existing branch revision specifier when dolt_default_branch is invalid does not panic" {
|
||||
@@ -106,13 +110,14 @@ make_it() {
|
||||
|
||||
start_sql_server "dolt_repo_$$"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'this_branch_does_not_exist'" ""
|
||||
|
||||
# We are able to use a database branch revision in the connection string
|
||||
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test;"
|
||||
dolt sql-client --use-db "dolt_repo_$$/main" -u dolt -P $PORT -q "SELECT * FROM test;"
|
||||
|
||||
# Trying to checkout a new branch throws an error, but doesn't panic
|
||||
run server_query "dolt_repo_$$/main" 1 dolt "" "CALL DOLT_CHECKOUT('to_keep');" "" 1
|
||||
run dolt sql-client --use-db "dolt_repo_$$/main" -u dolt -P $PORT -q"CALL DOLT_CHECKOUT('to_keep');"
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "branch not found" ]] || false
|
||||
}
|
||||
|
||||
@@ -123,14 +128,14 @@ make_it() {
|
||||
|
||||
start_sql_server "dolt_repo_$$"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep'); call dolt_branch('-D', 'main');"
|
||||
|
||||
# We are able to use a database branch revision in the connection string
|
||||
server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test;"
|
||||
dolt sql-client --use-db "dolt_repo_$$/to_keep" -u dolt -P $PORT -q "SELECT * FROM test;"
|
||||
|
||||
# Trying to checkout a new branch throws an error, but doesn't panic
|
||||
run server_query "dolt_repo_$$/to_keep" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" "" 1
|
||||
|
||||
run dolt sql-client --use-db "dolt_repo_$$/to_keep" -u dolt -P $PORT -q "CALL DOLT_CHECKOUT('to_checkout');"
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "branch not found" ]] || false
|
||||
}
|
||||
|
||||
@@ -141,11 +146,11 @@ make_it() {
|
||||
|
||||
start_sql_server "dolt_repo_$$"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SET @@GLOBAL.dolt_repo_$$_default_branch = 'to_keep'" ""
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" 'call dolt_checkout("to_keep"); call dolt_branch("-D", "main");' ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "call dolt_checkout('to_keep'); call dolt_branch('-D', 'main');"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "SELECT * FROM test"
|
||||
|
||||
server_query "dolt_repo_$$" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" ""
|
||||
dolt sql-client --use-db "dolt_repo_$$" -u dolt -P $PORT -q "CALL DOLT_CHECKOUT('to_checkout')"
|
||||
}
|
||||
|
||||
@@ -49,6 +49,27 @@ teardown() {
run dolt diff head head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false

# Two dot
run dolt diff head..
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt diff head^..
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false

run dolt diff head^..head
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 0" ]] || false

run dolt diff head..head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false

run dolt diff ..head^
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 0" ]] || false
}

@test "diff: dirty working set" {
@@ -70,6 +91,132 @@ teardown() {
[[ "$output" =~ "+ | 0" ]] || false
}

@test "diff: two and three dot diff" {
dolt checkout main
dolt sql -q 'insert into test values (0,0,0,0,0,0)'
dolt add .
dolt commit -m table
dolt checkout -b branch1
dolt sql -q 'insert into test values (1,1,1,1,1,1)'
dolt add .
dolt commit -m row
dolt checkout main
dolt sql -q 'insert into test values (2,2,2,2,2,2)'
dolt add .
dolt commit -m newrow

# Two dot shows all changes between branches
run dolt diff branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false

run dolt diff branch1..
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false

run dolt diff branch1..main
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false

run dolt diff branch1 main
[ "$status" -eq 0 ]
[[ "$output" =~ "- | 1" ]] || false
[[ "$output" =~ "+ | 2" ]] || false

run dolt diff ..branch1
[ "$status" -eq 0 ]
[[ "$output" =~ "+ | 1" ]] || false
[[ "$output" =~ "- | 2" ]] || false

run dolt diff main..branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ "$output" =~ "- | 2" ]] || false
|
||||
|
||||
run dolt diff main branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ "$output" =~ "- | 2" ]] || false
|
||||
|
||||
# Three dot shows changes between common ancestor and branch
|
||||
run dolt diff branch1...
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff $(dolt merge-base branch1 HEAD)
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff --merge-base branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff branch1...main
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff --merge-base branch1 main
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff main...branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ ! "$output" =~ "- | 2" ]] || false
|
||||
|
||||
run dolt diff --merge-base main branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ ! "$output" =~ "- | 2" ]] || false
|
||||
|
||||
run dolt diff --merge-base main branch1 test
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ ! "$output" =~ "- | 2" ]] || false
|
||||
|
||||
run dolt diff $(dolt merge-base branch1 main) main
|
||||
[ "$status" -eq 0 ]
|
||||
[[ ! "$output" =~ "- | 1" ]] || false
|
||||
[[ "$output" =~ "+ | 2" ]] || false
|
||||
|
||||
run dolt diff $(dolt merge-base main branch1) branch1
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 1" ]] || false
|
||||
[[ ! "$output" =~ "- | 2" ]] || false
|
||||
|
||||
# Dots work with --summary
|
||||
run dolt diff main..branch1 --summary
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "1 Row Unmodified (50.00%)" ]] || false
|
||||
[[ "$output" =~ "1 Row Added (50.00%)" ]] || false
|
||||
[[ "$output" =~ "1 Row Deleted (50.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "6 Cells Added (50.00%)" ]] || false
|
||||
[[ "$output" =~ "6 Cells Deleted (50.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "(2 Row Entries vs 2 Row Entries)" ]] || false
|
||||
|
||||
run dolt diff main...branch1 --summary
|
||||
echo $output
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "1 Row Unmodified (100.00%)" ]] || false
|
||||
[[ "$output" =~ "1 Row Added (100.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "6 Cells Added (100.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
|
||||
}
|
||||
|
||||
@test "diff: data and schema changes" {
|
||||
dolt sql <<SQL
|
||||
drop table test;
|
||||
@@ -242,8 +389,28 @@ EOF
|
||||
run dolt diff head^ head fake
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
|
||||
|
||||
# Two dot
|
||||
run dolt diff head^..head test other
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 0" ]] || false
|
||||
[[ "$output" =~ "+ | 9" ]] || false
|
||||
|
||||
run dolt diff head^..head fake
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
|
||||
|
||||
run dolt diff head^.. test other
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 0" ]] || false
|
||||
[[ "$output" =~ "+ | 9" ]] || false
|
||||
|
||||
run dolt diff head^.. fake
|
||||
[ "$status" -ne 0 ]
|
||||
[[ "$output" =~ "table fake does not exist in either revision" ]] || false
|
||||
}
|
||||
|
||||
|
||||
@test "diff: with table and branch of the same name" {
|
||||
dolt sql -q 'create table dolomite (pk int not null primary key)'
|
||||
dolt add .
|
||||
@@ -270,6 +437,10 @@ EOF
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 9" ]] || false
|
||||
[[ ! "$output" =~ "+ | 0" ]] || false
|
||||
run dolt diff head^..head dolomite
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "+ | 9" ]] || false
|
||||
[[ ! "$output" =~ "+ | 0" ]] || false
|
||||
dolt branch -D dolomite
|
||||
dolt sql -q 'insert into dolomite values (8)'
|
||||
run dolt diff dolomite
|
||||
@@ -411,6 +582,16 @@ SQL
|
||||
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
|
||||
run dolt diff --summary firstbranch..newbranch
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "1 Row Unmodified (100.00%)" ]] || false
|
||||
[[ "$output" =~ "1 Row Added (100.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "6 Cells Added (100.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
|
||||
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
|
||||
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
|
||||
}
|
||||
|
||||
@test "diff: summary shows correct changes after schema change" {
|
||||
@@ -556,6 +737,22 @@ SQL
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "Error running diff query" ]] || false
|
||||
[[ "$output" =~ "where pk=4" ]] || false
|
||||
|
||||
# Two dot
|
||||
run dolt diff test1..test2
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "44" ]] || false
|
||||
[[ "$output" =~ "55" ]] || false
|
||||
|
||||
run dolt diff test1..test2 --where "from_pk=4 OR to_pk=5"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "44" ]] || false
|
||||
[[ "$output" =~ "55" ]] || false
|
||||
|
||||
run dolt diff test1..test2 --where "pk=4"
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "Error running diff query" ]] || false
|
||||
[[ "$output" =~ "where pk=4" ]] || false
|
||||
}
|
||||
|
||||
@test "diff: diff summary incorrect primary key set change regression test" {
|
||||
@@ -643,6 +840,36 @@ SQL
|
||||
run dolt diff ref.with.period test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
|
||||
run dolt diff $FIRST_COMMIT..test-branch
|
||||
[ $status -eq 0 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff main@$FIRST_COMMIT..test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff ref.with.period..test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
|
||||
run dolt diff $FIRST_COMMIT...test-branch
|
||||
[ $status -eq 0 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff main@$FIRST_COMMIT...test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff ref.with.period...test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
|
||||
run dolt diff --merge-base $FIRST_COMMIT test-branch
|
||||
[ $status -eq 0 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff --merge-base main@$FIRST_COMMIT test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
run dolt diff --merge-base ref.with.period test-branch
|
||||
[ $status -eq 1 ]
|
||||
[[ ! $output =~ "panic" ]]
|
||||
}
|
||||
|
||||
@test "diff: with foreign key and sql output" {
|
||||
@@ -698,6 +925,13 @@ SQL
|
||||
[[ "$output" =~ "pv1" ]] || false
|
||||
[[ "$output" =~ "cv1" ]] || false
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt diff main..another-branch
|
||||
echo $output
|
||||
! [[ "$output" =~ "panic" ]] || false
|
||||
[[ "$output" =~ "pv1" ]] || false
|
||||
[[ "$output" =~ "cv1" ]] || false
|
||||
[ $status -eq 0 ]
|
||||
}
|
||||
|
||||
@test "diff: sql update queries only show changed columns" {
|
||||
|
||||
17
integration-tests/bats/dump-docs.bats
Normal file
@@ -0,0 +1,17 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash

setup() {
setup_common
}

teardown() {
assert_feature_version
teardown_common
}

@test "dump-docs: works" {
run dolt dump-docs
[ "$status" -eq 0 ]
[[ ! "$output" =~ "error: Failed to dump docs" ]] || false
}

@@ -13,15 +13,6 @@ def _print_err_and_exit(e):
print(e, file=sys.stderr)
sys.exit(1)

def csv_to_row_maps(csv_str):
csv_str = csv_str.replace('\\n', '\n')
rd = csv.DictReader(StringIO(csv_str))
rows = []
for row in rd:
rows.append(row)

return rows

class DoltConnection(object):
def __init__(self, user='root', password=None, host='127.0.0.1', port=3306, database='dolt', auto_commit=False):
self.user = user
@@ -42,30 +33,6 @@ class DoltConnection(object):
def close(self):
self.cnx.close()

def query(self, query_str, exit_on_err=True):
try:
cursor = self.cnx.cursor()
cursor.execute(query_str)

if cursor.description is None:
return [], cursor.rowcount

raw = cursor.fetchall()

row_maps = []
for curr in raw:
r = {}
for i, k in enumerate(cursor.column_names):
r[k] = str(curr[i])
row_maps.append(r)

return row_maps, cursor.rowcount

except BaseException as e:
if exit_on_err:
_print_err_and_exit(e)
raise e

class InfiniteRetryConnection(DoltConnection):
def connect(self):
while True:

@@ -2,88 +2,6 @@ SERVER_REQS_INSTALLED="FALSE"
|
||||
SERVER_PID=""
|
||||
DEFAULT_DB=""
|
||||
|
||||
PYTHON_QUERY_SCRIPT="
|
||||
import os
|
||||
import sys
|
||||
|
||||
args = sys.argv[sys.argv.index('--') + 1:]
|
||||
query_results = None
|
||||
expected_exception = None
|
||||
|
||||
working_dir, database, port_str, auto_commit, username, password, query_strs = args[0:7]
|
||||
if len(args) > 7:
|
||||
query_results = args[7]
|
||||
if len(args) > 8:
|
||||
expected_exception = args[8]
|
||||
|
||||
print('User: ' + username)
|
||||
print('Password: ' + password)
|
||||
print('Query Strings: ' + query_strs)
|
||||
print('Working Dir: ' + working_dir)
|
||||
print('Database: ' + database)
|
||||
print('Port: ' + port_str)
|
||||
print('Autocommit: ' + auto_commit)
|
||||
print('Expected Results: ' + str(query_results))
|
||||
print('Expected Exception: ' + str(expected_exception))
|
||||
|
||||
os.chdir(working_dir)
|
||||
|
||||
if auto_commit == '1':
|
||||
auto_commit = True
|
||||
else:
|
||||
auto_commit = False
|
||||
|
||||
from pytest import DoltConnection, csv_to_row_maps
|
||||
|
||||
if not database:
|
||||
dc = DoltConnection(port=int(port_str), database=None, user=username, password=password, auto_commit=auto_commit)
|
||||
else:
|
||||
dc = DoltConnection(port=int(port_str), database=database, user=username, password=password, auto_commit=auto_commit)
|
||||
|
||||
try:
|
||||
dc.connect()
|
||||
except BaseException as e:
|
||||
print('caught exception', str(e))
|
||||
if expected_exception is not None and len(expected_exception) > 0:
|
||||
if expected_exception not in str(e):
|
||||
print('expected exception: ', expected_exception, '\n got: ', str(e))
|
||||
sys.exit(1)
|
||||
else:
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
queries = query_strs.split(';')
|
||||
expected = [None]*len(queries)
|
||||
|
||||
if query_results is not None:
|
||||
expected = query_results.split(';')
|
||||
if len(expected) < len(queries):
|
||||
expected.extend(['']*(len(queries)-len(expected)))
|
||||
|
||||
for i in range(len(queries)):
|
||||
query_str = queries[i].strip()
|
||||
print('executing:', query_str)
|
||||
|
||||
actual_rows, num_rows = None, None
|
||||
try:
|
||||
actual_rows, num_rows = dc.query(query_str, False)
|
||||
except BaseException as e:
|
||||
print('caught exception', str(e))
|
||||
if expected_exception is not None and len(expected_exception) > 0:
|
||||
if expected_exception not in str(e):
|
||||
print('expected exception: ', expected_exception, '\n got: ', str(e))
|
||||
sys.exit(1)
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
if expected[i] is not None and expected[i] != '':
|
||||
print('Raw Expected: ', expected[i])
|
||||
expected_rows = csv_to_row_maps(expected[i])
|
||||
if expected_rows != actual_rows:
|
||||
print('expected:', expected_rows, '\n actual:', actual_rows)
|
||||
sys.exit(1)
|
||||
"
|
||||
|
||||
set_server_reqs_installed() {
|
||||
SERVER_REQS_INSTALLED=$(python3 -c "
|
||||
requirements_installed = True
|
||||
@@ -206,53 +124,6 @@ stop_sql_server() {
|
||||
SERVER_PID=
|
||||
}
|
||||
|
||||
# server_query connects to a running mysql server,
|
||||
# executes a query (or list of queries separated by a `;`),
|
||||
# and compares the results against what is expected.
|
||||
#
|
||||
# EXAMPLE: server_query db1 1 dolt "" "select * from test" "c1\n0"
|
||||
#
|
||||
# If executing multiple queries, separate the expected CSV values with a `;`.
|
||||
#
|
||||
# EXAMPLE: server_query "" 1 dolt "" "use db1; select * from test" ";c1\n0"
|
||||
#
|
||||
# If you expect an exception, leave query results blank and add an additional
|
||||
# value of 1 to the end of the call. This could be improved to actually send
|
||||
# up the exception string to be checked but I could not figure out how to do
|
||||
# that. When calling with bats use `run` and then check the $output if you
|
||||
# want to inspect the exception string.
|
||||
#
|
||||
# EXAMPLE: run server_query "" 1 dolt "" "garbage" "" 1
|
||||
# [[ "$output" =~ "error" ]] || false
|
||||
#
|
||||
# In the event that the results do not match expectations,
|
||||
# the python process will exit with an exit code of 1
|
||||
#
|
||||
# * param1: The database name for the connection string.
|
||||
# Leave empy for no database.
|
||||
# * param2: 1 for autocommit = true, 0 for autocommit = false
|
||||
# * param3: User
|
||||
# * param4: Password
|
||||
# * param5: Query string or query strings separated by `;`
|
||||
# * param6: A csv representing the expected result set.
|
||||
# If a query is not expected to have a result set "" should
|
||||
# be passed. Seprate multiple result sets with `;`
|
||||
# * param7: Expected exception value of 1. Mutually exclusive with param6.
|
||||
#
|
||||
server_query() {
|
||||
server_query_with_port "$PORT" "$@"
|
||||
}
|
||||
|
||||
# See server_query, but first parameter is the port sql-server is running on,
|
||||
# every other parameter is positionally one later.
|
||||
server_query_with_port() {
|
||||
let PORT="$1"
|
||||
shift
|
||||
PYTEST_DIR="$BATS_TEST_DIRNAME/helper"
|
||||
echo Executing server_query
|
||||
python3 -u -c "$PYTHON_QUERY_SCRIPT" -- "$PYTEST_DIR" "$1" "$PORT" "$2" "$3" "$4" "$5" "$6" "$7"
|
||||
}
|
||||
|
||||
definePORT() {
|
||||
getPORT=""
|
||||
for i in {0..9}
|
||||
|
||||
@@ -248,3 +248,21 @@ SQL
run dolt schema show t
[[ "$output" =~ "PRIMARY KEY (\`pk1\`,\`pk2\`)" ]] || false
}

@test "migrate: removed tables stay removed" {
dolt sql -q "create table alpha (pk int primary key);"
dolt sql -q "create table beta (pk int primary key);"
dolt commit -Am "create tables"

dolt sql -q "alter table alpha rename to zulu;"
dolt sql -q "drop table beta"
dolt commit -Am "rename table alpha to zeta, drop table beta"

dolt migrate

run dolt ls
[ $status -eq 0 ]
[[ "$output" =~ "zulu" ]] || false
[[ ! "$output" =~ "alpha" ]] || false
[[ ! "$output" =~ "beta" ]] || false
}

@@ -14,14 +14,14 @@ teardown() {
}

@test "migration-integration: first-hour-db" {
dolt clone dolthub/first-hour-db
cd first-hour-db
dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
cd first-hour-db-migration-int

dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
[[ ! "$output" =~ "hc7v93mjpbfassljk9kdk48b1fntsn6a" ]] || false
[[ ! "$output" =~ "tdkt7s7805k1ml4hu37pm688g5i0b8ie" ]] || false

dolt migrate
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
@@ -29,7 +29,7 @@ teardown() {
|
||||
dolt tag -v
|
||||
run dolt tag -v
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "hc7v93mjpbfassljk9kdk48b1fntsn6a" ]] || false
|
||||
[[ "$output" =~ "tdkt7s7805k1ml4hu37pm688g5i0b8ie" ]] || false
|
||||
[[ ! "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
|
||||
|
||||
# validate TEXT migration
|
||||
@@ -39,15 +39,15 @@ teardown() {
|
||||
}
|
||||
|
||||
@test "migration-integration: first-hour-db after garbage collection" {
|
||||
dolt clone dolthub/first-hour-db
|
||||
cd first-hour-db
|
||||
dolt clone https://doltremoteapi.dolthub.com/dolthub/first-hour-db-migration-int
|
||||
cd first-hour-db-migration-int
|
||||
dolt gc
|
||||
|
||||
dolt tag -v
|
||||
run dolt tag -v
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
|
||||
[[ ! "$output" =~ "hc7v93mjpbfassljk9kdk48b1fntsn6a" ]] || false
|
||||
[[ ! "$output" =~ "tdkt7s7805k1ml4hu37pm688g5i0b8ie" ]] || false
|
||||
|
||||
dolt migrate
|
||||
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
|
||||
@@ -55,7 +55,7 @@ teardown() {
|
||||
dolt tag -v
|
||||
run dolt tag -v
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "hc7v93mjpbfassljk9kdk48b1fntsn6a" ]] || false
|
||||
[[ "$output" =~ "tdkt7s7805k1ml4hu37pm688g5i0b8ie" ]] || false
|
||||
[[ ! "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
|
||||
|
||||
# validate TEXT migration
|
||||
@@ -65,14 +65,14 @@ teardown() {
|
||||
}
|
||||
|
||||
@test "migration-integration: us-jails" {
|
||||
dolt clone dolthub/us-jails
|
||||
cd us-jails
|
||||
dolt clone https://doltremoteapi.dolthub.com/dolthub/us-jails-migration-integration
|
||||
cd us-jails-migration-integration
|
||||
|
||||
dolt tag -v
|
||||
run dolt tag -v
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "u8s83gapv7ghnbmrtpm8q5es0dbl7lpd" ]] || false
|
||||
[[ ! "$output" =~ "i3f3orlfmbjgqnst90c8r96jps7tdtv9" ]] || false
|
||||
[[ ! "$output" =~ "apdp3stea20mmm80oiu2ipo07a7v1hvb" ]] || false
|
||||
|
||||
dolt migrate
|
||||
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
|
||||
@@ -80,6 +80,6 @@ teardown() {
|
||||
dolt tag -v
|
||||
run dolt tag -v
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "i3f3orlfmbjgqnst90c8r96jps7tdtv9" ]] || false
|
||||
[[ "$output" =~ "apdp3stea20mmm80oiu2ipo07a7v1hvb" ]] || false
|
||||
[[ ! "$output" =~ "u8s83gapv7ghnbmrtpm8q5es0dbl7lpd" ]] || false
|
||||
}
|
||||
|
||||
@@ -30,8 +30,14 @@ teardown() {
@test "multidb: database default branches" {
cd dbs1
start_multi_db_server repo1
server_query repo1 1 dolt "" "create database new; use new; call dcheckout('-b', 'feat'); create table t (x int); call dolt_add('.'); call dcommit('-am', 'cm'); set @@global.new_default_branch='feat'"
server_query repo1 1 dolt "" "use repo1"
dolt sql-client -u dolt -P $PORT --use-db repo1 -q "create database new;
use new;
call dcheckout('-b', 'feat');
create table t (x int);
call dolt_add('.');
call dcommit('-am', 'cm');
set @@global.new_default_branch='feat'"
dolt sql-client -u dolt --use-db '' -P $PORT -q "use repo1"
}

@test "multidb: incompatible BIN FORMATs" {

@@ -45,13 +45,14 @@ teardown() {
|
||||
dolt checkout -b other
|
||||
start_sql_server repo1
|
||||
|
||||
run server_query repo1 1 dolt "" "call dolt_push()" "" "" 1
|
||||
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "the current branch has no upstream branch" ]] || false
|
||||
|
||||
server_query repo1 1 dolt "" "call dolt_push('--set-upstream', 'origin', 'other') " ""
|
||||
|
||||
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push('--set-upstream', 'origin', 'other')"
|
||||
|
||||
skip "In-memory branch doesn't track upstream"
|
||||
server_query repo1 1 dolt "" "call dolt_push()" ""
|
||||
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "call dolt_push()"
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: push on sql-session commit" {
|
||||
@@ -61,7 +62,7 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_to_remote remote1
|
||||
start_sql_server repo1
|
||||
|
||||
server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
|
||||
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
|
||||
|
||||
cd ../repo2
|
||||
dolt pull remote1
|
||||
@@ -81,7 +82,7 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_async_replication 1
|
||||
start_sql_server repo1
|
||||
|
||||
server_query repo1 1 dolt "" "CALL DOLT_COMMIT('-am', 'Step 1');"
|
||||
dolt sql-client --use-db repo1 -P $PORT -u dolt -q "CALL DOLT_COMMIT('-am', 'Step 1');"
|
||||
|
||||
# wait for the process to exit after we stop it
|
||||
stop_sql_server 1
|
||||
@@ -108,7 +109,10 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables" -r csv
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Tables_in_repo2" ]] || false
|
||||
[[ "$output" =~ "test" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: pull remote not found error" {
|
||||
@@ -133,7 +137,9 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo1
|
||||
|
||||
run server_query repo1 1 dolt "" "show tables" "Table\n"
|
||||
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Table" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: push remote not found error" {
|
||||
@@ -156,7 +162,10 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_to_remote unknown
|
||||
start_sql_server repo1
|
||||
|
||||
server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\ntest"
|
||||
run dolt sql-client --use-db repo1 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Tables_in_repo1" ]] || false
|
||||
[[ "$output" =~ "test" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: pull multiple heads" {
|
||||
@@ -172,8 +181,16 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main,new_feature
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "select dolt_checkout('new_feature') as b" "b\n0"
|
||||
server_query repo2 1 dolt "" "select name from dolt_branches order by name" "name\nmain\nnew_feature"
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select dolt_checkout('new_feature') as b"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "b" ]] || false
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "select name from dolt_branches order by name"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "name" ]] || false
|
||||
[[ "$output" =~ "main" ]] || false
|
||||
[[ "$output" =~ "new_feature" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: connect to remote head" {
|
||||
@@ -194,25 +211,37 @@ teardown() {
|
||||
start_sql_server repo2
|
||||
|
||||
# No data on main
|
||||
server_query repo2 1 dolt "" "show tables" ""
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
# Can't use dolt sql-client to connect to branches
|
||||
|
||||
# Connecting to heads that exist only on the remote should work fine (they get fetched)
|
||||
server_query "repo2/new_feature" 1 dolt "" "show tables" "Tables_in_repo2/new_feature\ntest"
|
||||
server_query repo2 1 dolt "" 'use `repo2/new_feature2`' ""
|
||||
server_query repo2 1 dolt "" 'select * from `repo2/new_feature2`.test' "pk\n0\n1\n2"
|
||||
|
||||
dolt sql-client --use-db "repo2/new_feature" -u dolt -P $PORT -q "show tables" "Tables_in_repo2/new_feature\ntest"
|
||||
dolt sql-client --use-db repo2 -P $PORT -u dolt -q 'use `repo2/new_feature2`'
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q 'select * from `repo2/new_feature2`.test'
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "pk" ]] || false
|
||||
[[ "$output" =~ " 0 " ]] || false
|
||||
[[ "$output" =~ " 1 " ]] || false
|
||||
[[ "$output" =~ " 2 " ]] || false
|
||||
|
||||
# Connecting to heads that don't exist should error out
|
||||
run server_query "repo2/notexist" 1 dolt "" 'use `repo2/new_feature2`' "" 1
|
||||
run dolt sql-client --use-db "repo2/notexist" -u dolt -P $PORT -q 'use `repo2/new_feature2`'
|
||||
[ $status -ne 0 ]
|
||||
[[ $output =~ "database not found" ]] || false
|
||||
|
||||
run server_query repo2 1 dolt "" 'use `repo2/notexist`' "" 1
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q 'use `repo2/notexist`'
|
||||
[ $status -ne 0 ]
|
||||
[[ $output =~ "database not found" ]] || false
|
||||
|
||||
# Creating a branch locally that doesn't exist on the remote
|
||||
# works, but connecting to it is an error (nothing to pull)
|
||||
server_query "repo2/new_feature" 1 dolt "" "select dolt_checkout('-b', 'new_branch') as b" "b\n0"
|
||||
dolt sql-client --use-db "repo2/new_feature" -u dolt -P $PORT -q "select dolt_checkout('-b', 'new_branch')"
|
||||
|
||||
run server_query "repo2/new_branch" 1 dolt "" "show tables" "Table\ntest" "" 1
|
||||
run dolt sql-client --use-db "repo2/new_branch" -u dolt -P $PORT -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
[[ $output =~ "database not found" ]] || false
|
||||
}
|
||||
|
||||
@@ -228,7 +257,10 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "Tables_in_repo2" ]] || false
|
||||
[[ $output =~ "test" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: pull invalid head" {
|
||||
@@ -240,7 +272,8 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads unknown
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "" 1
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "remote not found: 'unknown'" ]] || false
|
||||
}
|
||||
|
||||
@@ -253,7 +286,8 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "" 1
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
[[ "$output" =~ "remote not found: 'unknown'" ]] || false
|
||||
}
|
||||
|
||||
@@ -270,7 +304,9 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "Table\n"
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
|
||||
[ $status -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: connect to missing branch pulls remote" {
|
||||
@@ -286,8 +322,15 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "SHOW tables" "" # no tables on main
|
||||
server_query "repo2/feature-branch" 1 dolt "" "SHOW Tables" "Tables_in_repo2/feature-branch\ntest"
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "SHOW tables"
|
||||
[ $status -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
|
||||
# Can't connect to a specific branch with dolt sql-client
|
||||
run dolt sql-client --use-db "repo2/feature-branch" -u dolt -P $PORT -q "SHOW Tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "feature-branch" ]] || false
|
||||
[[ $output =~ "test" ]] || false
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: connect to hash works" {
|
||||
@@ -303,8 +346,14 @@ teardown() {
|
||||
dolt config --local --add sqlserver.global.dolt_replicate_heads main
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
|
||||
server_query repo2 1 dolt "" "use \`repo2/$head_hash\`" ""
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "Tables_in_repo2" ]] || false
|
||||
[[ $output =~ "test" ]] || false
|
||||
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/$head_hash\`"
|
||||
[ $status -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
@test "remotes-sql-server: connect to tag works" {
|
||||
@@ -321,8 +370,14 @@ teardown() {
|
||||
dolt tag v1
|
||||
start_sql_server repo2
|
||||
|
||||
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\ntest"
|
||||
server_query repo2 1 dolt "" "use \`repo2/v1\`" ""
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "Tables_in_repo2" ]] || false
|
||||
[[ $output =~ "test" ]] || false
|
||||
|
||||
run dolt sql-client --use-db repo2 -P $PORT -u dolt -q "use \`repo2/v1\`"
|
||||
[ $status -eq 0 ]
|
||||
[ "$output" = "" ]
|
||||
}
|
||||
|
||||
get_head_commit() {
|
||||
|
||||
@@ -290,15 +290,15 @@ SQL
|
||||
start_multi_db_server repo1
|
||||
cd ..
|
||||
|
||||
server_query repo1 1 dolt "" "create table t1 (a int primary key)"
|
||||
server_query repo1 1 dolt "" "call dolt_add('.')"
|
||||
server_query repo1 1 dolt "" "call dolt_commit('-am', 'cm')"
|
||||
server_query repo2 1 dolt "" "create table t2 (a int primary key)"
|
||||
server_query repo2 1 dolt "" "call dolt_add('.')"
|
||||
server_query repo2 1 dolt "" "call dolt_commit('-am', 'cm')"
|
||||
server_query repo3 1 dolt "" "create table t3 (a int primary key)"
|
||||
server_query repo3 1 dolt "" "call dolt_add('.')"
|
||||
server_query repo3 1 dolt "" "call dolt_commit('-am', 'cm')"
|
||||
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "create table t1 (a int primary key)"
|
||||
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_add('.')"
|
||||
dolt sql-client --use-db repo1 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
|
||||
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "create table t2 (a int primary key)"
|
||||
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_add('.')"
|
||||
dolt sql-client --use-db repo2 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
|
||||
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "create table t3 (a int primary key)"
|
||||
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_add('.')"
|
||||
dolt sql-client --use-db repo3 -u dolt -P $PORT -q "call dolt_commit('-am', 'cm')"
|
||||
|
||||
clone_helper $TMPDIRS
|
||||
|
||||
@@ -344,7 +344,18 @@ SQL
|
||||
cd dbs1
|
||||
start_multi_db_server repo1
|
||||
|
||||
server_query repo1 1 dolt "" "show tables" "Tables_in_repo1\nt1"
|
||||
server_query repo2 1 dolt "" "show tables" "Tables_in_repo2\nt2"
|
||||
server_query repo3 1 dolt "" "show tables" "Tables_in_repo3\nt3"
|
||||
run dolt sql-client --use-db repo1 -u dolt -P $PORT -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ Tables_in_repo1 ]] || false
|
||||
[[ "$output" =~ t1 ]] || false
|
||||
|
||||
run dolt sql-client --use-db repo2 -u dolt -P $PORT -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ Tables_in_repo2 ]] || false
|
||||
[[ "$output" =~ t2 ]] || false
|
||||
|
||||
run dolt sql-client --use-db repo3 -u dolt -P $PORT -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ Tables_in_repo3 ]] || false
|
||||
[[ "$output" =~ t3 ]] || false
|
||||
}
|
||||
|
||||
@@ -39,10 +39,9 @@ teardown() {
|
||||
@test "sql-charsets-collations: define charset and collation on a database" {
|
||||
start_sql_server
|
||||
|
||||
server_query "" 1 dolt "" "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
|
||||
skip "Defining charsets and collations on a database not supported"
|
||||
server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
|
||||
server_query "test" 1 dolt "" "use test; SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
|
||||
dolt sql-client -u dolt --use-db '' -P $PORT -q "CREATE DATABASE test CHARACTER SET latin1 COLLATE latin1_swedish_ci;"
|
||||
dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.character_set_database\nlatin1"
|
||||
dolt sql-client -u dolt --use-db test -P $PORT -q "SELECT @@character_set_database" ";@@SESSION.collation_database\nlatin1_swedish_ci"
|
||||
}
|
||||
|
||||
@test "sql-charsets-collations: define and use a collation and charset" {
|
||||
|
||||
@@ -72,4 +72,65 @@ teardown() {
|
||||
run dolt sql-client
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ "--user or -u argument is required" ]] || false
|
||||
}
|
||||
}
|
||||
|
||||
@test "sql-client: multiple statments in --query" {
|
||||
cd repo1
|
||||
start_sql_server repo1
|
||||
|
||||
dolt sql-client -u dolt -P $PORT --use-db repo1 -q "
|
||||
create table t(c int);
|
||||
insert into t values (0),(1);
|
||||
update t set c=2 where c=0;"
|
||||
run dolt sql-client -u dolt -P $PORT --use-db repo1 -q "select c from t"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ " 1 " ]] || false
|
||||
[[ $output =~ " 2 " ]] || false
|
||||
! [[ $output =~ " 0 " ]] || false
|
||||
}
|
||||
|
||||
@test "sql-client: no-auto-commit" {
|
||||
cd repo1
|
||||
start_sql_server repo1
|
||||
|
||||
dolt sql-client -u dolt -P $PORT --use-db repo1 --no-auto-commit -q "CREATE TABLE one_pk (
|
||||
pk BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk)
|
||||
)"
|
||||
run dolt ls
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "No tables in working set" ]] || false
|
||||
|
||||
# Now issue a manual commit
|
||||
dolt sql-client -u dolt -P $PORT --use-db repo1 --no-auto-commit -q "CREATE TABLE one_pk (
|
||||
pk BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk));
|
||||
COMMIT;"
|
||||
run dolt ls
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "one_pk" ]] || false
|
||||
}
|
||||
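The --no-auto-commit behavior checked above can also be reproduced from a plain MySQL connection by disabling autocommit on the session. A hedged Go sketch, not part of this commit, assuming the same server is reachable at 127.0.0.1:3306 as user dolt with database repo1:

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed client driver
)

func main() {
	ctx := context.Background()
	// Assumed endpoint and credentials, for illustration only.
	db, err := sql.Open("mysql", "dolt@tcp(127.0.0.1:3306)/repo1")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pin a single session so the autocommit setting and the later COMMIT
	// apply to the same connection from the pool.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, "set @@autocommit = 0"); err != nil {
		log.Fatal(err)
	}
	if _, err := conn.ExecContext(ctx, "create table one_pk (pk bigint primary key)"); err != nil {
		log.Fatal(err)
	}
	// Until this COMMIT the new table is not written to the working set,
	// which is what the `dolt ls` assertions in the test are checking.
	if _, err := conn.ExecContext(ctx, "commit"); err != nil {
		log.Fatal(err)
	}
}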
|
||||
@test "sql-client: connect directly to a branch using --use-db" {
|
||||
cd repo1
|
||||
dolt branch branch1
|
||||
start_sql_server repo1
|
||||
|
||||
dolt sql-client -u dolt -P $PORT --use-db repo1/branch1 -q "
|
||||
create table t(c int);
|
||||
insert into t values (0),(1);
|
||||
update t set c=2 where c=0;"
|
||||
run dolt sql-client -u dolt -P $PORT --use-db repo1/branch1 -q "select c from t"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ " 1 " ]] || false
|
||||
[[ $output =~ " 2 " ]] || false
|
||||
! [[ $output =~ " 0 " ]] || false
|
||||
|
||||
run dolt sql-client -u dolt -P $PORT --use-db repo1 -q "select c from t"
|
||||
[ $status -ne 0 ]
|
||||
[[ $output =~ "not found" ]] || false
|
||||
}
|
||||
|
||||
@@ -60,9 +60,15 @@ teardown() {
|
||||
SERVER_PID=$! # will get killed by teardown_common
|
||||
sleep 5 # not using python wait so this works on windows
|
||||
|
||||
server_query test_db 1 root "" "select user from mysql.user order by user" "User\nroot"
|
||||
server_query test_db 1 root "" "create user new_user" ""
|
||||
server_query test_db 1 root "" "select user from mysql.user order by user" "User\nnew_user\nroot"
|
||||
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "root" ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u root --use-db test_db -q "create user new_user"
|
||||
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "root" ]] || false
|
||||
[[ $output =~ "new_user" ]] || false
|
||||
|
||||
stop_sql_server
|
||||
rm -f .dolt/sql-server.lock
|
||||
@@ -73,7 +79,8 @@ teardown() {
|
||||
SERVER_PID=$! # will get killed by teardown_common
|
||||
sleep 5 # not using python wait so this works on windows
|
||||
|
||||
server_query test_db 1 root "" "select user from mysql.user order by user" "" 1
|
||||
run dolt sql-client -P $PORT -u root --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -ne 0 ]
|
||||
}
|
||||
|
||||
@test "sql-privs: starting server with empty config works" {
|
||||
@@ -82,10 +89,16 @@ teardown() {
|
||||
|
||||
start_sql_server_with_config test_db server.yaml
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "dolt" ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "dolt" ]] || false
|
||||
[[ $output =~ "new_user" ]] || false
|
||||
|
||||
run ls -a
|
||||
[[ "$output" =~ ".doltcfg" ]] || false
|
||||
|
||||
@@ -111,9 +124,12 @@ behavior:
|
||||
|
||||
dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
|
||||
SERVER_PID=$!
|
||||
sleep 1
|
||||
sleep 5
|
||||
|
||||
server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
|
||||
|
||||
run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "cmddolt" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-privs: yaml with user is also replaced with command line user" {
|
||||
@@ -136,9 +152,11 @@ behavior:
|
||||
|
||||
dolt sql-server --port=$PORT --config server.yaml --user cmddolt &
|
||||
SERVER_PID=$!
|
||||
sleep 1
|
||||
sleep 5
|
||||
|
||||
server_query test_db 1 cmddolt "" "select user from mysql.user order by user" "User\ncmddolt"
|
||||
run dolt sql-client -P $PORT -u cmddolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "cmddolt" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-privs: yaml specifies doltcfg dir" {
|
||||
@@ -148,9 +166,16 @@ behavior:
|
||||
|
||||
start_sql_server_with_config test_db server.yaml
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -167,10 +192,17 @@ behavior:
|
||||
|
||||
start_sql_server_with_config test_db server.yaml
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
[[ "$output" =~ ".doltcfg" ]] || false
|
||||
[[ "$output" =~ "privs.db" ]] || false
|
||||
@@ -186,9 +218,18 @@ behavior:
|
||||
|
||||
start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nprivs_user"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ privs_user ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
[[ $output =~ privs_user ]] || false
|
||||
|
||||
# Test that privs.json file is not in json format
|
||||
run cat privs.json
|
||||
@@ -198,7 +239,12 @@ behavior:
|
||||
rm -f ./.dolt/sql-server.lock
|
||||
stop_sql_server
|
||||
start_sql_server_with_args --host 0.0.0.0 --user=dolt --privilege-file=privs.json
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user\nprivs_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
[[ $output =~ privs_user ]] || false
|
||||
}
|
||||
|
||||
@test "sql-privs: errors instead of panic when reading badly formatted privilege file" {
|
||||
@@ -219,9 +265,16 @@ behavior:
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
[[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -234,7 +287,9 @@ behavior:
|
||||
make_test_repo
|
||||
|
||||
start_sql_server_with_args --host 127.0.0.1 --user=dolt
|
||||
server_query test_db 1 dolt "" "select user, host from mysql.user order by user" "User,Host\ndolt,%"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db --result-format csv -q "select user, host from mysql.user order by user"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "dolt,%" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-privs: multiple doltcfg directories causes error" {
|
||||
@@ -269,10 +324,24 @@ behavior:
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
! [[ "$output" =~ "privileges.db" ]] || false
|
||||
|
||||
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query db1 1 dolt "" "create user new_user" ""
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ db1 ]] || false
|
||||
[[ $output =~ db2 ]] || false
|
||||
[[ $output =~ db3 ]] || false
|
||||
[[ $output =~ information_schema ]] || false
|
||||
[[ $output =~ mysql ]] || false
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -295,10 +364,17 @@ behavior:
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
! [[ "$output" =~ "doltcfgdir" ]] || false
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
[[ "$output" =~ "doltcfgdir" ]] || false
|
||||
@@ -316,9 +392,16 @@ behavior:
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
! [[ "$output" =~ "privs.db" ]] || false
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
[[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -339,10 +422,24 @@ behavior:
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
! [[ "$output" =~ "privileges.db" ]] || false
|
||||
|
||||
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query db1 1 dolt "" "create user new_user" ""
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ db1 ]] || false
|
||||
[[ $output =~ db2 ]] || false
|
||||
[[ $output =~ db3 ]] || false
|
||||
[[ $output =~ information_schema ]] || false
|
||||
[[ $output =~ mysql ]] || false
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -370,10 +467,24 @@ behavior:
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
! [[ "$output" =~ "privs.db" ]] || false
|
||||
|
||||
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query db1 1 dolt "" "create user new_user" ""
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ db1 ]] || false
|
||||
[[ $output =~ db2 ]] || false
|
||||
[[ $output =~ db3 ]] || false
|
||||
[[ $output =~ information_schema ]] || false
|
||||
[[ $output =~ mysql ]] || false
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -397,9 +508,16 @@ behavior:
|
||||
! [[ "$output" =~ "doltcfgdir" ]] || false
|
||||
! [[ "$output" =~ "privs.db" ]] || false
|
||||
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
server_query test_db 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -422,10 +540,24 @@ behavior:
|
||||
! [[ "$output" =~ "privileges.db" ]] || false
|
||||
! [[ "$output" =~ "privs.db" ]] || false
|
||||
|
||||
server_query db1 1 dolt "" "show databases" "Database\ndb1\ndb2\ndb3\ninformation_schema\nmysql"
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt"
|
||||
server_query db1 1 dolt "" "create user new_user" ""
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ db1 ]] || false
|
||||
[[ $output =~ db2 ]] || false
|
||||
[[ $output =~ db3 ]] || false
|
||||
[[ $output =~ information_schema ]] || false
|
||||
[[ $output =~ mysql ]] || false
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db db1 -q "create user new_user"
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
|
||||
run ls -a
|
||||
! [[ "$output" =~ ".doltcfg" ]] || false
|
||||
@@ -449,7 +581,7 @@ behavior:
|
||||
dolt init
|
||||
start_sql_server_with_args --host 0.0.0.0 --user=dolt
|
||||
|
||||
server_query test_db 1 dolt "" "create user new_user" ""
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user new_user"
|
||||
stop_sql_server
|
||||
sleep 1
|
||||
run ls -a
|
||||
@@ -459,65 +591,91 @@ behavior:
|
||||
|
||||
cd db_dir
|
||||
start_sql_server_with_args --host 0.0.0.0 --user=dolt
|
||||
server_query db1 1 dolt "" "select user from mysql.user order by user" "User\ndolt\nnew_user"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db db1 -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ new_user ]] || false
|
||||
}
|
||||
|
||||
@test "sql-privs: basic lack of privileges tests" {
|
||||
make_test_repo
|
||||
start_sql_server
|
||||
|
||||
server_query test_db 1 dolt "" "create table t1(c1 int)"
|
||||
server_query test_db 1 dolt "" "create user test"
|
||||
server_query test_db 1 dolt "" "grant select on test_db.* to test"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test"
|
||||
|
||||
# Should only see test_db database
|
||||
server_query "" 1 test "" "show databases" "Database\ntest_db"
|
||||
server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
|
||||
run dolt sql-client -P $PORT -u dolt --use-db '' -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ test_db ]] || false
|
||||
|
||||
run dolt sql-client -P $PORT -u dolt --use-db test_db -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ t1 ]] || false
|
||||
|
||||
# Revoke works as expected
|
||||
server_query test_db 1 dolt "" "revoke select on test_db.* from test"
|
||||
server_query test_db 1 test "" "show tables" "" 1
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "revoke select on test_db.* from test"
|
||||
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
|
||||
# Host in privileges is respected
|
||||
server_query test_db 1 dolt "" "drop user test"
|
||||
server_query test_db 1 dolt "" "create user test@'127.0.0.1'"
|
||||
server_query test_db 1 dolt "" "grant select on test_db.* to test@'127.0.0.1'"
|
||||
server_query test_db 1 test "" "show tables" "Tables_in_test_db\nt1"
|
||||
server_query test_db 1 dolt "" "drop user test@'127.0.0.1'"
|
||||
server_query test_db 1 dolt "" "create user test@'10.10.10.10'"
|
||||
server_query test_db 1 dolt "" "grant select on test_db.* to test@'10.10.10.10'"
|
||||
server_query test_db 1 test "" "show tables" "" 1
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'127.0.0.1'"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'127.0.0.1'"
|
||||
run dolt sql-client -P $PORT -u test -H 127.0.0.1 --use-db test_db -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ t1 ]] || false
|
||||
|
||||
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "drop user test@'127.0.0.1'"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create user test@'10.10.10.10'"
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "grant select on test_db.* to test@'10.10.10.10'"
|
||||
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
}
|
||||
|
||||
@test "sql-privs: creating user identified by password" {
|
||||
make_test_repo
|
||||
start_sql_server
|
||||
|
||||
server_query test_db 1 dolt "" "create user test identified by 'test'" ""
|
||||
server_query test_db 1 dolt "" "grant select on mysql.user to test" ""
|
||||
dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test identified by 'test'"
|
||||
dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on mysql.user to test"
|
||||
|
||||
# Should not be able to connect to test_db
|
||||
server_query test_db 1 test test "select user from mysql.user order by user" "" 1
|
||||
run dolt sql-client -P $PORT -u test -p test --use-db test_db -q "select user from mysql.user order by user"
|
||||
[ $status -ne 0 ]
|
||||
|
||||
server_query "" 1 test test "select user from mysql.user order by user" "User\ndolt\ntest"
|
||||
run dolt sql-client -P $PORT -u test -p test --use-db '' -q "select user from mysql.user"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ dolt ]] || false
|
||||
[[ $output =~ test ]] || false
|
||||
|
||||
# Bad password can't connect
|
||||
server_query "" 1 test bad "select user from mysql.user order by user" "" 1
|
||||
run dolt sql-client -P $PORT -u test -p bad --use-db '' -q "select user from mysql.user order by user"
|
||||
[ $status -ne 0 ]
|
||||
|
||||
# Should only see mysql database
|
||||
server_query "" 1 test test "show databases" "Database\nmysql"
|
||||
run dolt sql-client -P $PORT -u test -p test --use-db '' -q "show databases"
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ mysql ]] || false
|
||||
! [[ $output =~ test_db ]] || false
|
||||
}
|
||||
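For comparison, the password-authenticated, restricted-visibility behavior asserted above looks like this from a Go client. A sketch, not part of this commit, assuming the server is reachable at 127.0.0.1:3306 and the user test/test from the test exists:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed client driver
)

func main() {
	// User "test" with password "test" and no default database selected,
	// mirroring `dolt sql-client -u test -p test --use-db ''` above.
	db, err := sql.Open("mysql", "test:test@tcp(127.0.0.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// With only a grant on mysql.user, SHOW DATABASES should list mysql
	// but not test_db, matching the assertions in the test.
	rows, err := db.Query("show databases")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name)
	}
}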
|
||||
@test "sql-privs: deleting user prevents access by that user" {
|
||||
make_test_repo
|
||||
start_sql_server
|
||||
|
||||
server_query test_db 1 dolt "" "create user test"
|
||||
server_query test_db 1 dolt "" "grant select on test_db.* to test" ""
|
||||
dolt sql-client -P $PORT -u dolt --use-db test_db -q "create table t1(c1 int)"
|
||||
dolt sql-client -P $PORT -u dolt --use-db '' -q "create user test"
|
||||
dolt sql-client -P $PORT -u dolt --use-db '' -q "grant select on test_db.* to test"
|
||||
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
|
||||
[ $status -eq 0 ]
|
||||
echo $output
|
||||
[[ $output =~ t1 ]] || false
|
||||
|
||||
server_query test_db 1 test "" "show tables" ""
|
||||
dolt sql-client -P $PORT -u dolt --use-db '' -q "drop user test"
|
||||
|
||||
server_query test_db 1 dolt "" "drop user test"
|
||||
|
||||
server_query test_db 1 test "" "show tables" "" 1
|
||||
run dolt sql-client -P $PORT -u test --use-db test_db -q "show tables"
|
||||
[ $status -ne 0 ]
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,3 @@
Create the ../*.pem files that are used by these tests.

Expects to be run from this directory like `go run .`.
integration-tests/go-sql-server-driver/gencerts/go.mod (new file, 3 lines)
@@ -0,0 +1,3 @@
module github.com/dolthub/dolt/integration-tests/go-sql-server-driver/gencerts

go 1.19
integration-tests/go-sql-server-driver/gencerts/main.go (new file, 337 lines)
@@ -0,0 +1,337 @@
|
||||
// Copyright 2022 Dolthub, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"path/filepath"
|
||||
"math/big"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Generates a 4096-bit RSA chain and an ed25519 chain.
// Each chain includes a root, an intermediate, a leaf with DNS and URI SANs.
// Root and intermediate have isCA=true and key usage CertSign.
// Leaf has isCA=false and key usage digitalSignature and extKeyUsage ServerAuth.
//
// Generates separate expired leafs for each key type.
//
// Emits private keys of the leafs. RSA keys are emitted PEM encoded PKCS1.
// ed25519 keys are emitted PEM encoded PKCS8.
//
// These certificates and private keys are used by
// tests/sql-server-cluster-tls.yaml and tests/sql-server-tls.yaml, for
// example.
//
// TODO: Further tests which should not verify? (SHA-1 signatures, expired
// roots or intermediates, wrong isCA, wrong key usage, etc.)

const RelPath = "../testdata"
|
||||
|
||||
func main() {
|
||||
rsacerts, err := MakeRSACerts()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = WriteRSACerts(rsacerts)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
edcerts, err := MakeEd25519Certs()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = WriteEd25519Certs(edcerts)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func WriteRSACerts(rsacerts TestCerts) error {
|
||||
err := os.WriteFile(filepath.Join(RelPath, "rsa_root.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: rsacerts.Root.Raw,
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "rsa_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: rsacerts.Leaf.Raw,
|
||||
}), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: rsacerts.Intermediate.Raw,
|
||||
})...), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "rsa_key.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(rsacerts.LeafKey.(*rsa.PrivateKey)),
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.WriteFile(filepath.Join(RelPath, "rsa_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: rsacerts.ExpiredLeaf.Raw,
|
||||
}), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: rsacerts.Intermediate.Raw,
|
||||
})...), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "rsa_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(rsacerts.ExpiredLeafKey.(*rsa.PrivateKey)),
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteEd25519Certs(edcerts TestCerts) error {
|
||||
err := os.WriteFile(filepath.Join(RelPath, "ed25519_root.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: edcerts.Root.Raw,
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "ed25519_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: edcerts.Leaf.Raw,
|
||||
}), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: edcerts.Intermediate.Raw,
|
||||
})...), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keybytes, err := x509.MarshalPKCS8PrivateKey(edcerts.LeafKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "ed25519_key.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Bytes: keybytes,
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.WriteFile(filepath.Join(RelPath, "ed25519_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: edcerts.ExpiredLeaf.Raw,
|
||||
}), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: edcerts.Intermediate.Raw,
|
||||
})...), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keybytes, err = x509.MarshalPKCS8PrivateKey(edcerts.ExpiredLeafKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(RelPath, "edcerts_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Bytes: keybytes,
|
||||
}), 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type TestCerts struct {
|
||||
Root *x509.Certificate
|
||||
Intermediate *x509.Certificate
|
||||
Leaf *x509.Certificate
|
||||
LeafKey any
|
||||
ExpiredLeaf *x509.Certificate
|
||||
ExpiredLeafKey any
|
||||
}
|
||||
|
||||
func MakeRSACerts() (TestCerts, error) {
|
||||
genKey := func() (any, any, error) {
|
||||
key, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return key.Public(), key, nil
|
||||
}
|
||||
return MakeCerts("RSA 4096-bit", genKey)
|
||||
}
|
||||
|
||||
func MakeEd25519Certs() (TestCerts, error) {
|
||||
genKey := func() (any, any, error) {
|
||||
return ed25519.GenerateKey(rand.Reader)
|
||||
}
|
||||
return MakeCerts("ed25519", genKey)
|
||||
}
|
||||
|
||||
func MakeCerts(desc string, genKey func() (any, any, error)) (TestCerts, error) {
|
||||
nbf := time.Now().Add(-24 * time.Hour)
|
||||
exp := nbf.Add(24 * 365 * 10 * time.Hour)
|
||||
badExp := nbf.Add(12 * time.Hour)
|
||||
|
||||
rootpub, rootpriv, err := genKey()
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
intpub, intpriv, err := genKey()
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
leafpub, leafpriv, err := genKey()
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
exppub, exppriv, err := genKey()
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
|
||||
signer, err := NewRootSigner(&x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{
|
||||
Country: []string{"US"},
|
||||
Organization: []string{"DoltHub, Inc."},
|
||||
CommonName: "dolt integration tests " + desc + " Root",
|
||||
},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageCertSign,
|
||||
NotBefore: nbf,
|
||||
NotAfter: exp,
|
||||
}, rootpub, rootpriv)
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
|
||||
intcert, err := signer.Sign(&x509.Certificate{
|
||||
SerialNumber: big.NewInt(2),
|
||||
Subject: pkix.Name{
|
||||
Country: []string{"US"},
|
||||
Organization: []string{"DoltHub, Inc."},
|
||||
CommonName: "dolt integration tests " + desc + " Intermediate",
|
||||
},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
KeyUsage: x509.KeyUsageCertSign,
|
||||
NotBefore: nbf,
|
||||
NotAfter: exp,
|
||||
}, intpub)
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
intsigner := Signer{intcert, intpriv}
|
||||
|
||||
leafdns := "dolt-instance.dolt-integration-test.example"
|
||||
leafurl, err := url.Parse("spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance")
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
|
||||
leafcert, err := intsigner.Sign(&x509.Certificate{
|
||||
SerialNumber: big.NewInt(3),
|
||||
Subject: pkix.Name{
|
||||
Country: []string{"US"},
|
||||
Organization: []string{"DoltHub, Inc."},
|
||||
CommonName: "dolt integration tests " + desc + " Leaf",
|
||||
},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: false,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
NotBefore: nbf,
|
||||
NotAfter: exp,
|
||||
DNSNames: []string{leafdns},
|
||||
URIs: []*url.URL{leafurl},
|
||||
}, leafpub)
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
|
||||
expcert, err := intsigner.Sign(&x509.Certificate{
|
||||
SerialNumber: big.NewInt(4),
|
||||
Subject: pkix.Name{
|
||||
Country: []string{"US"},
|
||||
Organization: []string{"DoltHub, Inc."},
|
||||
CommonName: "dolt integration tests " + desc + " Expired Leaf",
|
||||
},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: false,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
NotBefore: nbf,
|
||||
NotAfter: badExp,
|
||||
DNSNames: []string{leafdns},
|
||||
URIs: []*url.URL{leafurl},
|
||||
}, exppub)
|
||||
if err != nil {
|
||||
return TestCerts{}, err
|
||||
}
|
||||
|
||||
return TestCerts{
|
||||
Root: signer.Cert,
|
||||
Intermediate: intsigner.Cert,
|
||||
Leaf: leafcert,
|
||||
ExpiredLeaf: expcert,
|
||||
LeafKey: leafpriv,
|
||||
ExpiredLeafKey: exppriv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Signer struct {
|
||||
Cert *x509.Certificate
|
||||
Key interface{}
|
||||
}
|
||||
|
||||
func (s Signer) Sign(cert *x509.Certificate, pub any) (*x509.Certificate, error) {
|
||||
der, err := x509.CreateCertificate(rand.Reader, cert, s.Cert, pub, s.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x509.ParseCertificate(der)
|
||||
}
|
||||
|
||||
func NewRootSigner(cert *x509.Certificate, pub, priv any) (Signer, error) {
|
||||
der, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv)
|
||||
if err != nil {
|
||||
return Signer{}, err
|
||||
}
|
||||
cert, err = x509.ParseCertificate(der)
|
||||
if err != nil {
|
||||
return Signer{}, err
|
||||
}
|
||||
return Signer{cert, priv}, nil
|
||||
}
|
||||
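A quick way to sanity-check the emitted chains, not part of this commit, is to verify them with crypto/x509 the same way a TLS client would. A sketch assuming it runs from the gencerts directory after `go run .` has populated ../testdata:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	rootPEM, err := os.ReadFile("../testdata/rsa_root.pem")
	if err != nil {
		log.Fatal(err)
	}
	chainPEM, err := os.ReadFile("../testdata/rsa_chain.pem")
	if err != nil {
		log.Fatal(err)
	}

	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(rootPEM) {
		log.Fatal("no root certificate parsed")
	}

	// rsa_chain.pem holds the leaf first, then the intermediate.
	leafBlock, rest := pem.Decode(chainPEM)
	intBlock, _ := pem.Decode(rest)
	if leafBlock == nil || intBlock == nil {
		log.Fatal("unexpected chain contents")
	}
	leaf, err := x509.ParseCertificate(leafBlock.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	intermediate, err := x509.ParseCertificate(intBlock.Bytes)
	if err != nil {
		log.Fatal(err)
	}

	intermediates := x509.NewCertPool()
	intermediates.AddCert(intermediate)

	// Verify against the DNS SAN baked into the leaf; the expired chains
	// (rsa_exp_chain.pem, ed25519_exp_chain.pem) should fail this check.
	_, err = leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
		DNSName:       "dolt-instance.dolt-integration-test.example",
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	})
	fmt.Println("verify error:", err)
}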
@@ -26,6 +26,10 @@ func TestCluster(t *testing.T) {
	RunTestsFile(t, "tests/sql-server-cluster.yaml")
}

func TestClusterTLS(t *testing.T) {
	RunTestsFile(t, "tests/sql-server-cluster-tls.yaml")
}

func TestOriginal(t *testing.T) {
	RunTestsFile(t, "tests/sql-server-orig.yaml")
}

@@ -1,27 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIErDCCApQCCQCnSokQKR3M/zANBgkqhkiG9w0BAQUFADAYMRYwFAYDVQQKDA1E
|
||||
b2x0SHViLCBJbmMuMB4XDTIyMDcyMTIwMDgzMloXDTI2MDcxOTIwMDgzMlowGDEW
|
||||
MBQGA1UECgwNRG9sdEh1YiwgSW5jLjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC
|
||||
AgoCggIBAMPmzHy0CmW5Xc27rbRYpJG/QKMXVAz+k2v+AkTQkUzBWKv0z8WhePB/
|
||||
tDNVfVYuYQ2sBiHTaar9nn2Lokon+YkPjyMis2aMETHVuqx0DmJb9YcxniA8M27o
|
||||
ZlfDrJtQO5UzIp9q2zhsFWj30Qdm6YUOhZ3rTnvYOMUYG/cIYLWXyQCg1oPqRVRr
|
||||
GldzLP2GdigdrS6QQjA9AdK+Zi3dP2m2vssG4gJ+lkAWOHe7wvv2RJl/alsvWXmw
|
||||
pur7Q9Z7M+tQmqGDxlyDtkDDecyqvEkxPH7mnKV1jahJjzUFHND1r44JlCN0eTmD
|
||||
Q3+RldBNZCZSJWQ42yOIK+mTSp4QUvZL9wnJ1/lMb/v7atDlF/MSLeN6SDyAPod7
|
||||
Oci8PR+nGhaOKacngrogM6SFQ1kF4tlY5Scrpg61IAcf6uxF3eSBP0qEaFvfLXZV
|
||||
mc136E4g2G1haLt7y2prckCHLXEnxurXU4xlU/SH4cy4jB/zLZJs46tM7J9ZtCjg
|
||||
QScZeNBA91kKAvHr36f/+suU3MNPAP2fmMCziH2uxh6SxTP8yzsUoV9PCTeaSnXX
|
||||
rTMB077j0TOB2qsYhLF3XsLMz+B2Jo0b7ydT7c7rMS9yYvyKPA9JSE44nUrZWj3B
|
||||
7ity1moIfrzwbH3AK3D5I9iUbBV0+JpuIZFPoqTIb15TUXJSusYHAgMBAAEwDQYJ
|
||||
KoZIhvcNAQEFBQADggIBABGrQEUFJk5StmyFUGvaw/57H+K1ZT62rusFBq1NacMb
|
||||
61dMh9xJyDMgLiUllQ8q5CS3bjYt2J2KajpU/58ugF/Ct9aoxA4vFDtfHECllYaH
|
||||
zvoiK0Dkrf901xxNVeCbHDmXbvzJ0N/xTkP80kbT4o+aBOw6fxQVEBGAGg4EEz1D
|
||||
k7v3/lEsZ2TkCPua1p9kXHaG8+wwE0hAWsaUYgXHTpzz0gUBJ69bOIlBpLKqO9It
|
||||
HStkPD7wtYnN54pmOM68EAyXAxUC7yZ9PqncX0X04hH0VlmQGfdXFJDR89mSS6B4
|
||||
P1qsi1XtnKC/hHuJlrY02uMXn7u1cVCf5uWfFm6Xs8rLL+q28gV6Tr2aXqgY0Cjl
|
||||
tNtUEIP23/irWN48c5/rKOTiUIHJy2m6UofwMQO91jgKFxIyUmkgPQmos2LLNjtk
|
||||
VFaPRigAaArwvombUmvfXJl6KoyH/je4H4+Gs+rRQURXU/PD1cioHgsOYNXSmYAj
|
||||
AQJv/xp9QBmpzb1ExJOKeWjnUWGu0Wdv4TCTXJNvfdQqOVkT6k6ty1urgr9fNOxY
|
||||
PDbHZTI6rXMtT57G108k2gAkaCE6O2R2Dm+vfW7auauqF3lNiZU9Y8IEGU2ybmE3
|
||||
s2j+THPWmhuepbZKO5daQH0zlma31QgoyhGSoZ6QUWKEjufEvfx4HwGqMP6BEmaP
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,52 +0,0 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDD5sx8tApluV3N
|
||||
u620WKSRv0CjF1QM/pNr/gJE0JFMwVir9M/FoXjwf7QzVX1WLmENrAYh02mq/Z59
|
||||
i6JKJ/mJD48jIrNmjBEx1bqsdA5iW/WHMZ4gPDNu6GZXw6ybUDuVMyKfats4bBVo
|
||||
99EHZumFDoWd60572DjFGBv3CGC1l8kAoNaD6kVUaxpXcyz9hnYoHa0ukEIwPQHS
|
||||
vmYt3T9ptr7LBuICfpZAFjh3u8L79kSZf2pbL1l5sKbq+0PWezPrUJqhg8Zcg7ZA
|
||||
w3nMqrxJMTx+5pyldY2oSY81BRzQ9a+OCZQjdHk5g0N/kZXQTWQmUiVkONsjiCvp
|
||||
k0qeEFL2S/cJydf5TG/7+2rQ5RfzEi3jekg8gD6HeznIvD0fpxoWjimnJ4K6IDOk
|
||||
hUNZBeLZWOUnK6YOtSAHH+rsRd3kgT9KhGhb3y12VZnNd+hOINhtYWi7e8tqa3JA
|
||||
hy1xJ8bq11OMZVP0h+HMuIwf8y2SbOOrTOyfWbQo4EEnGXjQQPdZCgLx69+n//rL
|
||||
lNzDTwD9n5jAs4h9rsYeksUz/Ms7FKFfTwk3mkp1160zAdO+49EzgdqrGISxd17C
|
||||
zM/gdiaNG+8nU+3O6zEvcmL8ijwPSUhOOJ1K2Vo9we4rctZqCH688Gx9wCtw+SPY
|
||||
lGwVdPiabiGRT6KkyG9eU1FyUrrGBwIDAQABAoICABUIJlQNEECzkfqQd6mxCpoL
|
||||
KmlYC9IJUtJ5Rs0Uh0TyTQ7JDbVuDInla/dG6lniSNEq8s2W4PVWnTllUFsdx5CL
|
||||
dxaSlygfSYlMJOp220R8EvQcw5k6XVs+4B30CAf0qTDveHwdAMQh9np6gJqG1fNP
|
||||
B9FYfeiV4iJm4Dm5UIiubwn+OomXETJq/Tz+RIpDcVQFO56QJkr/gb6aamXqJvC2
|
||||
ie1KI+GYrZDb0dwo8FoUqnDAWS7I+pYx/PmlWDciqwRMdw14FEfCbEKvudfbTLOe
|
||||
8Zu+LnslD7xNiW5ryhg1CE/7f0f/LTSbfxenDap7ZJEoqJMF96Ds8an2AkDOB9nx
|
||||
XB5kVz5jMsaZ1f68Rx8S4EqEEcXxYwiRe5WoDEnnVr2+Q6QzOqh/4DaA5VuId462
|
||||
IjPDWmYszSqig9QXjS11SkTMKCKxas4AqfCb8uUlcXdri4aSv0Khb7DgbO2su1KC
|
||||
+hcXpiAMH9jVX1d4N8c0Q0HLOT09lRnD2mmEX6Lo2kWgb5Hpzo88Ty9WI7oiszsY
|
||||
J1r6qPkXIc9Ft1YwpdVBhkBbxB024l9IG8I1UzjrLFnR/A5sRefzosNi4/ZACPW4
|
||||
Kykhy7p+ZV9Kf8cjMbY11afCmi9jlXsVqWwJIMk+LxTCjF/lmbMay/G7j+ibGtSQ
|
||||
hU+LNPzAOUEwBj1OqoMhAoIBAQDlo3Ecgeu5zxNILnkut6RHHDJUK2N9+5HIDwi4
|
||||
frMlkM3b8NLz09/GtmX4HTKkDBur4x9QeEIsxG19tk2QWZQ4EAKs8OcEXaCL4Q9g
|
||||
msZbQC5rrFjRzUC4roxCTEz4g/ANEM+huLq/3a6afUhkmUuGZzK6rf6E36dTx3na
|
||||
DP4tDAx1s/DqfMtXYYmzrb3V1Nk9NUwQFRselJ8EHeIA7NEcLcv5yREia57RcYm/
|
||||
EfuA90j1ER6iHZIxopPfo1Cx7I9N4eoQM4/Tjb5qu+krfGOFOQbL6hCPHeHkZlAw
|
||||
0/2ECxCHS2y+Uih3MkMdnme2tfBr8AQpcfAOxSTMXu1wGDs9AoIBAQDaY+fVJ2G/
|
||||
/myI3Nly7MZaJ8NT8kcQx55b1s5vqWU+IQo5YC4KGdUp32U5Uxr2war8SuA2pKW0
|
||||
Cv42IJYlGQQUgpj2k+DJcDz+Qz9nqE5Ft8vNmyA3Y2gbwgTkd9dtFCTph4BNiAad
|
||||
qyjXwdJ6qwB1dbORsprC/Ue8WcEVwWwvF3PGnvbEiM8qLyxv/WIXnN5B/XcvUFHS
|
||||
mS3IVkJpdR8Kzp0Ctro5mHd2L6SQa/XM5tU3bye9Hzf1J3rWM/FGzVtYInC//CoO
|
||||
w/sA/ebfhK1iHjYYp4MjyETBkbD1kpCl6eNdTKN9ydSkUzhWlHn3xKQQrdZ7KiiH
|
||||
YbIhh1rwB+qTAoIBAFIoOnSfis2MZ3Kgpdxv+UczsFHqwAq3sX1o247eTYu4Fd6F
|
||||
d4OinuICKdMt5wtIBbJmbLKmg85ubFnYmkF1uxCfscVb3tryAFlrKMxAM408Fh+R
|
||||
pqlRDMHGOQoTMEqNMZoLFK3gYHf6gNhm0DqlmZ65Vy3wyCmTttLDgDXiBiHpuJ93
|
||||
xE6wXTOjAtgU5eEV6K78XX03f99d/tJDOrNoBpxVSi/Qnt+4rzZxr317moaWcjSz
|
||||
bklD2SUG7G7LiDhP0SllFQ+80s02XhTjq9VSCG0GbQcRc+EwKLxFWpVNktrl9oDh
|
||||
HEOvMykKA3caUDLPPvfvBB4r1F4EbFjt8Xb0RGUCggEAO0PrcRvr2gd4ere8RwTc
|
||||
WzD5P/m6sWIKpo+nnAPTVsXumV1xgQo7n85hEOptodMyzJ6hNBMAaNim3hd/x3d/
|
||||
dPVv/1JoKSJNWw7y0PWKsD7NjvFvD7jpUscXPs0K6C4USk+cUO3+JaGCRvLxZJqt
|
||||
WDLl1T8r4oiLhCCzVm0UJ79sitUu0Gz0E1WT8JxJl3DZm/zl8DAS1Fz/YKOQCEBh
|
||||
eTRSxZ7C8MhgevE47nxtyvpFmHKQzTEApYXePuz/qCAojsVh5afP3gvvPPiqQ7Qk
|
||||
vUDHm28yFm7Nwd4AsNPibzQGoJYgtA0mqKVw34YRh1yUzXXvg6MQNpUbmx+5XPQ5
|
||||
AwKCAQEA5Iye1s7RVxZP5iJ3vgy67OU+zza5yEIoJEabhF4bOBDsCwNcx2EwsQll
|
||||
X/Su5qqiIVnrRmkdYVhTnZv8bigq/8Hu+BBenMLxkAwZ5ep6gKq9wdiPQArjNBlS
|
||||
5KkGuj+7LNCsmmldXVXjjg2BNWBDdVv33hhhqsi/Tzau+qAufdNGdBTS4ZTWEH0z
|
||||
X5EBtOphJbBPeMUrm1PFOXKUDDwPfqX86rg1NHr1l5iB7uqShZak1s1ovoyFO6s7
|
||||
I9d8chi4/qwwYk8cHczB4C9EwBvWEvcAf1xa6I1Mp8y3tDhWPVIpq5P8i9vQFYIJ
|
||||
LWLCd/YowgxkNl5j6a5QMFoZvjLi5A==
|
||||
-----END PRIVATE KEY-----
|
||||
integration-tests/go-sql-server-driver/testdata/ed25519_chain.pem (new vendored file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICOzCCAe2gAwIBAgIBAzAFBgMrZXAwWzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
|
||||
DURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
|
||||
ZWQyNTUxOSBJbnRlcm1lZGlhdGUwHhcNMjIxMDI2MjEwMTQyWhcNMzIxMDIzMjEw
|
||||
MTQyWjBTMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNRG9sdEh1YiwgSW5jLjEsMCoG
|
||||
A1UEAxMjZG9sdCBpbnRlZ3JhdGlvbiB0ZXN0cyBlZDI1NTE5IExlYWYwKjAFBgMr
|
||||
ZXADIQBq59gmS/TqiLFwMpug/QSxGiq/zzMPQBWOe+l0o8tbkKOB3TCB2jAOBgNV
|
||||
HQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAf
|
||||
BgNVHSMEGDAWgBS8Fugt5Yjb7mabErluXQOwId4DfTCBgwYDVR0RBHwweoIrZG9s
|
||||
dC1pbnN0YW5jZS5kb2x0LWludGVncmF0aW9uLXRlc3QuZXhhbXBsZYZLc3BpZmZl
|
||||
Oi8vZG9sdC1pbnRlZ3JhdGlvbi10ZXN0cy5kZXYudHJ1c3QuZG9sdGh1Yi5jb20u
|
||||
ZXhhbXBsZS9kb2x0LWluc3RhbmNlMAUGAytlcANBAF7vtPl1usXT+WgeD72BEdYB
|
||||
2E8PbORVYT05SrjRYRcdHNegWQUN2fhKE/+WNeeOVfGQBcwMlObof6deraq9uw8=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBwDCCAXKgAwIBAgIBAjAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
|
||||
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
|
||||
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowWzEL
|
||||
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2Rv
|
||||
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBJbnRlcm1lZGlhdGUwKjAFBgMr
|
||||
ZXADIQC63kDzz+nGeTtt2CcA2M3Q1R8YephuuUzxlvEB+cgj5KNjMGEwDgYDVR0P
|
||||
AQH/BAQDAgIEMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLwW6C3liNvuZpsS
|
||||
uW5dA7Ah3gN9MB8GA1UdIwQYMBaAFOE9s81S97V1S09D3k0obt02yhrpMAUGAytl
|
||||
cANBAJkX45OPKCFrJ2EmgXntZQFznQuUriA68Pxaxxzy3/W1jDtxf2cccDxtS1TJ
|
||||
uPGtJ5Ri8dbk+5FgK3GQFQweDwA=
|
||||
-----END CERTIFICATE-----
|
||||
integration-tests/go-sql-server-driver/testdata/ed25519_exp_chain.pem (new vendored file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICQzCCAfWgAwIBAgIBBDAFBgMrZXAwWzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
|
||||
DURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
|
||||
ZWQyNTUxOSBJbnRlcm1lZGlhdGUwHhcNMjIxMDI2MjEwMTQyWhcNMjIxMDI3MDkw
|
||||
MTQyWjBbMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNRG9sdEh1YiwgSW5jLjE0MDIG
|
||||
A1UEAxMrZG9sdCBpbnRlZ3JhdGlvbiB0ZXN0cyBlZDI1NTE5IEV4cGlyZWQgTGVh
|
||||
ZjAqMAUGAytlcAMhAF6ENDzBPmj6JXxySz9SBR4eh6pOI+IEeepQuqa0Pvn4o4Hd
|
||||
MIHaMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMB
|
||||
Af8EAjAAMB8GA1UdIwQYMBaAFLwW6C3liNvuZpsSuW5dA7Ah3gN9MIGDBgNVHREE
|
||||
fDB6gitkb2x0LWluc3RhbmNlLmRvbHQtaW50ZWdyYXRpb24tdGVzdC5leGFtcGxl
|
||||
hktzcGlmZmU6Ly9kb2x0LWludGVncmF0aW9uLXRlc3RzLmRldi50cnVzdC5kb2x0
|
||||
aHViLmNvbS5leGFtcGxlL2RvbHQtaW5zdGFuY2UwBQYDK2VwA0EAsJcZ7AAXXkmW
|
||||
78cvfT7aa++y/t++altVJs0Qy8zZcP4XBBuPpdzxrQRcILQ2lyrpER8wrSB67UH6
|
||||
LSeDh4FuCA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBwDCCAXKgAwIBAgIBAjAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
|
||||
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
|
||||
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowWzEL
|
||||
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2Rv
|
||||
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBJbnRlcm1lZGlhdGUwKjAFBgMr
|
||||
ZXADIQC63kDzz+nGeTtt2CcA2M3Q1R8YephuuUzxlvEB+cgj5KNjMGEwDgYDVR0P
|
||||
AQH/BAQDAgIEMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLwW6C3liNvuZpsS
|
||||
uW5dA7Ah3gN9MB8GA1UdIwQYMBaAFOE9s81S97V1S09D3k0obt02yhrpMAUGAytl
|
||||
cANBAJkX45OPKCFrJ2EmgXntZQFznQuUriA68Pxaxxzy3/W1jDtxf2cccDxtS1TJ
|
||||
uPGtJ5Ri8dbk+5FgK3GQFQweDwA=
|
||||
-----END CERTIFICATE-----
|
||||
integration-tests/go-sql-server-driver/testdata/ed25519_key.pem (new vendored file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MC4CAQAwBQYDK2VwBCIEIIq53ao+dZ09B33ER3RWNEbIhuQAOP/aza1sLDcCaBwN
|
||||
-----END PRIVATE KEY-----
|
||||
integration-tests/go-sql-server-driver/testdata/ed25519_root.pem (new vendored file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIBlzCCAUmgAwIBAgIBATAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
|
||||
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
|
||||
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowUzEL
|
||||
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2Rv
|
||||
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBSb290MCowBQYDK2VwAyEAUSTT
|
||||
dZ6hXoZFVLDT4li0j/4K0//gRILcsNnPeTXeENSjQjBAMA4GA1UdDwEB/wQEAwIC
|
||||
BDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBThPbPNUve1dUtPQ95NKG7dNsoa
|
||||
6TAFBgMrZXADQQCS//dI2SsZnwaLk2I4m9WCHihUyZ2wWeDonwsPXkBtNBxJZnJb
|
||||
tw0xf6bL+3opXeQfVTkn/BePZ8s4hbeBK9AO
|
||||
-----END CERTIFICATE-----
|
||||
integration-tests/go-sql-server-driver/testdata/edcerts_exp_key.pem (new vendored file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MC4CAQAwBQYDK2VwBCIEIIcPLEb34wrHmDff8cr7jjLaaaRyWEd+kuYw2h1GRA9U
|
||||
-----END PRIVATE KEY-----
|
||||
integration-tests/go-sql-server-driver/testdata/invalid_root.pem (new vendored file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDbDCCAlSgAwIBAgIUdWEanf/1+cmS33nZDPY+gkQwS+gwDQYJKoZIhvcNAQEL
|
||||
BQAwTjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xJzAlBgNV
|
||||
BAMTHnRlc3RkYXRhIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMjEwMjYyMDM1
|
||||
MDBaFw0yNzEwMjUyMDM1MDBaME4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0
|
||||
SHViLCBJbmMuMScwJQYDVQQDEx50ZXN0ZGF0YSBDZXJ0aWZpY2F0ZSBBdXRob3Jp
|
||||
dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLAQ88jtxKIH0Uc0Yp
|
||||
oUmM0Bx3/fBqgbYAGJ1cxtkXahhGp94ICe0gmASnbPuAY22X0zf55C94semPNNgb
|
||||
xV/FHftvyi720z3wwOk8twa8I4vjb1mnxlPZzS2Xd1pb4KnUtjOemGfZOn6OWbXF
|
||||
ukf5uNDKUZcFPPjaiAnQ+kK6vjYWZjY6Hn4KVAjBRylQj86hzgF0cc7B4WOX3L6L
|
||||
ahY56urFElKnFh8vCydSfyZqtz56ng3Gc83PBIEkTTgQVwFJkx+Azh73NaTGwXcv
|
||||
3Wj4D+TzF2T0JsHe6s1CWyoHxvccwoUdAv8HGzzHVcm+81KMdy9r9e7R3kyu9HSK
|
||||
D3sBAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
|
||||
A1UdDgQWBBRzOWBY5hQAM5obC3y+nbHKnvQtmzANBgkqhkiG9w0BAQsFAAOCAQEA
|
||||
yKsw7CLYQQ2i9jzislIUF0pMW03rLTBPSyv78mhUrfaL2TncdJAPTMdR5KaFTKSy
|
||||
2AzuYkIN9gU0blk73sxbtdNyZlpP0MQHRuRkgpuXii0tWQ0f6uhLaZRJvLm4Hjsj
|
||||
Sma8ydO3/7FvdTby6Uv1Rivd53BGfVAcw8W1oC+8KfrDhUsWzqcDH6Aiszz0utKr
|
||||
XAqiOdNUSy2riyxc3s9RH2j20BNj6vWkz8ZoRdBa2pf/oRtYF2ZJjCZq7eH5hlSj
|
||||
/Am5Yw9Cc0/48Tm58e4V2SDHys9ld8EBKOMlo8djk3q0LxGtZ41O1hr4iaHTkWyl
|
||||
2wYWEa395xncUBUqvCpKyA==
|
||||
-----END CERTIFICATE-----
|
||||
integration-tests/go-sql-server-driver/testdata/rsa_chain.pem (new vendored file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIGETCCA/mgAwIBAgIBAzANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJVUzEW
|
||||
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjE5MDcGA1UEAxMwZG9sdCBpbnRlZ3JhdGlv
|
||||
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgSW50ZXJtZWRpYXRlMB4XDTIyMTAyNjIxMDEz
|
||||
N1oXDTMyMTAyMzIxMDEzN1owWDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRI
|
||||
dWIsIEluYy4xMTAvBgNVBAMTKGRvbHQgaW50ZWdyYXRpb24gdGVzdHMgUlNBIDQw
|
||||
OTYtYml0IExlYWYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDVFmdv
|
||||
tpYlR7XPQlnSbza5io/JAmGlOX30InseMZkddyURv+9rs8FY5PuEZyNH5VcJ/w7n
|
||||
xP3MwD8Uctojnl2FoEZjXreIIefPmyeLRgSXBTNE+iioTXn7B2sfPXFr4GeRborH
|
||||
E8GZJOgztlWrBkPinDn9dcY2tJzlh0HWIRedeohVlqs45Uy7u1UkpGyVZPobTXm3
|
||||
9S7gSyeGRnAgaIRPfVZdTkUHCY4x+Hn94Uj5U7TAH6QYLzeKGmFIRoQxgabPaZw+
|
||||
Ug0XAVDIoYmlOPgphQjkLpjLDEt7nhxnEvMG68ZrR6c7WGOS+eC6HasAdgnxWmRo
|
||||
HRSMdKKyDh5TwVbRGxlHAhcFPEYqDwUXb+H781Cia4MAo4eUiblBdEtQs97ymeRt
|
||||
HBoSU2ORQIOYx568SZwKDx3/HwyHd/5jZ0oM0kMoVJhvjjiF30su1Tuku6FT0Uu4
|
||||
NsIPf0Sq3BHERPz244t9yyFobbT8WdtNHGOOjEUWSP/ho/9hez6rboH2No1K1RuN
|
||||
2wQlQ5mNIQyPwkrACOdhYvQPCs0gp4Y3wgdrQ7jqccObXy5hokRC92WfgwCFVIEl
|
||||
JT9lOoBElH5IS2BaP+4k9k/A/LXQ87OmqRvZlKsaMX9LdUZoLFS3EzSDFdnWdgtf
|
||||
QJIDX0T1l8LQaNhbcbfNVrRbD8+BSB3tdllv9wIDAQABo4HdMIHaMA4GA1UdDwEB
|
||||
/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB8GA1Ud
|
||||
IwQYMBaAFFVLRPReehc5TLVcwASWmZzRgdVfMIGDBgNVHREEfDB6gitkb2x0LWlu
|
||||
c3RhbmNlLmRvbHQtaW50ZWdyYXRpb24tdGVzdC5leGFtcGxlhktzcGlmZmU6Ly9k
|
||||
b2x0LWludGVncmF0aW9uLXRlc3RzLmRldi50cnVzdC5kb2x0aHViLmNvbS5leGFt
|
||||
cGxlL2RvbHQtaW5zdGFuY2UwDQYJKoZIhvcNAQELBQADggIBAKeAj0ALw5Qbc45u
|
||||
kjGx856yoFdunVXRujz5/v37AuGEEV16DMnONroHD2DSss2vxGEQGEkvypgWOLE6
|
||||
L5QPqH93W+deVrVeHS1FNWbEWGVEJEtIZOhZsTCU9bIj+WtgcHDCk7DHE2t2DBeh
|
||||
QH4aDPfkPL0vOmD/H6Mq0dbPPJW6FuS0tIlCXorKHM98lqmOWcxDnbGl1aH4uITo
|
||||
GB6dltX2YU9gM5G15Np9Nng2d1owTbOHt5sMvtKxCZeb+AYZvTGCFq8tRTlGvxHZ
|
||||
Xr39YmtGbplzkEq8EVEMUTYHse0cdsw2xxYkq9aqYegrBHHfNFybv2U6Rz+yxco5
|
||||
p44NecwZgsSm4+ZEb6gHg9RSZ/egDKHFEkQgapjQcRrHxrNqUmn2/zMmEs9uJLYM
|
||||
nYCxrSlGY+wULDk9wsAStrz5n0xhsl0mE/CjRcwtiFyNW5QqBD//d4bacfFhPMA1
|
||||
1Ce2mcha+PZhLC43zxuN5DMFNJWEiOzUpH72CwQ6UpnLZnL2Kkhff3SEuPAn83s/
|
||||
8zHXEWYbvLlBVTZjTxJ5+4YqyEQaf4SHZoIJXLwUbp5ZSz1EIP6qlgqGtxHYBePo
|
||||
KUtkFjf5aWt1nQ/Yu1sIBMa4i+xfQey/zwQS1smLrwlzh6QqvykINQCUUMzN2rQZ
|
||||
kAddIrPAEFcBeHRQs93N+IqisBpA
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFljCCA36gAwIBAgIBAjANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
|
||||
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
|
||||
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
|
||||
MjMyMTAxMzdaMGAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
|
||||
MTkwNwYDVQQDEzBkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBJ
|
||||
bnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC47wob
|
||||
mq2NLhf6EJdkWeli1E0ViOVpGOM05FNtcrYuX0A/y5VQtmavwchdn02Fa0ueEu3I
|
||||
JU9gqYu8Ubpa9fE3xWGA97543yx8Y3/blL6EGD4yWzf+iMb3R2qOe/omTOPllk3g
|
||||
D4pghlaSQe5ZRzzvVfUBH1Qj6WTSHcUoRCKUYaaBFpLxapjAS90Vf6PL1GQabdaq
|
||||
JN/BUbC1dR/4Z+brelUy9NSvAXg7/HtndiRMl3sOU8wh6NXVpPYta4xhkGr230Cc
|
||||
t1kOMPRSq4px9DOx3vZJCyOUy1Ro2CgAU2px2PlBG+95/TrqTgO6G8DxGYSV2bDY
|
||||
ZpHX1MyRoaYAuRvvuffXt17CCHbiD5i9VYy6F8WBLd16l96RsgU6sREOryYoXFZ0
|
||||
K9oUA69PEabq7H6Tlg6sQgaQ6u1No/H4H2eYTtmhdcOnaRWPq5i/x7EZ/cnwp9zc
|
||||
7P0afMTrKl1ezXxXOVRVpmk7SD53lqXYXNJpy1pf2wTvPnl4mq1aCnQtHPmEFbuJ
|
||||
LzejLBKrelfm7HMmQfxiKYKCHcth96eclP9GXhmA6XkU2BqbXGDEetDv+YiRdaQl
|
||||
wKdfRhCZPBVBcy7DnqJoa0lss5l0e3lTu2+wW5Znb3FzXdJSnl1e6togF9IgJAkH
|
||||
n4Fgs4/33TXQi0XPmy/iTbiUR8Ht8HhGBO4A2QIDAQABo2MwYTAOBgNVHQ8BAf8E
|
||||
BAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVUtE9F56FzlMtVzABJaZ
|
||||
nNGB1V8wHwYDVR0jBBgwFoAUmmgyBnblEP+8fE5WcVV1fDkenHswDQYJKoZIhvcN
|
||||
AQELBQADggIBAIzbD8T6npRjMcTCHTfirl4r2eM9r6ANHIZgro5HK8EaTGaD2pQC
|
||||
3nnh9dJfnw/bbIUG6yEacAAfp1Krba/f+z3B6PyIdbhGAkYaAZzyujzcyPZN3yx0
|
||||
AIzZxwO4f9mpZ1Q95Xpn5ygozFKzZUg+AYW2qmyftDCVtHcZWBnKREgE64PMZ9b4
|
||||
/sajWdmx9jdr6algdEUu4kIxGvAq5C2pgydh1mpVcx4Znvroczip+dlUAb5cudGP
|
||||
krzCmdi9RxeGc/RIghNRNBtKVQtMh3nQwE0YOcIRY3T0WwCJHoRqCX9SoJvS/mYg
|
||||
mpm4YxLf9NXxnhTCNTCgBZ+lYqqW4nt6msh10inYg/nSDgWoU50VC5WOQwmVbAXx
|
||||
N4JrONvNXElWdEVkz8V2Lq1mwA6+4Mf1Rjau+j04z6bqZkdMYzCH0fG0to5B4fiM
|
||||
+XfoFDgZfnymSuEPKjo4vsGLwwNAwfsVNIuiqEkJODKf31p9YNgNW23v6uKzV/GR
|
||||
x6rKidp6XjfUkSXdmoPd4+qdhJLe+IQEVtoBUALlpGEYckin0L1/9Sl/GIucnkz3
|
||||
bjq+NazgnPeRb2YdfiQBsY5C7b9x7bbRZdtskCtIjrdzvYr+Hil0xHDlqRSlOHZz
|
||||
1snsRgG+DJF7rEPiXayz89JNrucWsrnyTYiQHANXWcwSKacILL1jneum
|
||||
-----END CERTIFICATE-----
|
||||
67
integration-tests/go-sql-server-driver/testdata/rsa_exp_chain.pem
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIGGTCCBAGgAwIBAgIBBDANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJVUzEW
|
||||
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjE5MDcGA1UEAxMwZG9sdCBpbnRlZ3JhdGlv
|
||||
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgSW50ZXJtZWRpYXRlMB4XDTIyMTAyNjIxMDEz
|
||||
N1oXDTIyMTAyNzA5MDEzN1owYDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRI
|
||||
dWIsIEluYy4xOTA3BgNVBAMTMGRvbHQgaW50ZWdyYXRpb24gdGVzdHMgUlNBIDQw
|
||||
OTYtYml0IEV4cGlyZWQgTGVhZjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
|
||||
ggIBAN2eUhKl74hhAnLW4N5x3UHkiHkcoWzck6Qfg3u7d3OhtURYVMgUTynW/JKb
|
||||
WfHBGT8yG77HT/GF0x+sV+IajZeZT1F4xou7sO9MFL7QDyEoaZo37dLkhr+mWkYb
|
||||
YadcIRNAFY/vuI/xP5DMPVrZNzoDp6VHnW3NwlYWYlPLLkjIGcZafVkt3UfI0XzZ
|
||||
4QhKtPDNopIt3lLmod5HLwcjiWIcMyjWJVue4kP71H9AA+7edCQ9kDmfO+v4JtUr
|
||||
3AE1q+vwNQe8mXaAj8aRXJc2m6qmRfbeVdsI0YYdeDFyuhahb2hqaD/pAEbaNz0p
|
||||
hy8AvPRY/oUwmF0oZ92YI94DUDgxneFyUp0I4+4ngUKUYwXHg0Elvw9w472rNBHB
|
||||
qNAbPh1wi4bBKs4yVZaZB35ESWHl9gmFQMN5+dGgnB6kVgLZOmBKoaVwBjQ+SeSQ
|
||||
QhDUnNph70y4LSNY+GEKWeYY6wZH0PbRa4PNhuXSs4aE9sHCbWKyRR1zlfoEloVr
|
||||
xj1/Dx04nP0/tFcEcb0XMjOcTH/2484SJ9smBn8HZwybltIdZcpPNNVN98ZiPPK2
|
||||
BXR9bOWYEJsjovGFVNSYQP0kURAez4qwFuppZ0WUHH5STJLBlkRhe3YQEG3RHojj
|
||||
H8Tb1ynUJuLdM1dTCQe+Q9XVtUOXfxKBX+kJdphR/z3xS9FrAgMBAAGjgd0wgdow
|
||||
DgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQC
|
||||
MAAwHwYDVR0jBBgwFoAUVUtE9F56FzlMtVzABJaZnNGB1V8wgYMGA1UdEQR8MHqC
|
||||
K2RvbHQtaW5zdGFuY2UuZG9sdC1pbnRlZ3JhdGlvbi10ZXN0LmV4YW1wbGWGS3Nw
|
||||
aWZmZTovL2RvbHQtaW50ZWdyYXRpb24tdGVzdHMuZGV2LnRydXN0LmRvbHRodWIu
|
||||
Y29tLmV4YW1wbGUvZG9sdC1pbnN0YW5jZTANBgkqhkiG9w0BAQsFAAOCAgEAQWgb
|
||||
s9Y7kqnhxZr5UrZDqadVGCULU+M8+UqxLtT4IlS23z49uXcMgylX7Zzb1IwSHwN0
|
||||
crZKzi/O0biTCFT0Kq3lTdLPrUUEt4qJXnMlLz5UYLs3ujSrDbCjNQMr7YM6/bjo
|
||||
LPmThTlCUR1L611SJCYTg4zXlCOuTOqPLzrsyW7yoqk6++HIzf7x2CI5VAW7FrrV
|
||||
J6QG80WKaDGRkVMINXGVxkcUEKQ69hc218jDA70J60PpiPHXmv/MzvzMc0dxnw1m
|
||||
mB/4Cy9wsOP6M4YL8flmTbD6qLeMbmGVRDNJknQ+bo+RPAG6yO/TGXTYwSfPH9ki
|
||||
wOE5OysRB8Rm9KGX+00W5OoRmRF8duj/b5EW+SnF6J3etKMbgzHUcKLrIB54ikMv
|
||||
vNNcCbGS+Qb94cBfLXt9zK+ifywUjnT1au/ahlz5MonzVNmudeabn261A3UbXFg2
|
||||
6dvlbPLbb/FDoDomr+uIQcipkjN2F3Pe7AYGW5JDr1+sqyfPx2BpYnASaKFZtVrL
|
||||
FvlyYC/wtp29QW+zAn+csaS1r9WgUZfKoI29fIBE2Qw2367QpEQPgNdhZQEfXIwM
|
||||
K10uZJPiam8buVt88PNKsc+wYFaOzHeGO3kmq86Em9j9SJa5EF+yU0Et+I3z7hna
|
||||
xSEruA+hC+ccBaicl2rxNeiml7xbOTiuQD6Okx0=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFljCCA36gAwIBAgIBAjANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
|
||||
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
|
||||
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
|
||||
MjMyMTAxMzdaMGAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
|
||||
MTkwNwYDVQQDEzBkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBJ
|
||||
bnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC47wob
|
||||
mq2NLhf6EJdkWeli1E0ViOVpGOM05FNtcrYuX0A/y5VQtmavwchdn02Fa0ueEu3I
|
||||
JU9gqYu8Ubpa9fE3xWGA97543yx8Y3/blL6EGD4yWzf+iMb3R2qOe/omTOPllk3g
|
||||
D4pghlaSQe5ZRzzvVfUBH1Qj6WTSHcUoRCKUYaaBFpLxapjAS90Vf6PL1GQabdaq
|
||||
JN/BUbC1dR/4Z+brelUy9NSvAXg7/HtndiRMl3sOU8wh6NXVpPYta4xhkGr230Cc
|
||||
t1kOMPRSq4px9DOx3vZJCyOUy1Ro2CgAU2px2PlBG+95/TrqTgO6G8DxGYSV2bDY
|
||||
ZpHX1MyRoaYAuRvvuffXt17CCHbiD5i9VYy6F8WBLd16l96RsgU6sREOryYoXFZ0
|
||||
K9oUA69PEabq7H6Tlg6sQgaQ6u1No/H4H2eYTtmhdcOnaRWPq5i/x7EZ/cnwp9zc
|
||||
7P0afMTrKl1ezXxXOVRVpmk7SD53lqXYXNJpy1pf2wTvPnl4mq1aCnQtHPmEFbuJ
|
||||
LzejLBKrelfm7HMmQfxiKYKCHcth96eclP9GXhmA6XkU2BqbXGDEetDv+YiRdaQl
|
||||
wKdfRhCZPBVBcy7DnqJoa0lss5l0e3lTu2+wW5Znb3FzXdJSnl1e6togF9IgJAkH
|
||||
n4Fgs4/33TXQi0XPmy/iTbiUR8Ht8HhGBO4A2QIDAQABo2MwYTAOBgNVHQ8BAf8E
|
||||
BAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVUtE9F56FzlMtVzABJaZ
|
||||
nNGB1V8wHwYDVR0jBBgwFoAUmmgyBnblEP+8fE5WcVV1fDkenHswDQYJKoZIhvcN
|
||||
AQELBQADggIBAIzbD8T6npRjMcTCHTfirl4r2eM9r6ANHIZgro5HK8EaTGaD2pQC
|
||||
3nnh9dJfnw/bbIUG6yEacAAfp1Krba/f+z3B6PyIdbhGAkYaAZzyujzcyPZN3yx0
|
||||
AIzZxwO4f9mpZ1Q95Xpn5ygozFKzZUg+AYW2qmyftDCVtHcZWBnKREgE64PMZ9b4
|
||||
/sajWdmx9jdr6algdEUu4kIxGvAq5C2pgydh1mpVcx4Znvroczip+dlUAb5cudGP
|
||||
krzCmdi9RxeGc/RIghNRNBtKVQtMh3nQwE0YOcIRY3T0WwCJHoRqCX9SoJvS/mYg
|
||||
mpm4YxLf9NXxnhTCNTCgBZ+lYqqW4nt6msh10inYg/nSDgWoU50VC5WOQwmVbAXx
|
||||
N4JrONvNXElWdEVkz8V2Lq1mwA6+4Mf1Rjau+j04z6bqZkdMYzCH0fG0to5B4fiM
|
||||
+XfoFDgZfnymSuEPKjo4vsGLwwNAwfsVNIuiqEkJODKf31p9YNgNW23v6uKzV/GR
|
||||
x6rKidp6XjfUkSXdmoPd4+qdhJLe+IQEVtoBUALlpGEYckin0L1/9Sl/GIucnkz3
|
||||
bjq+NazgnPeRb2YdfiQBsY5C7b9x7bbRZdtskCtIjrdzvYr+Hil0xHDlqRSlOHZz
|
||||
1snsRgG+DJF7rEPiXayz89JNrucWsrnyTYiQHANXWcwSKacILL1jneum
|
||||
-----END CERTIFICATE-----
|
||||
51
integration-tests/go-sql-server-driver/testdata/rsa_exp_key.pem
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIJKAIBAAKCAgEA3Z5SEqXviGECctbg3nHdQeSIeRyhbNyTpB+De7t3c6G1RFhU
|
||||
yBRPKdb8kptZ8cEZPzIbvsdP8YXTH6xX4hqNl5lPUXjGi7uw70wUvtAPIShpmjft
|
||||
0uSGv6ZaRhthp1whE0AVj++4j/E/kMw9Wtk3OgOnpUedbc3CVhZiU8suSMgZxlp9
|
||||
WS3dR8jRfNnhCEq08M2iki3eUuah3kcvByOJYhwzKNYlW57iQ/vUf0AD7t50JD2Q
|
||||
OZ876/gm1SvcATWr6/A1B7yZdoCPxpFclzabqqZF9t5V2wjRhh14MXK6FqFvaGpo
|
||||
P+kARto3PSmHLwC89Fj+hTCYXShn3Zgj3gNQODGd4XJSnQjj7ieBQpRjBceDQSW/
|
||||
D3Djvas0EcGo0Bs+HXCLhsEqzjJVlpkHfkRJYeX2CYVAw3n50aCcHqRWAtk6YEqh
|
||||
pXAGND5J5JBCENSc2mHvTLgtI1j4YQpZ5hjrBkfQ9tFrg82G5dKzhoT2wcJtYrJF
|
||||
HXOV+gSWhWvGPX8PHTic/T+0VwRxvRcyM5xMf/bjzhIn2yYGfwdnDJuW0h1lyk80
|
||||
1U33xmI88rYFdH1s5ZgQmyOi8YVU1JhA/SRREB7PirAW6mlnRZQcflJMksGWRGF7
|
||||
dhAQbdEeiOMfxNvXKdQm4t0zV1MJB75D1dW1Q5d/EoFf6Ql2mFH/PfFL0WsCAwEA
|
||||
AQKCAgAZIAfiAVoEtirW5jGcRoB1Jfmq8WoDs4Yxhzka4AgM3fp1AyapgFPxRGRi
|
||||
Iqax19iK551ppaMUmL10y88y3RvMYh8x92VbFi5bEt8POvtcIP7H8ytNS4dTVwLJ
|
||||
C/WoSbOeQRewt9bOyuUP/3e9Qv8V3rA09seMWVV8+RCwu0pGChmR+VRYtfBuYQAP
|
||||
DYyLqhyVaFrUA8s+ztLvJlbYkljS/Kt4J78YggzY9EYFHzbS7/lu2mPthHYArKOK
|
||||
a2yH3pPdLeB1PhaP7sdeFcDPi+teD3fDIzXMnVVTxSeJQ56BTlAZIGctR7c2USsO
|
||||
DhU7aPQDJ8vDQd0kQp5z6vm2A94mKqk+iWDiJY8Q1LSbs/u1W+f3QYJq/s8eahhQ
|
||||
4pbyb6rK8zyqynRTiNBW0al0ORdwzYLXsRgyn+DJSD+Yd84Iv9jnALVcYnGN5omd
|
||||
Km5wIBIuu7OpWzxE+aY4svV3KrWQPgDzL6iTHRc4WjldBR0LUBE7N9lvs3mu0WnW
|
||||
lgcGPuCfCo3DH1+j4YST3YmHq2viznYWJRGXZS15wcgopyAaWK9frMMmPWG4DGhD
|
||||
IhLJPdueth/TK9rYBe8TBV64CJrBzCFmYitBYWNfZo0J59Lu0n1ubWTy4a5AO0sa
|
||||
Z04D1YkAwNRaWODs7dyjl2LWJVxB8bmj+RwWo0ITASY+WE11wQKCAQEA/ols6B6O
|
||||
qP1cWz37f2wghLw6T/pXkmOidTdUbquZe7hr8wDtB+D56Diox3K0pMXKTsECIl7x
|
||||
rvt5YpQaNNCLikO532y7kynN5gEveIy5/hPjEQd3F+2hqF0wknSCMfqr1OsBeaRR
|
||||
RTEFpMYRZ2KDpjpuCMOb67cn/lkZHlIA9SDAFo+8xecLqgoOEzy8S/D86JCVndK9
|
||||
bbK2l2eoX6+FnnAdeZP6oTe2/WErPeU+moLS2monwL1F/Rpj5sPkwOlsQESxjxNh
|
||||
jto3/IZL/to3bdc0QHuiFEdukKHQXmvw9N7mXqoX7OimlzwY4K9uVP5mm24mPhlQ
|
||||
xzUYxOTK/ub0EQKCAQEA3uRz5fE64Af6NERdu5vRxcxck7H1FnOFxcbjH90ijJk+
|
||||
EL74ku+gbhaaF9wc/DWXAa9fCq5sJfmMyOxcHA3jbprm3OU3Uvl3xqAYY94LtXKc
|
||||
kNcfMNVydq5D7d0vhk1ZStISQ5iE0ooyi7k/+DpM9NUxrdMcVvc0UifsV3Wa8tnU
|
||||
25kmY4SNH0O75cwUHrBVAyGEAjoWY5tqvub7hAarxWUoqScxnuaZsQ/kDiUyEvvV
|
||||
E0lguQiuMV007uVx/pAB5pBGOmKTpn+FA/d7RZEjHdmhbmgUQTUM+wyLyCaXnyML
|
||||
7GNBa82gKwkt2SecoB+PqxyCmSKh7b900/fxTwP5uwKCAQEAjVxrNHqMBkQ3f6Dr
|
||||
xPebE7ypsfM5oV5eQjUJWjFJG1Hv+dRAz8hdYl/dNG967au/UH+WnNoX5XNaFapk
|
||||
54IOjSpR10i+39sus5di8tNNFFOdHr8DoDFkP2oJ6Lx19iXeOnCNsC3WyNOR69Ho
|
||||
pn8q2C3JIGrqdD2TI4n+Dj3CtGCM7brEPzCy5KuS55IqjQ54lvx9a1o3w+2lxG5Y
|
||||
L1P+pGBlrjjFz7VulkfZyRVA8HTJf23HSB6V+Rwn8WhH6e95JDRCXFCKNNjykPdQ
|
||||
y2gLsp/7L/i5qgOF3yNO4rGV75i/XkGe6f7HTmSc+GPVpbRn7dh9uq06lHfjmq3q
|
||||
IyjG0QKCAQA1xgHcEMW2dNY9M901PNNwF+yhyUYqw3Ybj+8NqekC6JmbqqcHs/4N
|
||||
cTB93yGzCy7CPk/8oLYAt2LqunNCZWtgLMjTtA0T3JGz/r3DojdK8DqriurAek5i
|
||||
KYrD4R/tE84eCe5UFoC81pB5Oxkextn4G1Mf75WfuYYK6AzR0NKwEjOTQzCKw9jU
|
||||
GSBEwWZ9POdVmQljDCaIo18ubUVyxbU9KzmTDZygDFw55m6LpxecktsGCyblnR2B
|
||||
VU5G8F3/HzkGKfp3bX7XpV4u5c78qpZBRlb27u8sCCN4kb9La1wbDXZ78jo/St2A
|
||||
ZHeGM1NLIjIkAv3S+hL867rAxMmX2YqvAoIBABYk3QFqAJ1D6oM0bKlwzFyxY45b
|
||||
VyKW7EhyYdiShOTx5dPJ39phEuFd9ShWMVoJZ1t+PGlC4PDxhm3p6w0XxQD8maNk
|
||||
o1R5lRcthYdiBOdVgE/LmGnTi88VbFsfQY1GzypYcej+7ioGapQwmWAhMxsIVoQW
|
||||
wfwgP28Ju0Lt7ocu2iqlYWs1QIdkH/eBMRgU48OqiyhOa3LuAD13OZa5fWWshce7
|
||||
R0cwnsavav+7SJ+9Y002DdpCKDuNUBk/7rnV7aJzmvTvI4Vtg1kMFF7kqdJDyyIZ
|
||||
Fvwp4QXkmCpoxIR5Lg3qP8YkBR1KnFnjUiagMKkcTNUvpy02sRlXKiuZIQ4=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
51
integration-tests/go-sql-server-driver/testdata/rsa_key.pem
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIJKwIBAAKCAgEA1RZnb7aWJUe1z0JZ0m82uYqPyQJhpTl99CJ7HjGZHXclEb/v
|
||||
a7PBWOT7hGcjR+VXCf8O58T9zMA/FHLaI55dhaBGY163iCHnz5sni0YElwUzRPoo
|
||||
qE15+wdrHz1xa+BnkW6KxxPBmSToM7ZVqwZD4pw5/XXGNrSc5YdB1iEXnXqIVZar
|
||||
OOVMu7tVJKRslWT6G015t/Uu4EsnhkZwIGiET31WXU5FBwmOMfh5/eFI+VO0wB+k
|
||||
GC83ihphSEaEMYGmz2mcPlINFwFQyKGJpTj4KYUI5C6YywxLe54cZxLzBuvGa0en
|
||||
O1hjkvnguh2rAHYJ8VpkaB0UjHSisg4eU8FW0RsZRwIXBTxGKg8FF2/h+/NQomuD
|
||||
AKOHlIm5QXRLULPe8pnkbRwaElNjkUCDmMeevEmcCg8d/x8Mh3f+Y2dKDNJDKFSY
|
||||
b444hd9LLtU7pLuhU9FLuDbCD39EqtwRxET89uOLfcshaG20/FnbTRxjjoxFFkj/
|
||||
4aP/YXs+q26B9jaNStUbjdsEJUOZjSEMj8JKwAjnYWL0DwrNIKeGN8IHa0O46nHD
|
||||
m18uYaJEQvdln4MAhVSBJSU/ZTqARJR+SEtgWj/uJPZPwPy10POzpqkb2ZSrGjF/
|
||||
S3VGaCxUtxM0gxXZ1nYLX0CSA19E9ZfC0GjYW3G3zVa0Ww/PgUgd7XZZb/cCAwEA
|
||||
AQKCAgEAsczL6i8UAW9giNv4Ttp1c0Pmzfaqu07JhhfvWzZPaIKt762TfO60ehQ2
|
||||
ujfE/Iykn7avHT0F+P7Ao7NhyS6vInylvuydf32rC1OPH2sBEXJJYVjK7AQZsBh0
|
||||
jdCa/0RzpqnyKkKV288r4VpXCSllI4Tv7kmDZso10F+X07AxIDnJ4ICjxgmuiUa6
|
||||
uKRp5g4nkviGg7SVtJmBFU1Y6fHL1PfkRF62wjR67b9DK0/7r+7kdxrvtS5QzW9N
|
||||
lR0h2J0yLwcUU/vq6DM00PPZAoASsbJLxPoMjABqS2+Wi3sIRdId8g4LwDz0eQL/
|
||||
PMzcjrnvee/OsADz0G7SAng9Xe2JHdNUUULUwXArVc8rw0krsPW51nHt5RnJJf1O
|
||||
UDzl90AgrHmpte3K9UAkVWpgpjlCpjNk32GZpP/qKuvSS/tADqx+CI4Nz727WkTn
|
||||
j4gCg0mdMqwNZL3ZJaVAFR1AAPptusMsIeZpuCePcveQq0oTYTj5iXeZN9ly62eU
|
||||
m5eloEucFmBQgW1CFl3RiOO220Q1Z7DvHSvIEquMB6FdY9UL4HFfvmPadKE1Yt91
|
||||
Ekd3JPJ7Imw2kZOr0RxSBknlho7mnIs4OmFhXs1Fi3vZ8E67dLzxYkjYE5WlXwIk
|
||||
330pCMI/8S1PWkZpCtX+FnP88bsKbdD85ckWMOjewG5sR2l4TAECggEBANt5y1Ip
|
||||
OWupcWXVlWXU6CkXbocv5SpVXUrJX9FECdXxG521Apn8d3ROF+J5Ef+NePRrIz+9
|
||||
PKGnEj1DrPEEmiIBYDieND9L1fJ2F4M2zmvawCVv+I1YDMQYDdj8Pj2L+pK8oZRo
|
||||
6G6edtFGKArlFNNc8n1aii5UicPJJfwb7GmUu3jIMzIHxkln0lt6I67/r1M9TA2Z
|
||||
7sYqMCochdyQPva9LgMKkR51PW8EmF95GDV/ZVd/OwlVQL84nMV2atQq0NOb9v8/
|
||||
WH5U4VzCTML9z+RVBm+XaR5Vd2sbVBRtDl7JU8ml7+GSfMpfRbrrCkwoZVFcDVFC
|
||||
QgtSecNi0cPt03cCggEBAPiMdHtWZaSL/W3MfkosQ7PaDmdsEsR2lasdSL6esVTz
|
||||
Tg0j+fzItPEgy79/xAZKAS8NDCgKKNs3YRcDe2TH/rxoG8HDrNN4ROMuF7c0ERwl
|
||||
fxX6Hj/Ku0zCUJYAUMfC1JNKKhUSgLO39p+BMEDlK78cX5AV2UESir309DoyZJPR
|
||||
br0Mp/1BCkf2fL8E5cdNCC2u9f3cA+Z9SSZe5zwS1VnicEugc2sXFpUltE9C48zq
|
||||
BcGt+ipO5vd98rGFED95y3qkgl+S43QT7pcywnEVFuN8MA909ksQhxszuU+/NST7
|
||||
QmaVhlbf1fvA6E+nRB4qeunfV4Di1hPJ96vFUuk7Z4ECggEBALBecnZG7H7I/niA
|
||||
J3tamWoUC10VrwnS6ZJkutwW2fSTucaFJF8MFA5hhy4WbtE3JeD8i6n371ZURQL4
|
||||
dGyGbzouct1svL20umwKXCC0lTL06f3ruggTnsaiMdaR9Y5OA4GRliMGHupt6zuH
|
||||
Ljx7FL45biAv9uT+SGsOJLkw002RGQoZj5J/zudESDX8s9o4W/L/SjOP5OJYGrZs
|
||||
j2HMhNyQ7/2/qxiXzFN/lNb8H+k1xAGJG68HVG2WF91SqMxwz7mNFvLNO9bhbOy2
|
||||
syrq/foWHYLlYLLgqYNnxaxYavjevrdH4roZlrCl2Qo5QOAsgibcW1NWdG5Wy8So
|
||||
j7rsTkcCggEBAOjiujXi40RFw0WBSYf4Z8t6cuqnEgKx1lVKECJEEYa2nxii8BbS
|
||||
fPA+uYqKjcMSzn8mq5BMzLFy928X3SO2XVJt/iwVS7etxmZThvcrOyjzXVvbS0Kc
|
||||
k9k9bULPsuEqBgKoiDvMZXl/0v7rjqoP0Wi1jjk0r6dQV13byodJNoJmx2suE62x
|
||||
po99Pq/BSAfxfstHV8jwwVrTY5onbGUWhIA/Mtc51Uuvi4JenM9zrn0PfitW98Ny
|
||||
wOl/QsrhEjNXzLfysxIYUTUvg+x6LETG7PZkI6goAKqfJujvEyDM2V/4aeQzEHgQ
|
||||
LbFBtKsF4EafqSHAGn5yjQJnyMBTGPfeqIECggEBAMNuS+B2+XT9QuzA6erkQpNj
|
||||
zEz2FmAwSdvoj9/Y4UnUfXb7xF98L84qyqEzoGMnrQLvYtL92tffKGObsjWqNWJs
|
||||
NYhTpfvblAscpafkG3mUZM7u5xB/0VHfzRUvt1YQ1N0LItUzJL15GWEDGBJdh/Vi
|
||||
3nyLciEETnCg+xzQQzkNAM3wg9ingBPjEdtOuZEgNmkR7ZUshhleD4dEp2yuzLG7
|
||||
jFAPDHPpfkEGuy6WNuf/OKzjgBiwXsnFpjVOt/JUH03xXh9moRrwZGG4kuzOwRzk
|
||||
i9k5kunsdiYsmcJ4RdtPx2TP/TgJoh6woTivFmhMVFsJ3jGFfWbzl94OxS5B9/U=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
32
integration-tests/go-sql-server-driver/testdata/rsa_root.pem
vendored
Normal file
@@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFbTCCA1WgAwIBAgIBATANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
MjMyMTAxMzdaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
MTEwLwYDVQQDEyhkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBS
b290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqtesAvWsaJka6DLe
56o0b8xxuO5MIf7OC6SWX3GzFSNCW0rVBYZXK3v/Rv4hyni9DGgStQfvDWA+ZEvF
x1EtdtfRWx41RvtgrjQHj965Zh+fI41TSVyXYRL5v08tnXzu5Y1v51XlbCwmtXyP
cq1oYdNi/4XaVLEx+nYfZ2u3WXnzuHBcQc7n67zLdOUnHYszWpRrvqgmSCuqUz1x
Q431Kq4qORkRNUD/lvaXiE7Dgp3lJeYqcq27YX5AKODgBsPYCqD1iKiSphLWb84O
LUymJJDQ/ytYuMuwEK4Y0jsCLU1NnUI/Esdk4UeYQSdumNE70/9UuCGXSgpCOLt1
o/jKkEiagI4vQ8W3Daa+G3heNT2ukvK3JAnn2nCcH/El2/KsaJCX5idu7qjyxHMT
srkocltpEU3wLDQ8OuezcYUOig9fLX2lbZhNmEWdV5aXr2QrGXX4YmHw36awr1zm
6c33bjL6Hz3r9HgoBROIJDV4SvpCpnRRZJcx62sAQEUYjp0ASN2b95alPKYGFzti
m93kqVxMvb98b+L1XX/5LaU6Auqz6rWVJf8cBxgLlqope4IsSpP4jNUUw+m2l4Q6
dtaAg43UfHsVNuwgXYqvYrBJerSViPL1eDaxTLmjtyuWqHqb/MdGeU6EbTThieHq
cDEPLzk26VzxfVqgI4jL7ggJibECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgIEMA8G
A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJpoMgZ25RD/vHxOVnFVdXw5Hpx7MA0G
CSqGSIb3DQEBCwUAA4ICAQBgkaeyYLRUku3eyBSzG9QGjTsYFRAj5ov4gEj3SkdG
3vBJ+QttGIyDksn0p/kSq6O1pWDWSk2hUqC/AV3w1mGZt/BX7MrM27HlwGPD42zh
ndxx73axiSLVovTpDD4b5gihjx55GJzM7VTjFKTTTD8DBsZg+vVNtbMJHG+oa8Fe
xCGE6TH4q39Xzy0mYMBxj90MLQlUgak1I7juWZTFs3T6eCPB+10onwYDwAaSjxt/
abqKz3FSEdCclBZ/t6W7HRJNznKjtgL3xm7B1yRkXJuGVRzdPOa9GW3n1XOzMmFy
OqEHGhlGov/5O+jGoAGMD1EyYzhMxD71Y0hfNMt6XVV39tsbzwFYM7SkhJ2xgHzs
mdGxac9S+Hcyki0JHKIAAkswAG2POsrBxMLcUsvp4Gie0H0Gz53/wZEc5zC05pr3
VSjMXwnrCKTNpx6/hJNqf1rJLjBkJCiCEpeK7o4FjnVNGnIE0KroUsnWpRXwVQ0I
b/BJTA1BwA5AOvCFWIN+kVJTIl3FBQAKmsEn37VCPhMiOuy2HbVeQL1P+VveMb7X
9K7JoFb2OA1V7UL1iak9489xvXBGQzsMnJXHzo43T2eIODBXH3Cd+cFCoC2Zshm3
IjGR72bNhDgb9DvnIE0cUA4Umrs+yJkK2BJe294fi6nD80SDwbuxj6pW1jOSChf8
vQ==
-----END CERTIFICATE-----
@@ -0,0 +1,489 @@
tests:
- name: tls, bad root, failover to standby fails
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: "use repo1"
- query: "call dolt_assume_cluster_role('standby', '11')"
error_match: failed to transition from primary to standby gracefully
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (0)"
- name: tls, expired leaf, failover to standby fails
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_exp_key.pem
- name: cert.pem
source_path: testdata/rsa_exp_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: "use repo1"
- query: "call dolt_assume_cluster_role('standby', '11')"
error_match: failed to transition from primary to standby gracefully
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (0)"
- name: tls, mismatched dns, failover to standby fails
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
server_name_dns: ["does-not-match.dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: "use repo1"
- query: "call dolt_assume_cluster_role('standby', '11')"
error_match: failed to transition from primary to standby gracefully
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (0)"
- name: tls, mismatched url, failover to standby fails
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance/does-not-match"]
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: "use repo1"
- query: "call dolt_assume_cluster_role('standby', '11')"
error_match: failed to transition from primary to standby gracefully
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (0)"
- name: tls, good rsa certs, create new database, primary replicates to standby, fails over, new primary replicates to standby, fails over, new primary has all writes
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: 'use repo1'
- exec: 'create table vals (i int primary key)'
- exec: 'insert into vals values (0),(1),(2),(3),(4)'
- query: "call dolt_assume_cluster_role('standby', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["5"]]
- query: "call dolt_assume_cluster_role('primary', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (5),(6),(7),(8),(9)'
- query: "call dolt_assume_cluster_role('standby', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["10"]]
- query: "call dolt_assume_cluster_role('primary', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (10),(11),(12),(13),(14)'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["15"]]
- name: tls, good ed25519 certs, create new database, primary replicates to standby, fails over, new primary replicates to standby, fails over, new primary has all writes
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 50051
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance"]
server_name_dns: ["dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/ed25519_key.pem
- name: cert.pem
source_path: testdata/ed25519_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: https://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 50052
tls_key: key.pem
tls_cert: cert.pem
tls_ca: root.pem
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance"]
server_name_dns: ["dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/ed25519_key.pem
- name: cert.pem
source_path: testdata/ed25519_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: 'create database repo1'
- exec: 'use repo1'
- exec: 'create table vals (i int primary key)'
- exec: 'insert into vals values (0),(1),(2),(3),(4)'
- query: "call dolt_assume_cluster_role('standby', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["5"]]
- query: "call dolt_assume_cluster_role('primary', 2)"
result:
columns: ["status"]
rows: [["0"]]
- on: server2
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (5),(6),(7),(8),(9)'
- query: "call dolt_assume_cluster_role('standby', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["10"]]
- query: "call dolt_assume_cluster_role('primary', 3)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: 'use repo1'
- exec: 'insert into vals values (10),(11),(12),(13),(14)'
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["15"]]
@@ -313,3 +313,20 @@ tests:
result:
columns: ["pk", "val"]
rows: [["1", "1"], ["2", "2"]]
- name: Create a temporary table and validate that it doesn't persist after a session closes
repos:
- name: repo1
server: {}
connections:
- on: repo1
queries:
- query: "show tables"
result:
columns: ["Tables_in_repo1"]
rows: []
- exec: "CREATE TEMPORARY TABLE t1(pk int primary key, val int)"
- exec: "INSERT INTO t1 VALUES (1, 1),(2, 2)"
- on: repo1
queries:
- exec: "INSERT INTO t1 VALUES (1, 1),(2, 2)"
error_match: "table not found"
@@ -16,9 +16,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/chain_key.pem
source_path: testdata/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/chain_cert.pem
source_path: testdata/rsa_chain.pem
- name: server.yaml
contents: |
listener:
@@ -33,9 +33,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/chain_key.pem
source_path: testdata/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/chain_cert.pem
source_path: testdata/rsa_chain.pem
- name: server.yaml
contents: |
listener:
@@ -53,9 +53,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/chain_key.pem
source_path: testdata/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/chain_cert.pem
source_path: testdata/rsa_chain.pem
- name: server.yaml
contents: |
listener: