Merge branch 'main' into jennifer/mysqldump
.github/workflows/ci-go-tests.yaml (4 changes)
@@ -48,9 +48,9 @@ jobs:
 echo "Testing Package: ${file_arr[$i]}"
 if [ "$MATRIX_OS" == 'ubuntu-18.04' ]
 then
-  go test -timeout 30m -race "${file_arr[$i]}"
+  go test -timeout 45m -race "${file_arr[$i]}"
 else
-  go test -timeout 30m "${file_arr[$i]}"
+  go test -timeout 45m "${file_arr[$i]}"
 fi
 succeeded=$(echo "$?")
 if [ "$succeeded" -ne 0 ]; then
@@ -131,6 +131,22 @@ func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, d
 		return HandleVErrAndExitCode(vErr, usage)
 	}
 
+	// Look for schemas and procedures table, and add to tblNames only for sql dumps
+	if resFormat == emptyFileExt || resFormat == sqlFileExt {
+		sysTblNames, err := doltdb.GetSystemTableNames(ctx, root)
+		if err != nil {
+			return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+		}
+		for _, tblName := range sysTblNames {
+			switch tblName {
+			case doltdb.SchemasTableName:
+				tblNames = append(tblNames, doltdb.SchemasTableName)
+			case doltdb.ProceduresTableName:
+				tblNames = append(tblNames, doltdb.ProceduresTableName)
+			}
+		}
+	}
+
 	switch resFormat {
 	case emptyFileExt, sqlFileExt:
 		if name == emptyStr {
@@ -149,7 +165,6 @@ func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, d
 
 	for _, tbl := range tblNames {
 		tblOpts := newTableArgs(tbl, dumpOpts.dest, apr.Contains(batchFlag))
-
 		err = dumpTable(ctx, dEnv, tblOpts, fPath)
 		if err != nil {
 			return HandleVErrAndExitCode(err, usage)
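The block added above folds the dolt_schemas and dolt_procedures system tables into the dump only when the target format is SQL. A self-contained sketch of that filtering rule, with the table-name constants stubbed in (the real values live on the doltdb package as SchemasTableName and ProceduresTableName):

package main

import "fmt"

// Stubbed constants; in Dolt these are doltdb.SchemasTableName and
// doltdb.ProceduresTableName.
const (
    schemasTableName    = "dolt_schemas"
    proceduresTableName = "dolt_procedures"
)

// filterSystemTables keeps only the system tables that a SQL dump
// should include, mirroring the switch in the hunk above.
func filterSystemTables(sysTblNames []string) []string {
    var keep []string
    for _, tblName := range sysTblNames {
        switch tblName {
        case schemasTableName, proceduresTableName:
            keep = append(keep, tblName)
        }
    }
    return keep
}

func main() {
    sys := []string{"dolt_log", "dolt_schemas", "dolt_procedures", "dolt_branches"}
    fmt.Println(filterSystemTables(sys)) // [dolt_schemas dolt_procedures]
}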
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"strconv"
 	"time"
 
 	"github.com/skratchdot/open-golang/open"
@@ -36,13 +37,16 @@ import (
 
 const (
 	loginRetryInterval = 5
+	authEndpointParam  = "auth-endpoint"
+	loginURLParam      = "login-url"
+	insecureParam      = "insecure"
 )
 
 var loginDocs = cli.CommandDocumentationContent{
-	ShortDesc: "Login to DoltHub",
-	LongDesc: `Login into DoltHub using the email in your config so you can pull from private repos and push to those you have permission to.
+	ShortDesc: "Login to DoltHub or DoltLab",
+	LongDesc: `Login into DoltHub or DoltLab using the email in your config so you can pull from private repos and push to those you have permission to.
 `,
-	Synopsis: []string{"[{{.LessThan}}creds{{.GreaterThan}}]"},
+	Synopsis: []string{"[--auth-endpoint <endpoint>] [--login-url <url>] [-i | --insecure] [{{.LessThan}}creds{{.GreaterThan}}]"},
 }
 
 // The LoginCmd doesn't handle its own signals, but should stop cancel global context when receiving SIGINT signal
@@ -78,7 +82,10 @@ func (cmd LoginCmd) CreateMarkdown(wr io.Writer, commandStr string) error {
 
 func (cmd LoginCmd) ArgParser() *argparser.ArgParser {
 	ap := argparser.NewArgParser()
-	ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"creds", "A specific credential to use for login."})
+	ap.SupportsString(authEndpointParam, "e", "hostname:port", fmt.Sprintf("Specify the endpoint used to authenticate this client. Must be used with --%s OR set in the configuration file as `%s`", loginURLParam, env.AddCredsUrlKey))
+	ap.SupportsString(loginURLParam, "url", "url", "Specify the login url where the browser will add credentials.")
+	ap.SupportsFlag(insecureParam, "i", "If set, makes insecure connection to remote authentication server")
+	ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"creds", "A specific credential to use for login. If omitted, new credentials will be generated."})
 	return ap
 }
@@ -93,11 +100,39 @@ func (cmd LoginCmd) Exec(ctx context.Context, commandStr string, args []string,
 	help, usage := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, loginDocs, ap))
 	apr := cli.ParseArgsOrDie(ap, args, help)
 
+	// use config values over defaults, flag values over config values
+	loginUrl := dEnv.Config.GetStringOrDefault(env.AddCredsUrlKey, env.DefaultLoginUrl)
+	loginUrl = apr.GetValueOrDefault(loginURLParam, loginUrl)
+
+	authHost := dEnv.Config.GetStringOrDefault(env.RemotesApiHostKey, env.DefaultRemotesApiHost)
+	authPort := dEnv.Config.GetStringOrDefault(env.RemotesApiHostPortKey, env.DefaultRemotesApiPort)
+
+	authEndpoint := apr.GetValueOrDefault(authEndpointParam, fmt.Sprintf("%s:%s", authHost, authPort))
+
+	// handle args supplied with empty strings
+	if loginUrl == "" {
+		loginUrl = env.DefaultLoginUrl
+	}
+	if authEndpoint == "" {
+		authEndpoint = fmt.Sprintf("%s:%s", authHost, authPort)
+	}
+
+	insecure := apr.Contains(insecureParam)
+
+	var err error
+	if !insecure {
+		insecureStr := dEnv.Config.GetStringOrDefault(env.DoltLabInsecureKey, "false")
+		insecure, err = strconv.ParseBool(insecureStr)
+		if err != nil {
+			HandleVErrAndExitCode(errhand.BuildDError(fmt.Sprintf("The config value of '%s' is '%s' which is not a valid true/false value", env.DoltLabInsecureKey, insecureStr)).Build(), usage)
+		}
+	}
+
 	var verr errhand.VerboseError
 	if apr.NArg() == 0 {
-		verr = loginWithNewCreds(ctx, dEnv)
+		verr = loginWithNewCreds(ctx, dEnv, authEndpoint, loginUrl, insecure)
 	} else if apr.NArg() == 1 {
-		verr = loginWithExistingCreds(ctx, dEnv, apr.Arg(0))
+		verr = loginWithExistingCreds(ctx, dEnv, apr.Arg(0), authEndpoint, loginUrl, insecure)
 	} else {
 		verr = errhand.BuildDError("").SetPrintUsage().Build()
 	}
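The new Exec logic resolves each setting in layers: config value over built-in default, flag value over config, and an empty result falls back to the default. A runnable sketch of that precedence chain, assuming nothing about Dolt's actual config API (the URLs are made-up examples):

package main

import "fmt"

// resolve applies the precedence used in the hunk above: flag value over
// config value over default, treating an empty string as "not set".
func resolve(flagVal, configVal, defaultVal string) string {
    v := defaultVal
    if configVal != "" {
        v = configVal
    }
    if flagVal != "" {
        v = flagVal
    }
    if v == "" {
        v = defaultVal
    }
    return v
}

func main() {
    // No flag, config set: config wins.
    fmt.Println(resolve("", "https://doltlab.example.com/creds", "https://dolthub.example.com/creds"))
    // Flag set: flag wins over both.
    fmt.Println(resolve("https://other.example.com/creds", "", "https://dolthub.example.com/creds"))
}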
@@ -116,7 +151,7 @@ var openBrowserFirst loginBehavior = 1
 // with an account on the server. Check first before opening a browser.
 var checkCredentialsThenOpenBrowser loginBehavior = 2
 
-func loginWithNewCreds(ctx context.Context, dEnv *env.DoltEnv) errhand.VerboseError {
+func loginWithNewCreds(ctx context.Context, dEnv *env.DoltEnv, authEndpoint, loginUrl string, insecure bool) errhand.VerboseError {
 	path, dc, err := actions.NewCredsFile(dEnv)
 
 	if err != nil {
@@ -128,10 +163,10 @@ func loginWithNewCreds(ctx context.Context, dEnv *env.DoltEnv) errhand.VerboseEr
 
 	cli.Println(path)
 
-	return loginWithCreds(ctx, dEnv, dc, openBrowserFirst)
+	return loginWithCreds(ctx, dEnv, dc, openBrowserFirst, authEndpoint, loginUrl, insecure)
 }
 
-func loginWithExistingCreds(ctx context.Context, dEnv *env.DoltEnv, idOrPubKey string) errhand.VerboseError {
+func loginWithExistingCreds(ctx context.Context, dEnv *env.DoltEnv, idOrPubKey, authEndpoint, credsEndpoint string, insecure bool) errhand.VerboseError {
 	credsDir, err := dEnv.CredsDir()
 
 	if err != nil {
@@ -150,11 +185,11 @@ func loginWithExistingCreds(ctx context.Context, dEnv *env.DoltEnv, idOrPubKey s
 		return errhand.BuildDError("error: failed to load creds from file").AddCause(err).Build()
 	}
 
-	return loginWithCreds(ctx, dEnv, dc, checkCredentialsThenOpenBrowser)
+	return loginWithCreds(ctx, dEnv, dc, checkCredentialsThenOpenBrowser, authEndpoint, credsEndpoint, insecure)
 }
 
-func loginWithCreds(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, behavior loginBehavior) errhand.VerboseError {
-	grpcClient, verr := getCredentialsClient(dEnv, dc)
+func loginWithCreds(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, behavior loginBehavior, authEndpoint, loginUrl string, insecure bool) errhand.VerboseError {
+	grpcClient, verr := getCredentialsClient(dEnv, dc, authEndpoint, insecure)
 	if verr != nil {
 		return verr
 	}
@@ -166,7 +201,7 @@ func loginWithCreds(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds,
 	}
 
 	if whoAmI == nil {
-		openBrowserForCredsAdd(dEnv, dc)
+		openBrowserForCredsAdd(dc, loginUrl)
 		cli.Println("Checking remote server looking for key association.")
 	}
 
@@ -198,19 +233,17 @@ func loginWithCreds(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds,
 	return nil
 }
 
-func openBrowserForCredsAdd(dEnv *env.DoltEnv, dc creds.DoltCreds) {
-	loginUrl := dEnv.Config.GetStringOrDefault(env.AddCredsUrlKey, env.DefaultLoginUrl)
+func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
 	url := fmt.Sprintf("%s#%s", loginUrl, dc.PubKeyBase32Str())
 	cli.Printf("Opening a browser to:\n\t%s\nPlease associate your key with your account.\n", url)
 	open.Start(url)
 }
 
-func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
-	host := dEnv.Config.GetStringOrDefault(env.RemotesApiHostKey, env.DefaultRemotesApiHost)
-	port := dEnv.Config.GetStringOrDefault(env.RemotesApiHostPortKey, env.DefaultRemotesApiPort)
+func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint string, insecure bool) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
 	endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
-		Endpoint: fmt.Sprintf("%s:%s", host, port),
+		Endpoint: authEndpoint,
 		Creds:    dc,
+		Insecure: insecure,
 	})
 	if err != nil {
 		return nil, errhand.BuildDError("error: unable to build dial options for connecting to server with credentials.").AddCause(err).Build()
@@ -19,14 +19,16 @@ import (
 	"fmt"
 	"io"
 
+	"github.com/dolthub/dolt/go/libraries/doltcore/merge"
+	"github.com/dolthub/dolt/go/libraries/doltcore/ref"
+	"github.com/dolthub/dolt/go/store/datas"
+
 	"github.com/dolthub/dolt/go/cmd/dolt/cli"
 	"github.com/dolthub/dolt/go/cmd/dolt/errhand"
 	eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
 	"github.com/dolthub/dolt/go/libraries/doltcore/env"
 	"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
-	"github.com/dolthub/dolt/go/libraries/doltcore/merge"
 	"github.com/dolthub/dolt/go/libraries/utils/argparser"
-	"github.com/dolthub/dolt/go/store/datas"
 )
 
 var pullDocs = cli.CommandDocumentationContent{
@@ -71,7 +73,6 @@ func (cmd PullCmd) EventType() eventsapi.ClientEventType {
 
 // Exec executes the command
 func (cmd PullCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
-
 	ap := cli.CreatePullArgParser()
 	help, usage := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, pullDocs, ap))
@@ -102,16 +103,26 @@ func (cmd PullCmd) Exec(ctx context.Context, commandStr string, args []string, d
 // pullHelper splits pull into fetch, prepare merge, and merge to interleave printing
 func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec) error {
 	srcDB, err := pullSpec.Remote.GetRemoteDBWithoutCaching(ctx, dEnv.DoltDB.ValueReadWriter().Format())
 
 	if err != nil {
 		return fmt.Errorf("failed to get remote db; %w", err)
 	}
-	for _, refSpec := range pullSpec.RefSpecs {
-		remoteTrackRef := refSpec.DestRef(pullSpec.Branch)
 
-		if remoteTrackRef != nil {
+	// Fetch all references
+	branchRefs, err := srcDB.GetHeadRefs(ctx)
+	if err != nil {
+		return env.ErrFailedToReadDb
+	}
 
-			srcDBCommit, err := actions.FetchRemoteBranch(ctx, dEnv.TempTableFilesDir(), pullSpec.Remote, srcDB, dEnv.DoltDB, pullSpec.Branch, buildProgStarter(downloadLanguage), stopProgFuncs)
+	// Go through every reference and every branch in each reference
+	for _, rs := range pullSpec.RefSpecs {
+		rsSeen := false // track invalid refSpecs
+		for _, branchRef := range branchRefs {
+			remoteTrackRef := rs.DestRef(branchRef)
+			if remoteTrackRef == nil {
+				continue
+			}
+			rsSeen = true
+			srcDBCommit, err := actions.FetchRemoteBranch(ctx, dEnv.TempTableFilesDir(), pullSpec.Remote, srcDB, dEnv.DoltDB, branchRef, buildProgStarter(downloadLanguage), stopProgFuncs)
 			if err != nil {
 				return err
 			}
@@ -172,15 +183,19 @@ func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec)
 		// TODO: We should add functionality to create a commit from a no-ff/normal merge operation instead of
 		// leaving the branch in a merged state.
 		}
+		if !rsSeen {
+			return fmt.Errorf("%w: '%s'", ref.ErrInvalidRefSpec, rs.GetRemRefToLocal())
+		}
+	}
 
 	if err != nil {
 		return err
 	}
 
 	err = actions.FetchFollowTags(ctx, dEnv.TempTableFilesDir(), srcDB, dEnv.DoltDB, buildProgStarter(downloadLanguage), stopProgFuncs)
 
 	if err != nil {
 		return err
 	}
 
 	return nil
 }
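The rewritten pullHelper fetches every remote head that any refspec maps to, and tracks whether each refspec matched anything so that an invalid refspec surfaces as ref.ErrInvalidRefSpec instead of being silently skipped. A stripped-down, runnable sketch of that control flow, using stub types in place of Dolt's ref machinery:

package main

import (
    "errors"
    "fmt"
)

var errInvalidRefSpec = errors.New("invalid ref spec")

// refSpec stands in for Dolt's ref.RemoteRefSpec; destRef returns the
// remote-tracking ref for a branch, or "" when the spec does not match.
type refSpec struct{ branch string }

func (rs refSpec) destRef(branchRef string) string {
    if rs.branch == "*" || rs.branch == branchRef {
        return "remotes/origin/" + branchRef
    }
    return ""
}

// fetchAll mirrors the loop structure in the hunk above: every refspec is
// checked against every remote head, and a refspec that matches nothing
// is reported as invalid.
func fetchAll(refSpecs []refSpec, branchRefs []string) error {
    for _, rs := range refSpecs {
        rsSeen := false // track invalid refSpecs
        for _, branchRef := range branchRefs {
            remoteTrackRef := rs.destRef(branchRef)
            if remoteTrackRef == "" {
                continue
            }
            rsSeen = true
            fmt.Println("fetching", branchRef, "->", remoteTrackRef)
        }
        if !rsSeen {
            return fmt.Errorf("%w: '%s'", errInvalidRefSpec, rs.branch)
        }
    }
    return nil
}

func main() {
    heads := []string{"main", "feature"}
    if err := fetchAll([]refSpec{{branch: "*"}, {branch: "gone"}}, heads); err != nil {
        fmt.Println("error:", err)
    }
}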
@@ -54,7 +54,7 @@ import (
 )
 
 const (
-	Version = "0.38.0"
+	Version = "0.39.0"
 )
 
 var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -55,7 +55,7 @@ require (
 	github.com/uber/jaeger-lib v2.4.0+incompatible // indirect
 	go.mongodb.org/mongo-driver v1.7.0 // indirect
 	go.uber.org/zap v1.15.0
-	golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
+	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
 	golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20220111092808-5a964db01320
@@ -68,7 +68,7 @@ require (
 )
 
 require (
-	github.com/dolthub/go-mysql-server v0.11.1-0.20220411234737-2819197dce90
+	github.com/dolthub/go-mysql-server v0.11.1-0.20220419170920-1ba079276d56
 	github.com/google/flatbuffers v2.0.5+incompatible
 	github.com/gosuri/uilive v0.0.4
 	github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
@@ -119,9 +119,9 @@ require (
 	go.uber.org/atomic v1.6.0 // indirect
 	go.uber.org/multierr v1.5.0 // indirect
 	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
-	golang.org/x/mod v0.5.1 // indirect
+	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
 	golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
-	golang.org/x/tools v0.1.9 // indirect
+	golang.org/x/tools v0.1.10 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2 // indirect
go/go.sum (15 changes)
@@ -170,8 +170,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
 github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
-github.com/dolthub/go-mysql-server v0.11.1-0.20220411234737-2819197dce90 h1:iL+XIoZ7H4oZ8X82YeFIzj1mx9PMTKG9SghJe1LfY5U=
-github.com/dolthub/go-mysql-server v0.11.1-0.20220411234737-2819197dce90/go.mod h1:+g6JxhIOOGTDfWK017iHjiSZG7denDB8sOypTWxcYNk=
+github.com/dolthub/go-mysql-server v0.11.1-0.20220419170920-1ba079276d56 h1:sJFVps2aCpflTacgmqZ3XlFjpbUYJ/v3qKnB/lMgh78=
+github.com/dolthub/go-mysql-server v0.11.1-0.20220419170920-1ba079276d56/go.mod h1:hsYBel2QX/95ePjfzFdwyvtgKPDF2S3uUC81kaO2hLc=
 github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
 github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
 github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
@@ -818,8 +818,9 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -852,8 +853,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1075,8 +1076,8 @@ golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4X
 golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
-golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -216,6 +216,9 @@ func (t *Table) GetSchemaHash(ctx context.Context) (hash.Hash, error) {
 }
 
 // UpdateSchema updates the table with the schema given and returns the updated table. The original table is unchanged.
+// This method only updates the schema of a table; the row data is unchanged. Schema alterations that require rebuilding
+// the table (e.g. adding a column in the middle, adding a new non-null column, adding a column in the middle of a
+// schema) must account for these changes separately.
 func (t *Table) UpdateSchema(ctx context.Context, sch schema.Schema) (*Table, error) {
 	table, err := t.table.SetSchema(ctx, sch)
 	if err != nil {
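The expanded doc comment pins down UpdateSchema's contract: it returns an updated table, leaves the original value untouched, and never rewrites row data. A toy illustration of that value-semantics contract (a stub type, not the real doltdb API):

package main

import "fmt"

// table stands in for Dolt's *doltdb.Table; updateSchema returns a new
// value and leaves the receiver untouched, matching the contract
// documented in the hunk above.
type table struct{ schema string }

func (t table) updateSchema(sch string) table {
    return table{schema: sch}
}

func main() {
    t1 := table{schema: "(id INT)"}
    t2 := t1.updateSchema("(id INT, name TEXT)")
    fmt.Println(t1.schema) // (id INT) — original unchanged
    fmt.Println(t2.schema) // (id INT, name TEXT)
}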
go/libraries/doltcore/env/actions/clone.go (9 changes)
@@ -264,7 +264,14 @@ func CloneRemote(ctx context.Context, srcDB *doltdb.DoltDB, remoteName, branch s
 		return err
 	}
 
-	ws := doltdb.EmptyWorkingSet(wsRef)
+	// Retrieve existing working set, delete if it exists
+	ws, err := dEnv.DoltDB.ResolveWorkingSet(ctx, wsRef)
+	if ws != nil {
+		dEnv.DoltDB.DeleteWorkingSet(ctx, wsRef)
+	}
+	ws = doltdb.EmptyWorkingSet(wsRef)
 
+	// Update to use current Working and Staged root
 	err = dEnv.UpdateWorkingSet(ctx, ws.WithWorkingRoot(rootVal).WithStagedRoot(rootVal))
 	if err != nil {
 		return err
go/libraries/doltcore/env/config.go (3 changes)
@@ -43,7 +43,8 @@ const (
 	RemotesApiHostKey     = "remotes.default_host"
 	RemotesApiHostPortKey = "remotes.default_port"
 
-	AddCredsUrlKey = "creds.add_url"
+	AddCredsUrlKey     = "creds.add_url"
+	DoltLabInsecureKey = "doltlab.insecure"
 
 	MetricsDisabled = "metrics.disabled"
 	MetricsHost     = "metrics.host"
@@ -131,6 +131,8 @@ func wrapConvertValueToNomsValue(
 		vInt = float64(val)
 	case types.InlineBlob:
 		vInt = *(*string)(unsafe.Pointer(&val))
+	case types.TupleRowStorage:
+		vInt = *(*string)(unsafe.Pointer(&val))
 	case types.Int:
 		vInt = int64(val)
 	case types.JSON:
@@ -85,10 +85,20 @@ func addColumnToTable(
 		return nil, err
 	}
 
-	return tbl.UpdateSchema(ctx, newSchema)
+	// if types.IsFormat_DOLT_1(t.nbf) {
+	// 	return nil
+	// }
+
+	newTable, err := tbl.UpdateSchema(ctx, newSchema)
+	if err != nil {
+		return nil, err
+	}
+
+	return newTable, nil
 }
 
 // addColumnToSchema creates a new schema with a column as specified by the params.
 // TODO: make this a schema operation, not in this package
 func addColumnToSchema(
 	sch schema.Schema,
 	tag uint64,
@@ -31,6 +31,7 @@ import (
 	"github.com/dolthub/dolt/go/libraries/doltcore/env"
 	"github.com/dolthub/dolt/go/libraries/doltcore/env/actions/commitwalk"
 	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dprocedures"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
@@ -162,6 +163,7 @@ var _ sql.TemporaryTableCreator = Database{}
 var _ sql.TableRenamer = Database{}
 var _ sql.TriggerDatabase = Database{}
 var _ sql.StoredProcedureDatabase = Database{}
+var _ sql.ExternalStoredProcedureDatabase = Database{}
 var _ sql.TransactionDatabase = Database{}
 var _ globalstate.StateProvider = Database{}
@@ -976,6 +978,11 @@ func (db Database) DropStoredProcedure(ctx *sql.Context, name string) error {
 	return DoltProceduresDropProcedure(ctx, db, name)
 }
 
+// GetExternalStoredProcedures implements sql.ExternalStoredProcedureDatabase.
+func (db Database) GetExternalStoredProcedures(ctx *sql.Context) ([]sql.ExternalStoredProcedureDetails, error) {
+	return dprocedures.DoltProcedures, nil
+}
+
 func (db Database) addFragToSchemasTable(ctx *sql.Context, fragType, name, definition string, created time.Time, existingErr error) (retErr error) {
 	tbl, err := GetOrCreateDoltSchemasTable(ctx, db)
 	if err != nil {
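The two pieces above are how Dolt's Go-implemented procedures get registered: the Database type asserts that it satisfies go-mysql-server's ExternalStoredProcedureDatabase interface and hands back the dprocedures catalog. A self-contained sketch of that registration shape, with stub types standing in for the go-mysql-server interfaces:

package main

import "fmt"

// externalProcedure stands in for sql.ExternalStoredProcedureDetails:
// a named procedure backed by a Go function rather than stored SQL.
type externalProcedure struct {
    name string
    fn   func(args ...string) (int, error)
}

// database mirrors the shape of the change above: the database satisfies
// an "external stored procedure" interface by returning a fixed catalog.
type database struct{}

var procedures = []externalProcedure{
    {name: "dolt_add", fn: func(args ...string) (int, error) { return 0, nil }},
    {name: "dolt_commit", fn: func(args ...string) (int, error) { return 0, nil }},
}

func (db database) GetExternalStoredProcedures() ([]externalProcedure, error) {
    return procedures, nil
}

func main() {
    procs, _ := database{}.GetExternalStoredProcedures()
    for _, p := range procs {
        fmt.Println("registered:", p.name)
    }
}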
@@ -23,18 +23,24 @@ import (
 
 const CommitFuncName = "commit"
 
+// Deprecated: please use the version in the dprocedures package
 type CommitFunc struct {
 	children []sql.Expression
 }
 
 // NewCommitFunc creates a new CommitFunc expression.
+// Deprecated: please use the version in the dprocedures package
 func NewCommitFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &CommitFunc{children: args}, nil
 }
 
 // Eval implements the Expression interface.
 func (cf *CommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
-	return doDoltCommit(ctx, row, cf.Children())
+	args, err := getDoltArgs(ctx, row, cf.Children())
+	if err != nil {
+		return noConflicts, err
+	}
+	return DoDoltCommit(ctx, args)
 }
 
 // String implements the Stringer interface.
@@ -27,25 +27,27 @@ import (
 
 const DoltAddFuncName = "dolt_add"
 
+// Deprecated: please use the version in the dprocedures package
 type DoltAddFunc struct {
 	children []sql.Expression
 }
 
 func (d DoltAddFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return 1, err
+	}
+	return DoDoltAdd(ctx, args)
+}
+
+func DoDoltAdd(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
 		return 1, fmt.Errorf("Empty database name.")
 	}
 
-	ap := cli.CreateAddArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-
-	if err != nil {
-		return 1, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateAddArgParser().Parse(args)
 	if err != nil {
 		return 1, err
 	}
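This Eval/DoDoltAdd split is the pattern repeated through the rest of the diff: Eval keeps the sql.Expression plumbing (evaluating child expressions into string arguments) while the new Do* function takes plain args, so a stored procedure can reach the same logic without a row or an expression tree. A distilled, runnable sketch of the pattern with stub types:

package main

import "fmt"

// expr stands in for sql.Expression; eval yields one string argument.
type expr struct{ val string }

func (e expr) eval() (string, error) { return e.val, nil }

// getArgs mirrors getDoltArgs: evaluate each child expression to a string.
func getArgs(children []expr) ([]string, error) {
    args := make([]string, len(children))
    for i, c := range children {
        v, err := c.eval()
        if err != nil {
            return nil, err
        }
        args[i] = v
    }
    return args, nil
}

// doDoltAdd mirrors DoDoltAdd: the logic lives behind a plain-args API
// that both the deprecated function and the stored procedure can call.
func doDoltAdd(args []string) (int, error) {
    fmt.Println("staging:", args)
    return 0, nil
}

// evalDoltAdd mirrors DoltAddFunc.Eval: expression plumbing, then delegate.
func evalDoltAdd(children []expr) (int, error) {
    args, err := getArgs(children)
    if err != nil {
        return 1, err
    }
    return doDoltAdd(args)
}

func main() {
    res, err := evalDoltAdd([]expr{{val: "-A"}})
    fmt.Println(res, err)
}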
@@ -68,7 +70,7 @@ func (d DoltAddFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 
 		err = dSess.SetRoots(ctx, dbName, roots)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 	} else {
 		roots, err = actions.StageTablesNoDocs(ctx, roots, apr.Args)
@@ -78,7 +80,7 @@ func (d DoltAddFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 
 		err = dSess.SetRoots(ctx, dbName, roots)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 	}
 
@@ -126,6 +128,7 @@ func (d DoltAddFunc) WithChildren(children ...sql.Expression) (sql.Expression, e
 }
 
 // NewDoltAddFunc creates a new DoltAddFunc expression whose children represents the args passed in DOLT_ADD.
+// Deprecated: please use the version in the dprocedures package
 func NewDoltAddFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltAddFunc{children: args}, nil
 }
@@ -35,10 +35,12 @@ const DoltBranchFuncName = "dolt_branch"
 var EmptyBranchNameErr = errors.New("error: cannot branch empty string")
 var InvalidArgErr = errors.New("error: invalid usage")
 
+// Deprecated: please use the version in the dprocedures package
 type DoltBranchFunc struct {
 	expression.NaryExpression
 }
 
+// Deprecated: please use the version in the dprocedures package
 func NewDoltBranchFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltBranchFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
@@ -62,20 +64,21 @@ func (d DoltBranchFunc) WithChildren(children ...sql.Expression) (sql.Expression
 }
 
 func (d DoltBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return 1, err
+	}
+	return DoDoltBranch(ctx, args)
+}
+
+func DoDoltBranch(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
 		return 1, fmt.Errorf("Empty database name.")
 	}
 
-	ap := cli.CreateBranchArgParser()
-
-	args, err := getDoltArgs(ctx, row, d.Children())
-	if err != nil {
-		return 1, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateBranchArgParser().Parse(args)
 	if err != nil {
 		return 1, err
 	}
@@ -34,25 +34,27 @@ const DoltCheckoutFuncName = "dolt_checkout"
 
 var ErrEmptyBranchName = errors.New("error: cannot checkout empty string")
 
+// Deprecated: please use the version in the dprocedures package
 type DoltCheckoutFunc struct {
 	expression.NaryExpression
 }
 
 func (d DoltCheckoutFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return 1, err
+	}
+	return DoDoltCheckout(ctx, args)
+}
+
+func DoDoltCheckout(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
 		return 1, fmt.Errorf("Empty database name.")
 	}
 
-	ap := cli.CreateCheckoutArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-
-	if err != nil {
-		return 1, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateCheckoutArgParser().Parse(args)
 	if err != nil {
 		return 1, err
 	}
@@ -192,6 +194,7 @@ func (d DoltCheckoutFunc) WithChildren(children ...sql.Expressi
 	return NewDoltCheckoutFunc(children...)
 }
 
+// Deprecated: please use the version in the dprocedures package
 func NewDoltCheckoutFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltCheckoutFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
@@ -31,45 +31,44 @@ const DoltCommitFuncName = "dolt_commit"
 var hashType = sql.MustCreateString(query.Type_TEXT, 32, sql.Collation_ascii_bin)
 
 // DoltCommitFunc runs a `dolt commit` in the SQL context, committing staged changes to head.
+// Deprecated: please use the version in the dprocedures package
 type DoltCommitFunc struct {
 	children []sql.Expression
 }
 
 // NewDoltCommitFunc creates a new DoltCommitFunc expression whose children represents the args passed in DOLT_COMMIT.
+// Deprecated: please use the version in the dprocedures package
 func NewDoltCommitFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltCommitFunc{children: args}, nil
 }
 
 func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
-	return doDoltCommit(ctx, row, d.Children())
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return noConflicts, err
+	}
+	return DoDoltCommit(ctx, args)
 }
 
-func doDoltCommit(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interface{}, error) {
+func DoDoltCommit(ctx *sql.Context, args []string) (string, error) {
 	// Get the information for the sql context.
 	dbName := ctx.GetCurrentDatabase()
-	ap := cli.CreateCommitArgParser()
-
-	// Get the args for DOLT_COMMIT.
-	args, err := getDoltArgs(ctx, row, exprs)
+	apr, err := cli.CreateCommitArgParser().Parse(args)
 	if err != nil {
-		return nil, err
-	}
-
-	apr, err := ap.Parse(args)
-	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	dSess := dsess.DSessFromSess(ctx.Session)
 	roots, ok := dSess.GetRoots(ctx, dbName)
 	if !ok {
-		return nil, fmt.Errorf("Could not load database %s", dbName)
+		return "", fmt.Errorf("Could not load database %s", dbName)
 	}
 
 	if apr.Contains(cli.AllFlag) {
 		roots, err = actions.StageAllTablesNoDocs(ctx, roots)
 		if err != nil {
-			return nil, fmt.Errorf(err.Error())
+			return "", fmt.Errorf(err.Error())
 		}
 	}
 
@@ -77,7 +76,7 @@ func doDoltCommit(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interf
 	if authorStr, ok := apr.GetValue(cli.AuthorParam); ok {
 		name, email, err = cli.ParseAuthor(authorStr)
 		if err != nil {
-			return nil, err
+			return "", err
 		}
 	} else {
 		name = dSess.Username()
@@ -86,7 +85,7 @@ func doDoltCommit(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interf
 
 	msg, msgOk := apr.GetValue(cli.CommitMessageArg)
 	if !msgOk {
-		return nil, fmt.Errorf("Must provide commit message.")
+		return "", fmt.Errorf("Must provide commit message.")
 	}
 
 	t := ctx.QueryTime()
@@ -95,7 +94,7 @@ func doDoltCommit(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interf
 		t, err = cli.ParseDate(commitTimeStr)
 
 		if err != nil {
-			return nil, fmt.Errorf(err.Error())
+			return "", fmt.Errorf(err.Error())
 		}
 	}
 
@@ -108,22 +107,22 @@ func doDoltCommit(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interf
 		Email: email,
 	})
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	// Nothing to commit, and we didn't pass --allowEmpty
 	if pendingCommit == nil {
-		return nil, errors.New("nothing to commit")
+		return "", errors.New("nothing to commit")
 	}
 
 	newCommit, err := dSess.DoltCommit(ctx, dbName, dSess.GetTransaction(), pendingCommit)
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	h, err := newCommit.HashOf()
 	if err != nil {
-		return nil, err
+		return "", err
 	}
 
 	return h.String(), nil
@@ -35,11 +35,13 @@ const (
 	cmdSuccess = 1
 )
 
+// Deprecated: please use the version in the dprocedures package
 type DoltFetchFunc struct {
 	expression.NaryExpression
 }
 
 // NewFetchFunc creates a new FetchFunc expression.
+// Deprecated: please use the version in the dprocedures package
 func NewFetchFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltFetchFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
@@ -63,6 +65,14 @@ func (d DoltFetchFunc) WithChildren(children ...sql.Expression) (sql.Expression,
 }
 
 func (d DoltFetchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return cmdFailure, err
+	}
+	return DoDoltFetch(ctx, args)
+}
+
+func DoDoltFetch(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
@@ -75,13 +85,7 @@ func (d DoltFetchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
 		return cmdFailure, fmt.Errorf("Could not load database %s", dbName)
 	}
 
-	ap := cli.CreateFetchArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-	if err != nil {
-		return cmdFailure, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateFetchArgParser().Parse(args)
 	if err != nil {
 		return cmdFailure, err
 	}
@@ -32,12 +32,14 @@ import (
 	"github.com/dolthub/dolt/go/libraries/utils/argparser"
 )
 
+// Deprecated: please use the version in the dprocedures package
 func NewDoltMergeFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltMergeFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
 
 const DoltMergeFuncName = "dolt_merge"
 
+// Deprecated: please use the version in the dprocedures package
 type DoltMergeFunc struct {
 	expression.NaryExpression
 }
@@ -50,10 +52,14 @@ const (
 )
 
 func (d DoltMergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
-	return doDoltMerge(ctx, row, d.Children())
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return noConflicts, err
+	}
+	return DoDoltMerge(ctx, args)
 }
 
-func doDoltMerge(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interface{}, error) {
+func DoDoltMerge(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
@@ -62,14 +68,7 @@ func doDoltMerge(ctx *sql.Context, row sql.Row, exprs []sql.Expression) (interfa
 
 	sess := dsess.DSessFromSess(ctx.Session)
 
-	ap := cli.CreateMergeArgParser()
-	args, err := getDoltArgs(ctx, row, exprs)
-
-	if err != nil {
-		return noConflicts, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateMergeArgParser().Parse(args)
 	if err != nil {
 		return noConflicts, err
 	}
@@ -34,11 +34,13 @@ import (
 
 const DoltPullFuncName = "dolt_pull"
 
+// Deprecated: please use the version in the dprocedures package
 type DoltPullFunc struct {
 	expression.NaryExpression
 }
 
 // NewPullFunc creates a new PullFunc expression.
+// Deprecated: please use the version in the dprocedures package
 func NewPullFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltPullFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
@@ -62,6 +64,14 @@ func (d DoltPullFunc) WithChildren(children ...sql.Expression) (sql.Expression,
 }
 
 func (d DoltPullFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return noConflicts, err
+	}
+	return DoDoltPull(ctx, args)
+}
+
+func DoDoltPull(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
@@ -74,10 +84,7 @@ func (d DoltPullFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 		return noConflicts, sql.ErrDatabaseNotFound.New(dbName)
 	}
 
-	ap := cli.CreatePullArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreatePullArgParser().Parse(args)
 	if err != nil {
 		return noConflicts, err
 	}
@@ -106,7 +113,7 @@ func (d DoltPullFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 		return noConflicts, err
 	}
 
-	var conflicts interface{}
+	var conflicts int
 	for _, refSpec := range pullSpec.RefSpecs {
 		remoteTrackRef := refSpec.DestRef(pullSpec.Branch)
@@ -31,11 +31,13 @@ import (
 
 const DoltPushFuncName = "dolt_push"
 
+// Deprecated: please use the version in the dprocedures package
 type DoltPushFunc struct {
 	expression.NaryExpression
 }
 
 // NewPushFunc creates a new PushFunc expression.
+// Deprecated: please use the version in the dprocedures package
 func NewPushFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &DoltPushFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
@@ -59,6 +61,14 @@ func (d DoltPushFunc) WithChildren(children ...sql.Expression) (sql.Expression,
 }
 
 func (d DoltPushFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return cmdFailure, err
+	}
+	return DoDoltPush(ctx, args)
+}
+
+func DoDoltPush(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
@@ -72,13 +82,7 @@ func (d DoltPushFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 		return cmdFailure, fmt.Errorf("could not load database %s", dbName)
 	}
 
-	ap := cli.CreatePushArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-	if err != nil {
-		return cmdFailure, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreatePushArgParser().Parse(args)
 	if err != nil {
 		return cmdFailure, err
 	}
@@ -28,11 +28,20 @@ import (
 
 const DoltResetFuncName = "dolt_reset"
 
+// Deprecated: please use the version in the dprocedures package
 type DoltResetFunc struct {
 	children []sql.Expression
 }
 
 func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, d.Children())
+	if err != nil {
+		return 1, err
+	}
+	return DoDoltReset(ctx, args)
+}
+
+func DoDoltReset(ctx *sql.Context, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 
 	if len(dbName) == 0 {
@@ -46,14 +55,7 @@ func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
 		return 1, fmt.Errorf("Could not load database %s", dbName)
 	}
 
-	ap := cli.CreateResetArgParser()
-	args, err := getDoltArgs(ctx, row, d.Children())
-
-	if err != nil {
-		return 1, err
-	}
-
-	apr, err := ap.Parse(args)
+	apr, err := cli.CreateResetArgParser().Parse(args)
 	if err != nil {
 		return 1, err
 	}
@@ -93,7 +95,7 @@ func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
 
 	ws, err := dSess.WorkingSet(ctx, dbName)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	err = dSess.SetWorkingSet(ctx, dbName, ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged))
 	if err != nil {
@@ -154,6 +156,7 @@ func (d DoltResetFunc) WithChildren(children ...sql.Expression) (sql.Expression,
 	return NewDoltResetFunc(children...)
 }
 
+// Deprecated: please use the version in the dprocedures package
 func NewDoltResetFunc(args ...sql.Expression) (sql.Expression, error) {
 	return DoltResetFunc{children: args}, nil
 }
@@ -26,38 +26,43 @@ const MergeFuncName = "merge"
 
 var ErrUncommittedChanges = goerrors.NewKind("cannot merge with uncommitted changes")
 
+// Deprecated: please use the version in the dprocedures package
 type MergeFunc struct {
 	children []sql.Expression
 }
 
 // NewMergeFunc creates a new MergeFunc expression.
+// Deprecated: please use the version in the dprocedures package
 func NewMergeFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &MergeFunc{children: args}, nil
 }
 
 // Eval implements the Expression interface.
-// todo(andy): merge with DOLT_MERGE()
-func (cf *MergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
-	return doDoltMerge(ctx, row, cf.Children())
+func (mf *MergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, mf.Children())
+	if err != nil {
+		return noConflicts, err
+	}
+	return DoDoltMerge(ctx, args)
 }
 
 // String implements the Stringer interface.
-func (cf *MergeFunc) String() string {
-	childrenStrings := make([]string, len(cf.children))
+func (mf *MergeFunc) String() string {
+	childrenStrings := make([]string, len(mf.children))
 
-	for i, child := range cf.children {
+	for i, child := range mf.children {
 		childrenStrings[i] = child.String()
 	}
 	return fmt.Sprintf("Merge(%s)", strings.Join(childrenStrings, ","))
 }
 
 // IsNullable implements the Expression interface.
-func (cf *MergeFunc) IsNullable() bool {
+func (mf *MergeFunc) IsNullable() bool {
 	return false
 }
 
-func (cf *MergeFunc) Resolved() bool {
-	for _, child := range cf.Children() {
+func (mf *MergeFunc) Resolved() bool {
+	for _, child := range mf.Children() {
 		if !child.Resolved() {
 			return false
 		}
@@ -65,16 +70,16 @@ func (cf *MergeFunc) Resolved() bool {
 	return true
 }
 
-func (cf *MergeFunc) Children() []sql.Expression {
-	return cf.children
+func (mf *MergeFunc) Children() []sql.Expression {
+	return mf.children
 }
 
 // WithChildren implements the Expression interface.
-func (cf *MergeFunc) WithChildren(children ...sql.Expression) (sql.Expression, error) {
+func (mf *MergeFunc) WithChildren(children ...sql.Expression) (sql.Expression, error) {
 	return NewMergeFunc(children...)
 }
 
 // Type implements the Expression interface.
-func (cf *MergeFunc) Type() sql.Type {
+func (mf *MergeFunc) Type() sql.Type {
 	return sql.Text
 }
@@ -32,6 +32,7 @@ const (
 )
 
 // RevertFunc represents the dolt function "dolt revert".
+// Deprecated: please use the version in the dprocedures package
 type RevertFunc struct {
 	expression.NaryExpression
 }
@@ -39,90 +40,94 @@ type RevertFunc struct {
 var _ sql.Expression = (*RevertFunc)(nil)
 
 // NewRevertFunc creates a new RevertFunc expression that reverts commits.
+// Deprecated: please use the version in the dprocedures package
 func NewRevertFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &RevertFunc{expression.NaryExpression{ChildExpressions: args}}, nil
 }
 
 // Eval implements the Expression interface.
 func (r *RevertFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	args, err := getDoltArgs(ctx, row, r.ChildExpressions)
+	if err != nil {
+		return 1, err
+	}
+	return DoDoltRevert(ctx, row, args)
+}
+
+func DoDoltRevert(ctx *sql.Context, row sql.Row, args []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 	dSess := dsess.DSessFromSess(ctx.Session)
 	ddb, ok := dSess.GetDoltDB(ctx, dbName)
 	if !ok {
-		return nil, fmt.Errorf("dolt database could not be found")
+		return 1, fmt.Errorf("dolt database could not be found")
 	}
 	workingSet, err := dSess.WorkingSet(ctx, dbName)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	workingRoot := workingSet.WorkingRoot()
 	headCommit, err := dSess.GetHeadCommit(ctx, dbName)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	headRoot, err := headCommit.GetRootValue(ctx)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	headHash, err := headRoot.HashOf()
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	workingHash, err := workingRoot.HashOf()
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	if !headHash.Equal(workingHash) {
-		return nil, fmt.Errorf("you must commit any changes before using revert")
+		return 1, fmt.Errorf("you must commit any changes before using revert")
 	}
 
 	headRef, err := dSess.CWBHeadRef(ctx, dbName)
 	if err != nil {
 		return nil, err
 	}
 
-	args, err := getDoltArgs(ctx, row, r.ChildExpressions)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 
 	apr, err := cli.CreateRevertArgParser().Parse(args)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 
 	commits := make([]*doltdb.Commit, apr.NArg())
 	for i, revisionStr := range apr.Args {
 		commitSpec, err := doltdb.NewCommitSpec(revisionStr)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		commit, err := ddb.Resolve(ctx, commitSpec, headRef)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		commits[i] = commit
 	}
 
	dbState, ok, err := dSess.LookupDbState(ctx, dbName)
 	if err != nil {
-		return nil, err
+		return 1, err
 	} else if !ok {
-		return nil, fmt.Errorf("Could not load database %s", dbName)
+		return 1, fmt.Errorf("Could not load database %s", dbName)
 	}
 
 	workingRoot, revertMessage, err := merge.Revert(ctx, ddb, workingRoot, commits, dbState.EditOpts())
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	workingHash, err = workingRoot.HashOf()
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	if !headHash.Equal(workingHash) {
 		err = dSess.SetRoot(ctx, dbName, workingRoot)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		stringType := typeinfo.StringDefaultType.ToSqlType()
 
@@ -135,11 +140,11 @@ func (r *RevertFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
 
 		commitFunc, err := NewDoltCommitFunc(expressions...)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		_, err = commitFunc.Eval(ctx, row)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 	}
 	return 0, nil
@@ -33,6 +33,7 @@ const (
 )
 
 // ConstraintsVerifyFunc represents the sql functions "verify_constraints" and "verify_constraints_all".
+// Deprecated: please use the version in the dprocedures package
 type ConstraintsVerifyFunc struct {
 	expression.NaryExpression
 	isAll bool
@@ -41,71 +42,81 @@ type ConstraintsVerifyFunc struct {
 var _ sql.Expression = (*ConstraintsVerifyFunc)(nil)
 
 // NewConstraintsVerifyFunc creates a new ConstraintsVerifyFunc expression that verifies the diff.
+// Deprecated: please use the version in the dprocedures package
 func NewConstraintsVerifyFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &ConstraintsVerifyFunc{expression.NaryExpression{ChildExpressions: args}, false}, nil
 }
 
 // NewConstraintsVerifyAllFunc creates a new ConstraintsVerifyFunc expression that verifies all rows.
+// Deprecated: please use the version in the dprocedures package
 func NewConstraintsVerifyAllFunc(args ...sql.Expression) (sql.Expression, error) {
 	return &ConstraintsVerifyFunc{expression.NaryExpression{ChildExpressions: args}, true}, nil
 }
 
 // Eval implements the Expression interface.
 func (vc *ConstraintsVerifyFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
+	vals := make([]string, len(vc.ChildExpressions))
+	for i, expr := range vc.ChildExpressions {
+		evaluatedVal, err := expr.Eval(ctx, row)
+		if err != nil {
+			return 1, err
+		}
+		val, ok := evaluatedVal.(string)
+		if !ok {
+			return 1, sql.ErrUnexpectedType.New(i, reflect.TypeOf(evaluatedVal))
+		}
+		vals[i] = val
+	}
+	return DoDoltConstraintsVerify(ctx, vc.isAll, vals)
+}
+
+func DoDoltConstraintsVerify(ctx *sql.Context, isAll bool, vals []string) (int, error) {
 	dbName := ctx.GetCurrentDatabase()
 	dSess := dsess.DSessFromSess(ctx.Session)
 	workingSet, err := dSess.WorkingSet(ctx, dbName)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	workingRoot := workingSet.WorkingRoot()
 	var comparingRoot *doltdb.RootValue
-	if vc.isAll {
+	if isAll {
 		comparingRoot, err = doltdb.EmptyRootValue(ctx, workingRoot.VRW())
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 	} else {
 		headCommit, err := dSess.GetHeadCommit(ctx, dbName)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		comparingRoot, err = headCommit.GetRootValue(ctx)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 	}
 
 	tableSet := set.NewStrSet(nil)
-	for i, expr := range vc.ChildExpressions {
-		evaluatedVal, err := expr.Eval(ctx, row)
-		if err != nil {
-			return nil, err
-		}
-		val, ok := evaluatedVal.(string)
-		if !ok {
-			return nil, sql.ErrUnexpectedType.New(i, reflect.TypeOf(evaluatedVal))
-		}
+	for _, val := range vals {
 		_, tableName, ok, err := workingRoot.GetTableInsensitive(ctx, val)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		if !ok {
-			return nil, sql.ErrTableNotFound.New(tableName)
+			return 1, sql.ErrTableNotFound.New(tableName)
 		}
 		tableSet.Add(tableName)
 	}
 
 	newRoot, tablesWithViolations, err := merge.AddConstraintViolations(ctx, workingRoot, comparingRoot, tableSet)
 	if err != nil {
-		return nil, err
+		return 1, err
 	}
 	if tablesWithViolations.Size() == 0 {
 		return 1, nil
 	} else {
 		err = dSess.SetRoot(ctx, dbName, newRoot)
 		if err != nil {
-			return nil, err
+			return 1, err
 		}
 		return 0, nil
 	}
go/libraries/doltcore/sqle/dprocedures/dolt_add.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dprocedures
+
+import (
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
+
+	"github.com/dolthub/go-mysql-server/sql"
+)
+
+// dolt_add is the stored procedure version of the function `dolt_add`.
+func dolt_add(ctx *sql.Context, args ...string) (sql.RowIter, error) {
+	res, err := dfunctions.DoDoltAdd(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+	return rowToIter(int64(res)), nil
+}
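Each new dprocedures file is a thin adapter: it calls the shared Do* helper from dfunctions and wraps the scalar result in a one-row iterator via rowToIter, a package helper this diff does not show. A hedged sketch of what such a wrapper does, with the iterator type stubbed out:

package main

import "fmt"

// rowIter stands in for sql.RowIter; here a row is just []interface{}.
type rowIter struct {
    rows [][]interface{}
    pos  int
}

func (it *rowIter) next() ([]interface{}, bool) {
    if it.pos >= len(it.rows) {
        return nil, false
    }
    r := it.rows[it.pos]
    it.pos++
    return r, true
}

// rowToIter mirrors what the helper presumably does: wrap the procedure's
// scalar results into a single-row iterator.
func rowToIter(vals ...interface{}) *rowIter {
    return &rowIter{rows: [][]interface{}{vals}}
}

func main() {
    it := rowToIter(int64(0)) // e.g. dolt_add's success code
    for r, ok := it.next(); ok; r, ok = it.next() {
        fmt.Println(r)
    }
}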
go/libraries/doltcore/sqle/dprocedures/dolt_branch.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dprocedures
+
+import (
+	"github.com/dolthub/go-mysql-server/sql"
+
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
+)
+
+// dolt_branch is the stored procedure version of the function `dolt_branch`.
+func dolt_branch(ctx *sql.Context, args ...string) (sql.RowIter, error) {
+	res, err := dfunctions.DoDoltBranch(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+	return rowToIter(int64(res)), nil
+}
30
go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_checkout is the stored procedure version of the function `dolt_checkout`.
func dolt_checkout(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltCheckout(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_commit.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"

	"github.com/dolthub/go-mysql-server/sql"
)

// dolt_commit is the stored procedure version of the functions `commit` and `dolt_commit`.
func dolt_commit(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltCommit(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(res), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_fetch.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_fetch is the stored procedure version of the function `dolt_fetch`.
func dolt_fetch(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltFetch(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_merge.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_merge is the stored procedure version of the functions `merge` and `dolt_merge`.
func dolt_merge(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltMerge(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_pull.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_pull is the stored procedure version of the function `dolt_pull`.
func dolt_pull(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltPull(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_push.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_push is the stored procedure version of the function `dolt_push`.
func dolt_push(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltPush(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_reset.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_reset is the stored procedure version of the function `dolt_reset`.
func dolt_reset(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltReset(ctx, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
30
go/libraries/doltcore/sqle/dprocedures/dolt_revert.go
Normal file
@@ -0,0 +1,30 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_revert is the stored procedure version of the function `revert`.
func dolt_revert(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltRevert(ctx, nil, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
@@ -0,0 +1,39 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
)

// dolt_verify_constraints is the stored procedure version of the function `constraints_verify`.
func dolt_verify_constraints(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltConstraintsVerify(ctx, false, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}

// dolt_verify_all_constraints is the stored procedure version of the function `constraints_verify_all`.
func dolt_verify_all_constraints(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	res, err := dfunctions.DoDoltConstraintsVerify(ctx, true, args)
	if err != nil {
		return nil, err
	}
	return rowToIter(int64(res)), nil
}
67
go/libraries/doltcore/sqle/dprocedures/init.go
Normal file
@@ -0,0 +1,67 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dprocedures

import "github.com/dolthub/go-mysql-server/sql"

var DoltProcedures = []sql.ExternalStoredProcedureDetails{
	{Name: "dolt_add", Schema: int64Schema("failed"), Function: dolt_add},
	{Name: "dolt_branch", Schema: int64Schema("failed"), Function: dolt_branch},
	{Name: "dolt_checkout", Schema: int64Schema("failed"), Function: dolt_checkout},
	{Name: "dolt_commit", Schema: stringSchema("hash"), Function: dolt_commit},
	{Name: "dolt_fetch", Schema: int64Schema("success"), Function: dolt_fetch},
	{Name: "dolt_merge", Schema: int64Schema("no_conflicts"), Function: dolt_merge},
	{Name: "dolt_pull", Schema: int64Schema("no_conflicts"), Function: dolt_pull},
	{Name: "dolt_push", Schema: int64Schema("success"), Function: dolt_push},
	{Name: "dolt_reset", Schema: int64Schema("failed"), Function: dolt_reset},
	{Name: "dolt_revert", Schema: int64Schema("failed"), Function: dolt_revert},
	{Name: "dolt_verify_constraints", Schema: int64Schema("no_violations"), Function: dolt_verify_constraints},
	{Name: "dolt_verify_all_constraints", Schema: int64Schema("no_violations"), Function: dolt_verify_all_constraints},
}

// stringSchema returns a non-nullable schema with all columns as LONGTEXT.
func stringSchema(columnNames ...string) sql.Schema {
	sch := make(sql.Schema, len(columnNames))
	for i, colName := range columnNames {
		sch[i] = &sql.Column{
			Name:     colName,
			Type:     sql.LongText,
			Nullable: false,
		}
	}
	return sch
}

// int64Schema returns a non-nullable schema with all columns as BIGINT.
func int64Schema(columnNames ...string) sql.Schema {
	sch := make(sql.Schema, len(columnNames))
	for i, colName := range columnNames {
		sch[i] = &sql.Column{
			Name:     colName,
			Type:     sql.Int64,
			Nullable: false,
		}
	}
	return sch
}

// rowToIter returns a sql.RowIter with a single row containing the values passed in.
func rowToIter(vals ...interface{}) sql.RowIter {
	row := make(sql.Row, len(vals))
	for i, val := range vals {
		row[i] = val
	}
	return sql.RowsToRowIter(row)
}
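As an aside, here is a minimal sketch of how a caller outside the engine might dispatch through this registration table. It assumes the `Function` field carries the wrapper signature used throughout this package, `func(*sql.Context, ...string) (sql.RowIter, error)`; the `callProcedure` helper is illustrative and not part of the package.

```go
package main

import (
	"fmt"
	"io"

	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dprocedures"
)

// callProcedure looks up name in DoltProcedures and invokes its wrapper,
// returning the single result row the wrappers above produce.
func callProcedure(ctx *sql.Context, name string, args ...string) (sql.Row, error) {
	for _, proc := range dprocedures.DoltProcedures {
		if proc.Name != name {
			continue
		}
		// Assumption: Function holds the wrapper signature used above.
		fn, ok := proc.Function.(func(*sql.Context, ...string) (sql.RowIter, error))
		if !ok {
			return nil, fmt.Errorf("unexpected function type for %s", name)
		}
		iter, err := fn(ctx, args...)
		if err != nil {
			return nil, err
		}
		row, err := iter.Next(ctx)
		if err != nil && err != io.EOF {
			return nil, err
		}
		return row, iter.Close(ctx)
	}
	return nil, fmt.Errorf("no such procedure: %s", name)
}
```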
@@ -16,10 +16,12 @@ package enginetest

import (
	"context"
	"strings"
	"testing"

	"github.com/dolthub/go-mysql-server/enginetest"
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/go-mysql-server/sql/plan"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -58,7 +60,7 @@ func TestSingleQuery(t *testing.T) {
	//engine.Analyzer.Debug = true
	//engine.Analyzer.Verbose = true

	enginetest.TestQuery(t, harness, engine, test.Query, test.Expected, test.ExpectedColumns, test.Bindings)
	enginetest.TestQuery(t, harness, engine, test.Query, test.Expected, test.ExpectedColumns)
}

// Convenience test for debugging a single query. Unskip and set to the desired query.
@@ -69,42 +71,25 @@ func TestSingleScript(t *testing.T) {
		{
			Name: "Two column index",
			SetUpScript: []string{
				`CREATE TABLE test (pk BIGINT PRIMARY KEY, v1 BIGINT, v2 BIGINT, INDEX (v1, v2));`,
				`INSERT INTO test VALUES (0,0,48),(1,0,52),(2,2,4),(3,2,10),(4,3,35),(5,5,36),(6,5,60),(7,6,1),(8,6,51),
				(9,6,60),(10,6,73),(11,9,44),(12,9,97),(13,13,44),(14,14,53),(15,14,57),(16,14,98),(17,16,19),(18,16,53),(19,16,95),
				(20,18,31),(21,19,48),(22,19,75),(23,19,97),(24,24,60),(25,25,14),(26,25,31),(27,27,9),(28,27,24),(29,28,24),(30,28,83),
				(31,31,14),(32,33,39),(33,34,22),(34,34,91),(35,35,89),(36,38,20),(37,38,66),(38,39,55),(39,39,86),(40,40,97),(41,42,0),
				(42,42,82),(43,43,63),(44,44,48),(45,44,67),(46,45,22),(47,45,31),(48,45,63),(49,45,86),(50,46,46),(51,47,5),(52,48,22),
				(53,49,0),(54,50,0),(55,50,14),(56,51,35),(57,54,38),(58,56,0),(59,56,60),(60,57,29),(61,57,49),(62,58,12),(63,58,32),
				(64,59,29),(65,59,45),(66,59,54),(67,60,66),(68,61,3),(69,61,34),(70,63,19),(71,63,69),(72,65,80),(73,65,97),(74,67,95),
				(75,68,11),(76,69,34),(77,72,52),(78,74,81),(79,76,39),(80,78,0),(81,78,90),(82,79,36),(83,80,61),(84,80,88),(85,81,4),
				(86,82,16),(87,83,30),(88,83,74),(89,84,9),(90,84,45),(91,86,56),(92,86,88),(93,87,51),(94,89,3),(95,93,19),(96,93,21),
				(97,93,96),(98,98,0),(99,98,51),(100,98,61);`,
				`CREATE TABLE a (x int primary key, y int)`,
				`CREATE TABLE b (x int primary key, y int)`,
				`insert into a values (0,0), (1,1)`,
				`insert into b values (0,0), (1,1)`,
			},
			Assertions: []enginetest.ScriptTestAssertion{
				{
					Query: "SELECT * FROM test WHERE (((v1<20 AND v2<=46) OR (v1<>4 AND v2=26)) OR (v1>36 AND v2<>13));",
					Expected: []sql.Row{
						{58, 56, 0}, {61, 57, 49}, {72, 65, 80}, {85, 81, 4}, {3, 2, 10}, {49, 45, 86}, {5, 5, 36}, {50, 46, 46}, {62, 58, 12}, {92, 86, 88}, {47, 45, 31}, {54, 50, 0}, {55, 50, 14}, {87, 83, 30}, {91, 86, 56}, {66, 59, 54}, {76, 69, 34}, {79, 76, 39}, {46, 45, 22}, {57, 54, 38}, {68, 61, 3}, {93, 87, 51}, {4, 3, 35}, {7, 6, 1}, {45, 44, 67}, {52, 48, 22}, {2, 2, 4}, {53, 49, 0}, {69, 61, 34}, {73, 65, 97}, {90, 84, 45}, {82, 79, 36}, {11, 9, 44}, {20, 18, 31}, {41, 42, 0}, {43, 43, 63}, {65, 59, 45}, {100, 98, 61}, {95, 93, 19}, {13, 13, 44}, {56, 51, 35}, {59, 56, 60}, {67, 60, 66}, {77, 72, 52}, {89, 84, 9}, {63, 58, 32}, {83, 80, 61}, {39, 39, 86}, {17, 16, 19}, {38, 39, 55}, {40, 40, 97}, {74, 67, 95}, {78, 74, 81}, {81, 78, 90}, {88, 83, 74}, {37, 38, 66}, {48, 45, 63}, {51, 47, 5}, {64, 59, 29}, {80, 78, 0}, {86, 82, 16}, {96, 93, 21}, {98, 98, 0}, {75, 68, 11}, {84, 80, 88}, {99, 98, 51}, {44, 44, 48}, {60, 57, 29}, {70, 63, 19}, {71, 63, 69}, {36, 38, 20}, {42, 42, 82}, {94, 89, 3}, {97, 93, 96},
					},
					Query: `UPDATE a INNER JOIN b on a.x = b.x SET a.y = b.y + 1`,
					Expected: []sql.Row{{sql.OkResult{
						RowsAffected: uint64(2),
						Info:         plan.UpdateInfo{Matched: 1, Updated: 2},
					}}},
				},
				{
					Query: "SELECT * FROM test WHERE (((v1<=52 AND v2<40) AND (v1<30) OR (v1<=75 AND v2 BETWEEN 54 AND 54)) OR (v1<>31 AND v2<>56));",
					Expected: []sql.Row{
						{19, 16, 95}, {58, 56, 0}, {61, 57, 49}, {72, 65, 80}, {85, 81, 4}, {3, 2, 10}, {49, 45, 86}, {5, 5, 36}, {9, 6, 60}, {50, 46, 46}, {62, 58, 12}, {92, 86, 88}, {15, 14, 57}, {47, 45, 31}, {54, 50, 0}, {55, 50, 14}, {87, 83, 30}, {16, 14, 98}, {66, 59, 54}, {76, 69, 34}, {79, 76, 39}, {21, 19, 48}, {46, 45, 22}, {57, 54, 38}, {68, 61, 3}, {93, 87, 51}, {4, 3, 35}, {7, 6, 1}, {45, 44, 67}, {52, 48, 22}, {2, 2, 4}, {12, 9, 97}, {30, 28, 83}, {53, 49, 0}, {69, 61, 34}, {73, 65, 97}, {90, 84, 45}, {82, 79, 36}, {0, 0, 48}, {10, 6, 73}, {11, 9, 44}, {20, 18, 31}, {41, 42, 0}, {43, 43, 63}, {65, 59, 45}, {100, 98, 61}, {95, 93, 19}, {1, 0, 52}, {13, 13, 44}, {56, 51, 35}, {59, 56, 60}, {67, 60, 66}, {77, 72, 52}, {89, 84, 9}, {24, 24, 60}, {33, 34, 22}, {35, 35, 89}, {63, 58, 32}, {83, 80, 61}, {39, 39, 86}, {8, 6, 51}, {14, 14, 53}, {17, 16, 19}, {23, 19, 97}, {26, 25, 31}, {29, 28, 24}, {38, 39, 55}, {40, 40, 97}, {74, 67, 95}, {78, 74, 81}, {81, 78, 90}, {88, 83, 74}, {28, 27, 24}, {37, 38, 66}, {48, 45, 63}, {51, 47, 5}, {64, 59, 29}, {80, 78, 0}, {86, 82, 16}, {96, 93, 21}, {98, 98, 0}, {25, 25, 14}, {27, 27, 9}, {32, 33, 39}, {75, 68, 11}, {84, 80, 88}, {99, 98, 51}, {6, 5, 60}, {22, 19, 75}, {44, 44, 48}, {60, 57, 29}, {70, 63, 19}, {71, 63, 69}, {18, 16, 53}, {34, 34, 91}, {36, 38, 20}, {42, 42, 82}, {94, 89, 3}, {97, 93, 96},
					},
				},
					Query: "SELECT * FROM a;",

				{
					Query: "SELECT * FROM test WHERE ((v1>42 AND v2<=13) OR (v1=7));",
					Expected: []sql.Row{
						{58, 56, 0}, {85, 81, 4}, {62, 58, 12}, {54, 50, 0}, {68, 61, 3}, {53, 49, 0}, {89, 84, 9}, {51, 47, 5}, {80, 78, 0}, {98, 98, 0}, {75, 68, 11}, {94, 89, 3},
					},
				},
				{
					Query: "SELECT * FROM test WHERE (((((v1<71 AND v2<7) OR (v1<=21 AND v2<=48)) OR (v1=44 AND v2 BETWEEN 21 AND 83)) OR (v1<=72 AND v2<>27)) OR (v1=35 AND v2 BETWEEN 78 AND 89));",
					Expected: []sql.Row{
						{19, 16, 95}, {58, 56, 0}, {61, 57, 49}, {72, 65, 80}, {3, 2, 10}, {49, 45, 86}, {5, 5, 36}, {9, 6, 60}, {50, 46, 46}, {62, 58, 12}, {15, 14, 57}, {47, 45, 31}, {54, 50, 0}, {55, 50, 14}, {16, 14, 98}, {66, 59, 54}, {76, 69, 34}, {21, 19, 48}, {46, 45, 22}, {57, 54, 38}, {68, 61, 3}, {4, 3, 35}, {7, 6, 1}, {45, 44, 67}, {52, 48, 22}, {2, 2, 4}, {12, 9, 97}, {30, 28, 83}, {53, 49, 0}, {69, 61, 34}, {73, 65, 97}, {0, 0, 48}, {10, 6, 73}, {11, 9, 44}, {20, 18, 31}, {41, 42, 0}, {43, 43, 63}, {65, 59, 45}, {1, 0, 52}, {13, 13, 44}, {56, 51, 35}, {59, 56, 60}, {67, 60, 66}, {77, 72, 52}, {24, 24, 60}, {33, 34, 22}, {35, 35, 89}, {63, 58, 32}, {39, 39, 86}, {8, 6, 51}, {14, 14, 53}, {17, 16, 19}, {23, 19, 97}, {26, 25, 31}, {29, 28, 24}, {38, 39, 55}, {40, 40, 97}, {74, 67, 95}, {28, 27, 24}, {37, 38, 66}, {48, 45, 63}, {51, 47, 5}, {64, 59, 29}, {25, 25, 14}, {27, 27, 9}, {32, 33, 39}, {75, 68, 11}, {6, 5, 60}, {22, 19, 75}, {31, 31, 14}, {44, 44, 48}, {60, 57, 29}, {70, 63, 19}, {71, 63, 69}, {18, 16, 53}, {34, 34, 91}, {36, 38, 20}, {42, 42, 82},
						{0, 1},
						{1, 2},
					},
				},
			},
@@ -116,12 +101,38 @@ func TestSingleScript(t *testing.T) {
		myDb := harness.NewDatabase("mydb")
		databases := []sql.Database{myDb}
		engine := enginetest.NewEngineWithDbs(t, harness, databases)
		//engine.Analyzer.Debug = true
		//engine.Analyzer.Verbose = true
		engine.Analyzer.Debug = true
		engine.Analyzer.Verbose = true
		enginetest.TestScriptWithEngine(t, engine, harness, test)
	}
}

func TestSingleQueryPrepared(t *testing.T) {
	t.Skip()

	var test enginetest.QueryTest
	test = enginetest.QueryTest{
		Query: `SELECT ST_SRID(g, 0) from geometry_table order by i`,
		Expected: []sql.Row{
			{sql.Point{X: 1, Y: 2}},
			{sql.Linestring{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
			{sql.Polygon{Lines: []sql.Linestring{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
			{sql.Point{X: 1, Y: 2}},
			{sql.Linestring{Points: []sql.Point{{X: 1, Y: 2}, {X: 3, Y: 4}}}},
			{sql.Polygon{Lines: []sql.Linestring{{Points: []sql.Point{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 1}, {X: 0, Y: 0}}}}}},
		},
	}

	harness := newDoltHarness(t)
	//engine := enginetest.NewEngine(t, harness)
	//enginetest.CreateIndexes(t, harness, engine)
	engine := enginetest.NewSpatialEngine(t, harness)
	engine.Analyzer.Debug = true
	engine.Analyzer.Verbose = true

	enginetest.TestQuery(t, harness, engine, test.Query, test.Expected, nil)
}

func TestVersionedQueries(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestVersionedQueries(t, newDoltHarness(t))
@@ -182,6 +193,11 @@ func TestInsertIntoErrors(t *testing.T) {
	enginetest.TestInsertIntoErrors(t, newDoltHarness(t))
}

func TestSpatialQueries(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestSpatialQueries(t, newDoltHarness(t))
}

func TestReplaceInto(t *testing.T) {
	enginetest.TestReplaceInto(t, newDoltHarness(t))
}
@@ -191,7 +207,19 @@ func TestReplaceIntoErrors(t *testing.T) {
}

func TestUpdate(t *testing.T) {
	enginetest.TestUpdate(t, newDoltHarness(t))
	var skipped []string
	if types.IsFormat_DOLT_1(types.Format_Default) {
		// skip update queries that use a join
		patternToSkip := "join"
		skipped = make([]string, 0)
		for _, q := range enginetest.UpdateTests {
			if strings.Contains(strings.ToLower(q.WriteQuery), patternToSkip) {
				skipped = append(skipped, q.WriteQuery)
			}
		}
	}

	enginetest.TestUpdate(t, newDoltHarness(t).WithSkippedQueries(skipped))
}

func TestUpdateErrors(t *testing.T) {
@@ -473,6 +501,20 @@ func TestVariableErrors(t *testing.T) {
	enginetest.TestVariableErrors(t, newDoltHarness(t))
}

func TestLoadDataPrepared(t *testing.T) {
	t.Skip()
	enginetest.TestLoadDataPrepared(t, newDoltHarness(t))
}

func TestLoadData(t *testing.T) {
	t.Skip()
	enginetest.TestLoadData(t, newDoltHarness(t))
}

func TestLoadDataErrors(t *testing.T) {
	enginetest.TestLoadDataErrors(t, newDoltHarness(t))
}

func TestJsonScripts(t *testing.T) {
	enginetest.TestJsonScripts(t, newDoltHarness(t))
}
@@ -724,6 +766,125 @@ func TestPersist(t *testing.T) {
	enginetest.TestPersist(t, harness, newPersistableSession)
}

func TestQueriesPrepared(t *testing.T) {
	enginetest.TestQueriesPrepared(t, newDoltHarness(t))
}

func TestSpatialQueriesPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestSpatialQueriesPrepared(t, newDoltHarness(t))
}

func TestVersionedQueriesPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestVersionedQueriesPrepared(t, newDoltHarness(t))
}

func TestInfoSchemaPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestInfoSchemaPrepared(t, newDoltHarness(t))
}

func TestUpdateQueriesPrepared(t *testing.T) {
	var skipped []string
	if types.IsFormat_DOLT_1(types.Format_Default) {
		// skip update queries that use a join
		skipped = make([]string, 0)
		for _, q := range enginetest.UpdateTests {
			if strings.Contains(strings.ToLower(q.WriteQuery), "join") {
				skipped = append(skipped, q.WriteQuery)
			}
		}
	}

	enginetest.TestUpdateQueriesPrepared(t, newDoltHarness(t).WithSkippedQueries(skipped))
}

func TestInsertQueriesPrepared(t *testing.T) {
	var skipped []string
	if types.IsFormat_DOLT_1(types.Format_Default) {
		// skip keyless-table queries
		skipped = make([]string, 0)
		for _, q := range enginetest.UpdateTests {
			if strings.Contains(strings.ToLower(q.WriteQuery), "keyless") {
				skipped = append(skipped, q.WriteQuery)
			}
		}
	}

	enginetest.TestInsertQueriesPrepared(t, newDoltHarness(t).WithSkippedQueries(skipped))
}

func TestReplaceQueriesPrepared(t *testing.T) {
	enginetest.TestReplaceQueriesPrepared(t, newDoltHarness(t))
}

func TestDeleteQueriesPrepared(t *testing.T) {
	enginetest.TestDeleteQueriesPrepared(t, newDoltHarness(t))
}

func TestScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestScriptsPrepared(t, newDoltHarness(t))
}

func TestInsertScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestInsertScriptsPrepared(t, newDoltHarness(t))
}

func TestComplexIndexQueriesPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestComplexIndexQueriesPrepared(t, newDoltHarness(t))
}

func TestJsonScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestJsonScriptsPrepared(t, newDoltHarness(t))
}

func TestCreateCheckConstraintsScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestCreateCheckConstraintsScriptsPrepared(t, newDoltHarness(t))
}

func TestInsertIgnoreScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestInsertIgnoreScriptsPrepared(t, newDoltHarness(t))
}

func TestInsertErrorScriptsPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestInsertErrorScriptsPrepared(t, newDoltHarness(t))
}

func TestExplodePrepared(t *testing.T) {
	t.Skip()
	enginetest.TestExplodePrepared(t, newDoltHarness(t))
}

func TestViewsPrepared(t *testing.T) {
	enginetest.TestViewsPrepared(t, newDoltHarness(t))
}

func TestVersionedViewsPrepared(t *testing.T) {
	t.Skip("unsupported for prepareds")
	enginetest.TestVersionedViewsPrepared(t, newDoltHarness(t))
}

func TestShowTableStatusPrepared(t *testing.T) {
	enginetest.TestShowTableStatusPrepared(t, newDoltHarness(t))
}

func TestPrepared(t *testing.T) {
	skipNewFormat(t)
	enginetest.TestPrepared(t, newDoltHarness(t))
}

func TestPreparedInsert(t *testing.T) {
	enginetest.TestPreparedInsert(t, newDoltHarness(t))
}

func TestAddDropPrimaryKeys(t *testing.T) {
	skipNewFormat(t)
	t.Run("adding and dropping primary keys does not result in duplicate NOT NULL constraints", func(t *testing.T) {
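The substring filter above appears in near-identical form in several of these tests. As a sketch, it could be factored out like this, assuming (as those loops already do) that the test slices hold `enginetest.WriteQueryTest` values exposing a `WriteQuery` string:

```go
// skipByPattern collects the WriteQuery of every test whose query contains
// pattern (case-insensitively), mirroring the inline loops above.
func skipByPattern(tests []enginetest.WriteQueryTest, pattern string) []string {
	pattern = strings.ToLower(pattern)
	var skipped []string
	for _, q := range tests {
		if strings.Contains(strings.ToLower(q.WriteQuery), pattern) {
			skipped = append(skipped, q.WriteQuery)
		}
	}
	return skipped
}
```

TestUpdate would then reduce to `enginetest.TestUpdate(t, newDoltHarness(t).WithSkippedQueries(skipByPattern(enginetest.UpdateTests, "join")))`.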
@@ -74,6 +74,7 @@ var _ sql.UpdatableTable = (*WritableIndexedDoltTable)(nil)
var _ sql.DeletableTable = (*WritableIndexedDoltTable)(nil)
var _ sql.ReplaceableTable = (*WritableIndexedDoltTable)(nil)
var _ sql.StatisticsTable = (*WritableIndexedDoltTable)(nil)
var _ sql.ProjectedTable = (*WritableIndexedDoltTable)(nil)

type WritableIndexedDoltTable struct {
	*WritableDoltTable
@@ -99,9 +100,15 @@ func (t *WritableIndexedDoltTable) PartitionRows2(ctx *sql.Context, part sql.Par
	return iter.(sql.RowIter2), nil
}

func (t *WritableIndexedDoltTable) WithProjection(colNames []string) sql.Table {
// WithProjections implements sql.ProjectedTable
func (t *WritableIndexedDoltTable) WithProjections(colNames []string) sql.Table {
	return &WritableIndexedDoltTable{
		WritableDoltTable: t.WithProjection(colNames).(*WritableDoltTable),
		WritableDoltTable: t.WithProjections(colNames).(*WritableDoltTable),
		indexLookup:       t.indexLookup,
	}
}

// Projections implements sql.ProjectedTable
func (t *WritableIndexedDoltTable) Projections() []string {
	return t.projectedCols
}
@@ -186,7 +186,7 @@ func ProllyRowIterFromPartition(ctx context.Context, tbl *doltdb.Table, projecti
	return index.NewProllyRowIter(ctx, sch, rows, iter, projections)
}

// Returns a |sql.RowIter| for a full table scan for the given |table|. If
// TableToRowIter returns a |sql.RowIter| for a full table scan for the given |table|. If
// |columns| is not empty, only columns with names appearing in |columns| will
// have non-|nil| values in the resulting |sql.Row|s. If |columns| is empty,
// values for all columns in the table are populated in each returned Row. The
@@ -22,6 +22,7 @@ import (

	"github.com/dolthub/go-mysql-server/sql"
	"github.com/dolthub/vitess/go/sqltypes"
	"github.com/dolthub/vitess/go/vt/sqlparser"

	"github.com/dolthub/dolt/go/libraries/doltcore/row"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -147,6 +148,68 @@ func InsertStatementPrefix(tableName string, tableSch schema.Schema) (string, er
	return b.String(), nil
}

// SqlRowAsCreateProcStmt converts a Row into a CREATE PROCEDURE statement.
// This function expects a row from the dolt_procedures table.
func SqlRowAsCreateProcStmt(r sql.Row) (string, error) {
	var b strings.Builder

	// Write create procedure
	prefix := "CREATE PROCEDURE "
	b.WriteString(prefix)

	// Write procedure name
	nameStr := r[0].(string)
	b.WriteString(QuoteIdentifier(nameStr))
	b.WriteString(" ") // add a space

	// Write definition
	defStmt, err := sqlparser.Parse(r[1].(string))
	if err != nil {
		return "", err
	}
	defStr := sqlparser.String(defStmt)
	defStr = defStr[len(prefix)+len(nameStr)+1:]
	b.WriteString(defStr)

	b.WriteString(";")
	return b.String(), nil
}

// SqlRowAsCreateFragStmt converts a Row into either a CREATE TRIGGER or a CREATE VIEW statement.
// This function expects a row from the dolt_schemas table.
func SqlRowAsCreateFragStmt(r sql.Row) (string, error) {
	var b strings.Builder

	// Write create
	b.WriteString("CREATE ")

	// Write type
	typeStr := strings.ToUpper(r[0].(string))
	b.WriteString(typeStr)
	b.WriteString(" ") // add a space

	// Write view/trigger name
	nameStr := r[1].(string)
	b.WriteString(QuoteIdentifier(nameStr))
	b.WriteString(" ") // add a space

	// Parse statement to extract definition (and remove any weird whitespace issues)
	defStmt, err := sqlparser.Parse(r[2].(string))
	if err != nil {
		return "", err
	}
	defStr := sqlparser.String(defStmt)
	if typeStr == "TRIGGER" { // triggers need the "CREATE TRIGGER <trig_name>" prefix cut off
		defStr = defStr[len("CREATE TRIGGER ")+len(nameStr)+1:]
	} else { // views need to be prefixed with "AS"
		defStr = "AS " + defStr
	}
	b.WriteString(defStr)

	b.WriteString(";")
	return b.String(), nil
}

func SqlRowAsInsertStmt(ctx context.Context, r sql.Row, tableName string, tableSch schema.Schema) (string, error) {
	var b strings.Builder

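A rough usage sketch for the two helpers above. The row shapes follow what the functions index into — `dolt_procedures` rows as (name, create_stmt, ...) and `dolt_schemas` rows as (type, name, fragment) — and the exact output depends on how the parser normalizes the statement, so the expected string in the comment is approximate.

```go
// A dolt_schemas row for a view: type, name, fragment.
viewRow := sql.Row{"view", "myview", "SELECT 2 + 2"}
stmt, err := sqlfmt.SqlRowAsCreateFragStmt(viewRow)
if err != nil {
	// the stored fragment failed to parse
	return err
}
// stmt is roughly: CREATE VIEW `myview` AS select 2 + 2;
fmt.Println(stmt)
```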
@@ -371,6 +371,7 @@ type doltTableInterface interface {
	sql.ReplaceableTable
	sql.AutoIncrementTable
	sql.TruncateableTable
	sql.ProjectedTable
}

func (t *WritableDoltTable) setRoot(ctx *sql.Context, newRoot *doltdb.RootValue) error {
@@ -384,9 +385,10 @@ func (t *WritableDoltTable) WithIndexLookup(lookup sql.IndexLookup) sql.Table {
	}
}

func (t *WritableDoltTable) WithProjection(colNames []string) sql.Table {
// WithProjections implements sql.ProjectedTable
func (t *WritableDoltTable) WithProjections(colNames []string) sql.Table {
	return &WritableDoltTable{
		DoltTable: t.DoltTable.WithProjection(colNames).(*DoltTable),
		DoltTable: t.DoltTable.WithProjections(colNames).(*DoltTable),
		db:        t.db,
		ed:        t.ed,
	}
@@ -714,13 +716,16 @@ func (t DoltTable) GetForeignKeyUpdater(ctx *sql.Context) sql.ForeignKeyUpdater
	return nil
}

func (t *DoltTable) Projection() []string {
// Projections implements sql.ProjectedTable
func (t *DoltTable) Projections() []string {
	return t.projectedCols
}

func (t DoltTable) WithProjection(colNames []string) sql.Table {
	t.projectedCols = colNames
	return &t
// WithProjections implements sql.ProjectedTable
func (t *DoltTable) WithProjections(colNames []string) sql.Table {
	nt := *t
	nt.projectedCols = colNames
	return &nt
}

var _ sql.PartitionIter = (*doltTablePartitionIter)(nil)
@@ -871,18 +876,18 @@ type doltAlterableTableInterface interface {
	sql.ForeignKeyTable
	sql.CheckAlterableTable
	sql.PrimaryKeyAlterableTable
	sql.ProjectedTable
}

var _ doltAlterableTableInterface = (*AlterableDoltTable)(nil)

func (t *AlterableDoltTable) WithProjections(colNames []string) sql.Table {
	return &AlterableDoltTable{WritableDoltTable: *t.WritableDoltTable.WithProjections(colNames).(*WritableDoltTable)}
}

// AddColumn implements sql.AlterableTable
func (t *AlterableDoltTable) AddColumn(ctx *sql.Context, column *sql.Column, order *sql.ColumnOrder) error {
	if types.IsFormat_DOLT_1(t.nbf) {
		return nil
	}

	root, err := t.getRoot(ctx)

	if err != nil {
		return err
	}
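Worth noting: the rewritten `WithProjections` switches from mutating a value receiver to an explicit copy-on-write pattern — dereference the pointer into a fresh struct, mutate the copy, and return the copy's address — so the receiver is never aliased or mutated. In miniature:

```go
type table struct{ cols []string }

// withCols returns a copy of t with cols swapped in; t itself is untouched.
func (t *table) withCols(cols []string) *table {
	nt := *t // shallow copy of the struct
	nt.cols = cols
	return &nt
}
```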
@@ -95,6 +95,24 @@ func (w *SqlExportWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
		return nil
	}

	// Special case for the schemas table
	if w.tableName == doltdb.SchemasTableName {
		stmt, err := sqlfmt.SqlRowAsCreateFragStmt(r)
		if err != nil {
			return err
		}
		return iohelp.WriteLine(w.wr, stmt)
	}

	// Special case for the procedures table
	if w.tableName == doltdb.ProceduresTableName {
		stmt, err := sqlfmt.SqlRowAsCreateProcStmt(r)
		if err != nil {
			return err
		}
		return iohelp.WriteLine(w.wr, stmt)
	}

	if err := w.maybeWriteDropCreate(ctx); err != nil {
		return err
	}
@@ -108,6 +126,10 @@ func (w *SqlExportWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
}

func (w *SqlExportWriter) maybeWriteDropCreate(ctx context.Context) error {
	// Never write DROP/CREATE TABLE for the dolt_schemas or dolt_procedures tables
	if w.tableName == doltdb.SchemasTableName || w.tableName == doltdb.ProceduresTableName {
		return nil
	}
	if !w.writtenFirstRow {
		var b strings.Builder
		b.WriteString(sqlfmt.DropTableIfExistsStmt(w.tableName))
@@ -1,24 +1,5 @@
## Example
The noms command is largely unsupported, but can be useful for certain debugging tasks. Notably, `noms manifest`
and `noms show` are kept in good working order. The rest are in varying states of brokenness, but may still be useful.

```shell
cd $GOPATH/src/github.com/attic-labs/noms/samples/go/counter
go build
./counter /tmp/nomsdb::counter
./counter /tmp/nomsdb::counter
./counter /tmp/nomsdb::counter

noms serve /tmp/nomsdb
```

Then, in a separate shell:

```shell
# This starts where the previous count left off because we're serving the same database
./counter http://localhost:8000::counter

# Display the datasets at this server
noms ds http://localhost:8000

# Print the history of the counter dataset
noms log http://localhost:8000::counter
```
For inspecting the raw data of a dolt database, the `splunk.pl` script in this directory is your best bet. It
uses `noms manifest` and `noms show` in a simple shell to explore a tree of values and refs.
@@ -74,7 +74,9 @@ func runCommit(ctx context.Context, args []string) int {
	absPath, err := spec.NewAbsolutePath(path)
	util.CheckError(err)

	value := absPath.Resolve(ctx, db, vrw)
	value, err := absPath.Resolve(ctx, db, vrw)
	util.CheckError(err)

	if value == nil {
		util.CheckErrorNoUsage(errors.New(fmt.Sprintf("Error resolving value: %s", path)))
	}

@@ -161,7 +161,9 @@ func argumentToValue(ctx context.Context, arg string, db datas.Database, vrw typ
	if arg[0] == '@' {
		p, err := spec.NewAbsolutePath(arg[1:])
		d.PanicIfError(err)
		return p.Resolve(ctx, db, vrw), nil
		resolve, err := p.Resolve(ctx, db, vrw)
		d.PanicIfError(err)
		return resolve, nil
	}
	if n, err := strconv.ParseFloat(arg, 64); err == nil {
		return types.Float(n), nil
@@ -27,11 +27,13 @@ import (
	"fmt"
	"io"
	"os"
	"strings"

	flag "github.com/juju/gnuflag"

	"github.com/dolthub/dolt/go/store/cmd/noms/util"
	"github.com/dolthub/dolt/go/store/config"
	"github.com/dolthub/dolt/go/store/prolly"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/util/datetime"
	"github.com/dolthub/dolt/go/store/util/outputpager"
@@ -67,8 +69,26 @@ func setupShowFlags() *flag.FlagSet {

func runShow(ctx context.Context, args []string) int {
	cfg := config.NewResolver()

	var value interface{}
	database, vrw, value, err := cfg.GetPath(ctx, args[0])
	util.CheckErrorNoUsage(err)

	if err != nil && strings.Contains(err.Error(), "unknown type") {
		// If noms can't decode a value but it does exist, we'll assume it's a prolly node and treat it as such
		sp, err := cfg.GetDatabaseSpecForPath(ctx, args[0])
		util.CheckErrorNoUsage(err)

		database = sp.GetDatabase(ctx)
		vrw = sp.GetVRW(ctx)
		cs := sp.NewChunkStore(ctx)
		chunk, err := cs.Get(ctx, sp.Path.Hash)
		util.CheckErrorNoUsage(err)

		value = prolly.MapNodeFromBytes(chunk.Data())
	} else {
		util.CheckErrorNoUsage(err)
	}

	defer database.Close()

	if value == nil {
@@ -82,7 +102,7 @@ func runShow(ctx context.Context, args []string) int {
	}

	if showRaw {
		ch, err := types.EncodeValue(value, vrw.Format())
		ch, err := types.EncodeValue(value.(types.Value), vrw.Format())
		util.CheckError(err)
		buf := bytes.NewBuffer(ch.Data())
		_, err = io.Copy(os.Stdout, buf)
@@ -91,7 +111,7 @@ func runShow(ctx context.Context, args []string) int {
	}

	if showStats {
		types.WriteValueStats(ctx, os.Stdout, value, vrw)
		types.WriteValueStats(ctx, os.Stdout, value.(types.Value), vrw)
		return 0
	}

@@ -102,15 +122,42 @@ func runShow(ctx context.Context, args []string) int {
		pgr := outputpager.Start()
		defer pgr.Stop()

		types.WriteEncodedValue(ctx, pgr.Writer, value)
		outputEncodedValue(ctx, pgr.Writer, value)
		fmt.Fprintln(pgr.Writer)
	} else {
		t, err := types.TypeOf(value)
		util.CheckError(err)
		fmt.Fprint(os.Stdout, t.HumanReadableString(), " - ")

		types.WriteEncodedValue(ctx, os.Stdout, value)
		outputType(value)
		outputEncodedValue(ctx, os.Stdout, value)
	}

	return 0
}

func outputType(value interface{}) {
	var typeString string
	switch value := value.(type) {
	case prolly.Node:
		typeString = "prolly.Node"
	case types.Value:
		t, err := types.TypeOf(value)
		util.CheckError(err)
		typeString = t.HumanReadableString()
	default:
		typeString = fmt.Sprintf("unknown type %T", value)
	}
	fmt.Fprint(os.Stdout, typeString, " - ")
}

func outputEncodedValue(ctx context.Context, w io.Writer, value interface{}) error {
	switch value := value.(type) {
	case types.TupleRowStorage:
		node := prolly.NodeFromValue(value)
		return prolly.OutputProllyNode(w, node)
	case prolly.Node:
		return prolly.OutputProllyNode(w, value)
	case types.Value:
		return types.WriteEncodedValue(ctx, w, value)
	default:
		_, err := w.Write([]byte(fmt.Sprintf("unknown value type %T: %v", value, value)))
		return err
	}
}

@@ -107,7 +107,12 @@ func nomsStructDel(ctx context.Context, specStr string, args []string) int {
func splitPath(ctx context.Context, db datas.Database, sp spec.Spec) (rootVal types.Value, basePath types.Path) {
	rootPath := sp.Path
	rootPath.Path = types.Path{}
	rootVal = rootPath.Resolve(ctx, db, sp.GetVRW(ctx))
	var err error
	rootVal, err = rootPath.Resolve(ctx, db, sp.GetVRW(ctx))
	if err != nil {
		panic(err)
	}

	if rootVal == nil {
		util.CheckError(fmt.Errorf("Invalid path: %s", sp.String()))
		return
@@ -170,5 +170,16 @@ func (r *Resolver) GetPath(ctx context.Context, str string) (datas.Database, typ
	if err != nil {
		return nil, nil, nil, err
	}
	return sp.GetDatabase(ctx), sp.GetVRW(ctx), sp.GetValue(ctx), nil
	value, err := sp.GetValue(ctx)
	if err != nil {
		return nil, nil, nil, err
	}

	return sp.GetDatabase(ctx), sp.GetVRW(ctx), value, nil
}

// GetDatabaseSpecForPath returns the database spec for the path given, but does not attempt to load a value
func (r *Resolver) GetDatabaseSpecForPath(ctx context.Context, str string) (spec.Spec, error) {
	specStr, dbc := r.ResolvePathSpecAndGetDbConfig(str)
	return spec.ForPathOpts(r.verbose(ctx, str, specStr), specOptsForConfig(r.config, dbc))
}
@@ -29,56 +29,73 @@ import (
)

func newAWSChunkSource(ctx context.Context, ddb *ddbTableStore, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {

	index, tra, err := func() (onHeapTableIndex, tableReaderAt, error) {
	var tra tableReaderAt
	index, err := loadTableIndex(stats, chunkCount, q, func(p []byte) error {
		if al.tableMayBeInDynamo(chunkCount) {
			t1 := time.Now()
			data, err := ddb.ReadTable(ctx, name, stats)
			if data == nil && err == nil { // There MUST be either data or an error
				return onHeapTableIndex{}, &dynamoTableReaderAt{}, errors.New("no data available")
				return errors.New("no data available")
			}
			if data != nil {
				stats.IndexReadLatency.SampleTimeSince(t1)
				stats.IndexBytesPerRead.Sample(uint64(len(data)))
				ind, err := parseTableIndexByCopy(data, q)
				if err != nil {
					return onHeapTableIndex{}, &dynamoTableReaderAt{}, err
				if len(p) > len(data) {
					return errors.New("not enough data for chunk count")
				}
				return ind, &dynamoTableReaderAt{ddb: ddb, h: name}, nil
				indexBytes := data[len(data)-len(p):]
				copy(p, indexBytes)
				tra = &dynamoTableReaderAt{ddb: ddb, h: name}
				return nil
			}
			if _, ok := err.(tableNotInDynamoErr); !ok {
				return onHeapTableIndex{}, &dynamoTableReaderAt{}, err
				return err
			}
		}

		index, err := loadTableIndex(stats, chunkCount, q, func(bytesFromEnd int64) ([]byte, error) {
			buff := make([]byte, bytesFromEnd)
			n, _, err := s3.ReadFromEnd(ctx, name, buff, stats)
			if err != nil {
				return nil, err
			}
			if bytesFromEnd != int64(n) {
				return nil, errors.New("failed to read all data")
			}
			return buff, nil
		})
		n, _, err := s3.ReadFromEnd(ctx, name, p, stats)
		if err != nil {
			return onHeapTableIndex{}, &dynamoTableReaderAt{}, err
			return err
		}

		return index, &s3TableReaderAt{h: name, s3: s3}, nil
	}()
		if len(p) != n {
			return errors.New("failed to read all data")
		}
		tra = &s3TableReaderAt{h: name, s3: s3}
		return nil
	})
	if err != nil {
		return &chunkSourceAdapter{}, err
	}

	tr, err := newTableReader(index, tra, s3BlockSize)
	if err != nil {
		_ = index.Close()
		return &chunkSourceAdapter{}, err
	}
	return &chunkSourceAdapter{tr, name}, nil
}

func loadTableIndex(stats *Stats, chunkCount uint32, q MemoryQuotaProvider, loadIndexBytes func(p []byte) error) (tableIndex, error) {
	ti, err := newMmapTableIndex(chunkCount)
	if err != nil {
		return nil, err
	}

	t1 := time.Now()
	err = loadIndexBytes(ti.indexDataBuff)
	if err != nil {
		_ = ti.mmapped.Unmap()
		return onHeapTableIndex{}, err
	}
	stats.IndexReadLatency.SampleTimeSince(t1)
	stats.IndexBytesPerRead.Sample(uint64(len(ti.indexDataBuff)))

	err = ti.parseIndexBuffer(q)
	if err != nil {
		_ = ti.mmapped.Unmap()
		return nil, err
	}

	return ti, nil
}

type awsTableReaderAt struct {
	once     sync.Once
	getTRErr error

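After this refactor, `loadTableIndex` owns the index buffer (mmap-backed via `newMmapTableIndex`) and hands it to a `loadIndexBytes` callback that must fill it with the trailing index-plus-footer bytes of the table. A sketch of such a callback against a local file — the name is illustrative, and the real callers read from DynamoDB, S3, or a blobstore instead:

```go
import "os"

// readTrailingIndex fills p with the last len(p) bytes of the file at path,
// the region where an nbs table stores its index and footer.
func readTrailingIndex(path string, p []byte) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return err
	}
	_, err = f.ReadAt(p, fi.Size()-int64(len(p)))
	return err
}
```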
@@ -17,7 +17,6 @@ package nbs
import (
	"context"
	"io"
	"time"

	"github.com/dolthub/dolt/go/store/blobstore"
	"github.com/dolthub/dolt/go/store/chunks"
@@ -96,35 +95,21 @@ func (bsTRA *bsTableReaderAt) ReadAtWithStats(ctx context.Context, p []byte, off
	return totalRead, nil
}

func loadTableIndex(stats *Stats, chunkCount uint32, q MemoryQuotaProvider, getBytesFromEnd func(n int64) ([]byte, error)) (onHeapTableIndex, error) {
	size := indexSize(chunkCount) + footerSize
	t1 := time.Now()
	bytes, err := getBytesFromEnd(int64(size))
	if err != nil {
		return onHeapTableIndex{}, err
	}
	stats.IndexReadLatency.SampleTimeSince(t1)
	stats.IndexBytesPerRead.Sample(uint64(len(bytes)))

	return parseTableIndex(bytes, q)
}

func newBSChunkSource(ctx context.Context, bs blobstore.Blobstore, name addr, chunkCount uint32, blockSize uint64, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {

	index, err := loadTableIndex(stats, chunkCount, q, func(size int64) ([]byte, error) {
		rc, _, err := bs.Get(ctx, name.String(), blobstore.NewBlobRange(-size, 0))
	index, err := loadTableIndex(stats, chunkCount, q, func(p []byte) error {
		rc, _, err := bs.Get(ctx, name.String(), blobstore.NewBlobRange(-int64(len(p)), 0))
		if err != nil {
			return nil, err
			return err
		}
		defer rc.Close()

		buff := make([]byte, size)
		_, err = io.ReadFull(rc, buff)
		_, err = io.ReadFull(rc, p)
		if err != nil {
			return nil, err
			return err
		}

		return buff, nil
		return nil
	})
	if err != nil {
		return nil, err
@@ -132,6 +117,7 @@ func newBSChunkSource(ctx context.Context, bs blobstore.Blobstore, name addr, ch

	tr, err := newTableReader(index, &bsTableReaderAt{name.String(), bs}, s3BlockSize)
	if err != nil {
		_ = index.Close()
		return nil, err
	}
	return &chunkSourceAdapter{tr, name}, nil

@@ -50,7 +50,7 @@ type fsTablePersister struct {
}

func (ftp *fsTablePersister) Open(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (chunkSource, error) {
	return newMmapTableReader(ftp.dir, name, chunkCount, ftp.q, ftp.fc)
	return newFileTableReader(ftp.dir, name, chunkCount, ftp.q, ftp.fc)
}

func (ftp *fsTablePersister) Persist(ctx context.Context, mt *memTable, haver chunkReader, stats *Stats) (chunkSource, error) {

@@ -26,16 +26,12 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/dolthub/mmap-go"
)

type mmapTableReader struct {
type fileTableReader struct {
	tableReader
	fc *fdCache
	h  addr
@@ -45,35 +41,22 @@ const (
	fileBlockSize = 1 << 12
)

var (
	maxInt = int64(math.MaxInt64)
)

func init() {
	if strconv.IntSize == 32 {
		maxInt = math.MaxInt32
	}
}

func newMmapTableReader(dir string, h addr, chunkCount uint32, q MemoryQuotaProvider, fc *fdCache) (cs chunkSource, err error) {
func newFileTableReader(dir string, h addr, chunkCount uint32, q MemoryQuotaProvider, fc *fdCache) (cs chunkSource, err error) {
	path := filepath.Join(dir, h.String())

	index, err := func() (ti onHeapTableIndex, err error) {

		// Be careful with how |f| is used below. |RefFile| returns a cached
		// os.File pointer so the code needs to use f in a concurrency-safe
		// manner. Moving the file offset is BAD.
		var f *os.File
		f, err = fc.RefFile(path)

		if err != nil {
			return
		}

		defer func() {
			unrefErr := fc.UnrefFile(path)

			if unrefErr != nil {
				err = unrefErr
			}
		}()

		// Since we can't move the file offset, get the size of the file and use
		// ReadAt to load the index instead.
		var fi os.FileInfo
		fi, err = f.Stat()

@@ -87,40 +70,25 @@ func newMmapTableReader(dir string, h addr, chunkCount uint32, q MemoryQuotaProv
			return
		}

		// index. Mmap won't take an offset that's not page-aligned, so find the nearest page boundary preceding the index.
		indexOffset := fi.Size() - int64(footerSize) - int64(indexSize(chunkCount))
		aligned := indexOffset / mmapAlignment * mmapAlignment // Thanks, integer arithmetic!
		length := int(fi.Size() - aligned)
		indexSize := int64(indexSize(chunkCount) + footerSize)
		indexOffset := fi.Size() - indexSize
		r := io.NewSectionReader(f, indexOffset, indexSize)
		b := make([]byte, indexSize)

		if fi.Size()-aligned > maxInt {
			err = fmt.Errorf("%s - size: %d alignment: %d> maxInt: %d", path, fi.Size(), aligned, maxInt)
		_, err = io.ReadFull(r, b)
		if err != nil {
			return
		}

		buff := make([]byte, indexSize(chunkCount)+footerSize)
		// TODO: Don't use mmap here.
		func() {
			var mm mmap.MMap
			mm, err = mmap.MapRegion(f, length, mmap.RDONLY, 0, aligned)
			if err != nil {
				return
		defer func() {
			unrefErr := fc.UnrefFile(path)

			if unrefErr != nil {
				err = unrefErr
			}

			defer func() {
				unmapErr := mm.Unmap()

				if unmapErr != nil {
					err = unmapErr
				}
			}()
			copy(buff, mm[indexOffset-aligned:])
		}()
		if err != nil {
			return onHeapTableIndex{}, err
		}

		ti, err = parseTableIndex(buff, q)

		ti, err = parseTableIndex(b, q)
		if err != nil {
			return
		}
@@ -139,27 +107,27 @@ func newMmapTableReader(dir string, h addr, chunkCount uint32, q MemoryQuotaProv
	if err != nil {
		return nil, err
	}
	return &mmapTableReader{
	return &fileTableReader{
		tr,
		fc,
		h,
	}, nil
}

func (mmtr *mmapTableReader) hash() (addr, error) {
func (mmtr *fileTableReader) hash() (addr, error) {
	return mmtr.h, nil
}

func (mmtr *mmapTableReader) Close() error {
func (mmtr *fileTableReader) Close() error {
	return mmtr.tableReader.Close()
}

func (mmtr *mmapTableReader) Clone() (chunkSource, error) {
func (mmtr *fileTableReader) Clone() (chunkSource, error) {
	tr, err := mmtr.tableReader.Clone()
	if err != nil {
		return &mmapTableReader{}, err
		return &fileTableReader{}, err
	}
	return &mmapTableReader{tr, mmtr.fc, mmtr.h}, nil
	return &fileTableReader{tr, mmtr.fc, mmtr.h}, nil
}

type cacheReaderAt struct {
@@ -52,7 +52,7 @@ func TestMmapTableReader(t *testing.T) {
	err = os.WriteFile(filepath.Join(dir, h.String()), tableData, 0666)
	require.NoError(t, err)

	trc, err := newMmapTableReader(dir, h, uint32(len(chunks)), &noopQuotaProvider{}, fc)
	trc, err := newFileTableReader(dir, h, uint32(len(chunks)), &noopQuotaProvider{}, fc)
	require.NoError(t, err)
	assertChunksInReader(chunks, trc, assert)
}
@@ -33,7 +33,7 @@ type GenerationalNBS struct {
|
||||
}
|
||||
|
||||
func NewGenerationalCS(oldGen, newGen *NomsBlockStore) *GenerationalNBS {
|
||||
if oldGen.Version() != newGen.Version() {
|
||||
if oldGen.Version() != "" && oldGen.Version() != newGen.Version() {
|
||||
panic("oldgen and newgen chunkstore versions vary")
|
||||
}
|
||||
|
||||
|
||||
@@ -450,11 +450,6 @@ func (ts tableSpec) GetChunkCount() uint32 {
|
||||
return ts.chunkCount
|
||||
}
|
||||
|
||||
func (ts tableSpec) GetMemorySize() uint64 {
|
||||
n := ts.GetChunkCount()
|
||||
return memSize(n)
|
||||
}
|
||||
|
||||
func tableSpecsToMap(specs []tableSpec) map[string]int {
|
||||
m := make(map[string]int)
|
||||
for _, spec := range specs {
|
||||
|
||||
@@ -23,6 +23,7 @@ package nbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -492,17 +493,19 @@ func (fm *fakeManifest) set(version string, lock addr, root hash.Hash, specs, ap
|
||||
}
|
||||
|
||||
func newFakeTableSet(q MemoryQuotaProvider) tableSet {
|
||||
return tableSet{p: newFakeTablePersister(q), q: NewUnlimitedMemQuotaProvider(), rl: make(chan struct{}, 1)}
|
||||
return tableSet{p: newFakeTablePersister(q), q: q, rl: make(chan struct{}, 1)}
|
||||
}
|
||||
|
||||
func newFakeTablePersister(q MemoryQuotaProvider) tablePersister {
|
||||
return fakeTablePersister{q, map[addr]tableReader{}, &sync.RWMutex{}}
|
||||
func newFakeTablePersister(q MemoryQuotaProvider) fakeTablePersister {
|
||||
return fakeTablePersister{q, map[addr]tableReader{}, map[addr]bool{}, map[addr]bool{}, &sync.RWMutex{}}
|
||||
}
|
||||
|
||||
type fakeTablePersister struct {
|
||||
q MemoryQuotaProvider
|
||||
sources map[addr]tableReader
|
||||
mu *sync.RWMutex
|
||||
q MemoryQuotaProvider
|
||||
sources map[addr]tableReader
|
||||
sourcesToFail map[addr]bool
|
||||
opened map[addr]bool
|
||||
mu *sync.RWMutex
|
||||
}
|
||||
|
||||
var _ tablePersister = fakeTablePersister{}
|
||||
@@ -610,8 +613,12 @@ func compactSourcesToBuffer(sources chunkSources) (name addr, data []byte, chunk
|
||||
}
|
||||
|
||||
func (ftp fakeTablePersister) Open(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (chunkSource, error) {
|
||||
ftp.mu.RLock()
|
||||
defer ftp.mu.RUnlock()
|
||||
ftp.mu.Lock()
|
||||
defer ftp.mu.Unlock()
|
||||
if _, ok := ftp.sourcesToFail[name]; ok {
|
||||
return nil, errors.New("intentional failure")
|
||||
}
|
||||
ftp.opened[name] = true
|
||||
return chunkSourceAdapter{ftp.sources[name], name}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -178,7 +178,6 @@ func (s3or *s3ObjectReader) readRange(ctx context.Context, name addr, p []byte,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n, err = io.ReadFull(result.Body, p)
|
||||
return n, sz, err
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ func (nbs *NomsBlockStore) GetChunkLocations(hashes hash.HashSet) (map[hash.Hash
|
||||
f := func(css chunkSources) error {
|
||||
for _, cs := range css {
|
||||
switch tr := cs.(type) {
|
||||
case *mmapTableReader:
|
||||
case *fileTableReader:
|
||||
offsetRecSlice, _, err := tr.findOffsets(gr)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -275,14 +275,6 @@ type chunkSource interface {
|
||||
Clone() (chunkSource, error)
|
||||
}
|
||||
|
||||
func getCSMemSize(cs chunkSource) (uint64, error) {
|
||||
i, err := cs.index()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return memSize(i.ChunkCount()), nil
|
||||
}
|
||||
|
||||
type chunkSources []chunkSource
|
||||
|
||||
// TableFile is an interface for working with an existing table file
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/dolthub/mmap-go"
|
||||
@@ -56,6 +56,8 @@ type tableIndex interface {
|
||||
// Prefixes returns the sorted slice of |uint64| |addr| prefixes; each
|
||||
// entry corresponds to an indexed chunk address.
|
||||
Prefixes() ([]uint64, error)
|
||||
// PrefixAt returns the prefix at the specified index
|
||||
PrefixAt(idx uint32) uint64
|
||||
// TableFileSize returns the total size of the indexed table file, in bytes.
|
||||
TableFileSize() uint64
|
||||
// TotalUncompressedData returns the total uncompressed data size of
|
||||
@@ -94,20 +96,58 @@ func ReadTableFooter(rd io.ReadSeeker) (chunkCount uint32, totalUncompressedData
|
||||
return
|
||||
}
|
||||
|
||||
func indexMemSize(chunkCount uint32) uint64 {
|
||||
is := indexSize(chunkCount) + footerSize
|
||||
// Extra required space for offsets that don't fit into the region where lengths were previously stored, see
|
||||
// newOnHeapTableIndex
|
||||
is += uint64(offsetSize * (chunkCount - chunkCount/2))
|
||||
return is
|
||||
}
|
||||
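
A back-of-the-envelope check of indexMemSize, using assumed NBS constants (prefixTupleSize=12, lengthSize=4, addrSuffixSize=12, offsetSize=8, footerSize=20 are illustrative values, not taken from this diff):

// For chunkCount = 1000:
//   indexSize  = 1000 * (12 + 4 + 12) = 28000 bytes (tuples + lengths + suffixes)
//   + footer   = 28020 bytes
//   + offsets  = 8 * (1000 - 1000/2) = 4000 bytes of fresh space
//   total      = 32020 bytes
// The other 500 offsets (500 * 8 = 4000 bytes) reuse the 4000-byte lengths
// region in place, which is why only chunkCount - chunkCount/2 offsets need
// new memory here.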

// parses a valid nbs tableIndex from a byte stream. |buff| must end with an NBS index
// and footer and its length and capacity must match the expected indexSize for the chunkCount specified in the footer.
// and footer and its length must match the expected indexSize for the chunkCount specified in the footer.
// Retains the buffer and does not allocate new memory except for offsets, computes on buff in place.
func parseTableIndex(buff []byte, q MemoryQuotaProvider) (onHeapTableIndex, error) {
chunkCount, totalUncompressedData, err := ReadTableFooter(bytes.NewReader(buff))
if err != nil {
return onHeapTableIndex{}, err
}
iS := indexSize(chunkCount) + footerSize
if uint64(len(buff)) != iS || uint64(cap(buff)) != iS {
return onHeapTableIndex{}, ErrWrongBufferSize

buff, err = removeFooter(buff, chunkCount)
if err != nil {
return onHeapTableIndex{}, err
}
buff = buff[:len(buff)-footerSize]
return NewOnHeapTableIndex(buff, chunkCount, totalUncompressedData, q)

chunks2 := chunkCount / 2
chunks1 := chunkCount - chunks2
offsetsBuff1 := make([]byte, chunks1*offsetSize)

return newOnHeapTableIndex(buff, offsetsBuff1, chunkCount, totalUncompressedData, q)
}

// similar to parseTableIndex except that it uses the given |offsetsBuff1|
// instead of allocating the additional space.
func parseTableIndexWithOffsetBuff(buff []byte, offsetsBuff1 []byte, q MemoryQuotaProvider) (onHeapTableIndex, error) {
chunkCount, totalUncompressedData, err := ReadTableFooter(bytes.NewReader(buff))
if err != nil {
return onHeapTableIndex{}, err
}

buff, err = removeFooter(buff, chunkCount)
if err != nil {
return onHeapTableIndex{}, err
}

return newOnHeapTableIndex(buff, offsetsBuff1, chunkCount, totalUncompressedData, q)
}

func removeFooter(p []byte, chunkCount uint32) (out []byte, err error) {
iS := indexSize(chunkCount) + footerSize
if uint64(len(p)) != iS {
return nil, ErrWrongBufferSize
}
out = p[:len(p)-footerSize]
return
}

// parseTableIndexByCopy reads the footer, copies indexSize(chunkCount) bytes, and parses an on heap table index.
@@ -127,7 +167,7 @@ func ReadTableIndexByCopy(rd io.ReadSeeker, q MemoryQuotaProvider) (onHeapTableI
iS := int64(indexSize(chunkCount))
_, err = rd.Seek(-(iS + footerSize), io.SeekEnd)
if err != nil {
return onHeapTableIndex{}, ErrInvalidTableFile
return onHeapTableIndex{}, err
}
buff := make([]byte, iS)
_, err = io.ReadFull(rd, buff)
@@ -135,7 +175,11 @@ func ReadTableIndexByCopy(rd io.ReadSeeker, q MemoryQuotaProvider) (onHeapTableI
return onHeapTableIndex{}, err
}

return NewOnHeapTableIndex(buff, chunkCount, totalUncompressedData, q)
chunks2 := chunkCount / 2
chunks1 := chunkCount - chunks2
offsets1Buff := make([]byte, chunks1*offsetSize)

return newOnHeapTableIndex(buff, offsets1Buff, chunkCount, totalUncompressedData, q)
}

type onHeapTableIndex struct {
@@ -145,7 +189,8 @@ type onHeapTableIndex struct {
// Tuple bytes
tupleB []byte
// Offset bytes
offsetB []byte
offsetB1 []byte
offsetB2 []byte
// Suffix bytes
suffixB []byte
chunkCount uint32
@@ -154,23 +199,35 @@ type onHeapTableIndex struct {

var _ tableIndex = &onHeapTableIndex{}

// NewOnHeapTableIndex creates a table index given a buffer of just the table index (no footer)
func NewOnHeapTableIndex(b []byte, chunkCount uint32, totalUncompressedData uint64, q MemoryQuotaProvider) (onHeapTableIndex, error) {
tuples := b[:prefixTupleSize*chunkCount]
lengths := b[prefixTupleSize*chunkCount : prefixTupleSize*chunkCount+lengthSize*chunkCount]
suffixes := b[prefixTupleSize*chunkCount+lengthSize*chunkCount:]
// newOnHeapTableIndex converts a table file index with stored lengths on
// |indexBuff| into an index with stored offsets. Since offsets are twice the
// size of a length, we need to allocate additional space to store all the
// offsets. It stores the first n - n/2 offsets in |offsetsBuff1| (the
// additional space) and the rest into the region of |indexBuff| previously
// occupied by lengths. |onHeapTableIndex| computes directly on the given
// |indexBuff| and |offsetsBuff1| buffers.
func newOnHeapTableIndex(indexBuff []byte, offsetsBuff1 []byte, chunkCount uint32, totalUncompressedData uint64, q MemoryQuotaProvider) (onHeapTableIndex, error) {
tuples := indexBuff[:prefixTupleSize*chunkCount]
lengths := indexBuff[prefixTupleSize*chunkCount : prefixTupleSize*chunkCount+lengthSize*chunkCount]
suffixes := indexBuff[prefixTupleSize*chunkCount+lengthSize*chunkCount:]

chunks2 := chunkCount / 2

lR := bytes.NewReader(lengths)
offsets := make([]byte, chunkCount*offsetSize)
_, err := io.ReadFull(NewOffsetsReader(lR), offsets)
r := NewOffsetsReader(lR)
_, err := io.ReadFull(r, offsetsBuff1)
if err != nil {
return onHeapTableIndex{}, err
}
/**
TODO: Optimize memory usage further
There's wasted space here. The lengths segment in the buffer is retained unnecessarily. We can use that space to
store half the offsets and then allocate an additional len(lengths) to store the rest.
*/

var offsetsBuff2 []byte
if chunks2 > 0 {
offsetsBuff2 = lengths[:chunks2*offsetSize]
_, err = io.ReadFull(r, offsetsBuff2)
if err != nil {
return onHeapTableIndex{}, err
}
}

refCnt := new(int32)
*refCnt = 1
@@ -179,7 +236,8 @@ func NewOnHeapTableIndex(b []byte, chunkCount uint32, totalUncompressedData uint
refCnt: refCnt,
q: q,
tupleB: tuples,
offsetB: offsets,
offsetB1: offsetsBuff1,
offsetB2: offsetsBuff2,
suffixB: suffixes,
chunkCount: chunkCount,
totalUncompressedData: totalUncompressedData,
@@ -190,6 +248,10 @@ func (ti onHeapTableIndex) ChunkCount() uint32 {
return ti.chunkCount
}

func (ti onHeapTableIndex) PrefixAt(idx uint32) uint64 {
return ti.prefixAt(idx)
}

func (ti onHeapTableIndex) EntrySuffixMatches(idx uint32, h *addr) (bool, error) {
ord := ti.ordinalAt(idx)
o := ord * addrSuffixSize
@@ -295,9 +357,18 @@ func (ti onHeapTableIndex) ordinalAt(idx uint32) uint32 {
return binary.BigEndian.Uint32(b)
}

// the first n - n/2 offsets are stored in offsetsB1 and the rest in offsetsB2
func (ti onHeapTableIndex) offsetAt(ord uint32) uint64 {
off := int64(offsetSize * ord)
b := ti.offsetB[off : off+offsetSize]
chunks1 := ti.chunkCount - ti.chunkCount/2
var b []byte
if ord < chunks1 {
off := int64(offsetSize * ord)
b = ti.offsetB1[off : off+offsetSize]
} else {
off := int64(offsetSize * (ord - chunks1))
b = ti.offsetB2[off : off+offsetSize]
}

return binary.BigEndian.Uint64(b)
}
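
The split in offsetAt works because a length slot is half the width of an offset: the chunkCount*lengthSize bytes freed by dropping lengths hold exactly chunkCount/2 offsets, so the remaining chunkCount - chunkCount/2 live in the freshly allocated buffer. A tiny sanity check of that arithmetic, assuming lengthSize=4 and offsetSize=8 (illustrative sizes):

// splitFits shows the chunkCount/2 offsets that reuse the old lengths
// region always fit inside it, for any chunkCount.
func splitFits(chunkCount uint32) bool {
	chunks2 := chunkCount / 2   // offsets stored in the reused lengths region
	reused := chunks2 * 8       // bytes those offsets require
	available := chunkCount * 4 // bytes the dropped lengths freed up
	return reused <= available  // 8*(n/2) <= 4*n holds for every n
}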

@@ -463,11 +534,17 @@ func (ti onHeapTableIndex) TotalUncompressedData() uint64 {
func (ti onHeapTableIndex) Close() error {
cnt := atomic.AddInt32(ti.refCnt, -1)
if cnt == 0 {
return ti.q.ReleaseQuota(memSize(ti.chunkCount))
ti.tupleB = nil
ti.offsetB1 = nil
ti.offsetB2 = nil
ti.suffixB = nil

return ti.q.ReleaseQuota(indexMemSize(ti.chunkCount))
}
if cnt < 0 {
panic("Close() called and reduced ref count to < 0.")
}

return nil
}

@@ -479,188 +556,138 @@ func (ti onHeapTableIndex) Clone() (tableIndex, error) {
return ti, nil
}

// mmap table index

type mmapIndexEntry []byte

const mmapIndexEntryOffsetStart = addrSuffixSize
const mmapIndexEntryLengthStart = addrSuffixSize + uint64Size

func (e mmapIndexEntry) suffix() []byte {
return e[:addrSuffixSize]
}

func (e mmapIndexEntry) Offset() uint64 {
return binary.BigEndian.Uint64(e[mmapIndexEntryOffsetStart:])
}

func (e mmapIndexEntry) Length() uint32 {
return binary.BigEndian.Uint32(e[mmapIndexEntryLengthStart:])
}

func mmapOffheapSize(chunks int) int {
pageSize := 4096
esz := addrSuffixSize + uint64Size + lengthSize
min := esz * chunks
if min%pageSize == 0 {
return min
} else {
return (min/pageSize + 1) * pageSize
}
}

// An mmapIndexEntry is an addrSuffix, a BigEndian uint64 for the offset and a
// BigEnding uint32 for the chunk size.
const mmapIndexEntrySize = addrSuffixSize + uint64Size + lengthSize

type mmapOrdinal struct {
idx int
offset uint64
}
type mmapOrdinalSlice []mmapOrdinal

func (s mmapOrdinalSlice) Len() int { return len(s) }
func (s mmapOrdinalSlice) Less(i, j int) bool { return s[i].offset < s[j].offset }
func (s mmapOrdinalSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// mmapTableIndex is an onHeapTableIndex but creates all of its slice buffers
// from mmap. It overrides Clone and Close of mmapTableIndex so that it can
// count references and release mmapped regions appropriately.
type mmapTableIndex struct {
chunkCount uint32
totalUncompressedData uint64
fileSz uint64
prefixes []uint64
data mmap.MMap
refCnt *int32
onHeapTableIndex
refCnt *int32
q MemoryQuotaProvider
mmapped mmapWStat
indexDataBuff []byte
offset1DataBuff []byte
}

func newMmapTableIndex(ti onHeapTableIndex, f *os.File) (mmapTableIndex, error) {
flags := 0
if f == nil {
flags = mmap.ANON
}
arr, err := mmap.MapRegion(f, mmapOffheapSize(int(ti.chunkCount)), mmap.RDWR, flags, 0)
if err != nil {
return mmapTableIndex{}, err
}
var a addr
for i := uint32(0); i < ti.chunkCount; i++ {
idx := i * mmapIndexEntrySize
si := addrSuffixSize * ti.ordinalAt(i)
copy(arr[idx:], ti.suffixB[si:si+addrSuffixSize])
// newMmapTableIndex mmaps a region of memory large enough to store a fully
// parsed onHeapTableIndex. After creating the mmapTableIndex, index data should
// be loaded into |indexDataBuff| and then parsed with parseIndexBuffer.
func newMmapTableIndex(chunkCount uint32) (*mmapTableIndex, error) {
indexSize := int(indexSize(chunkCount) + footerSize)

e, err := ti.IndexEntry(i, &a)
if err != nil {
return mmapTableIndex{}, err
}
binary.BigEndian.PutUint64(arr[idx+mmapIndexEntryOffsetStart:], e.Offset())
binary.BigEndian.PutUint32(arr[idx+mmapIndexEntryLengthStart:], e.Length())
chunks2 := chunkCount / 2
chunks1 := chunkCount - chunks2
offsets1Size := int(chunks1 * offsetSize)

mmapped, err := mmapWithStats(nil, indexSize+offsets1Size, mmap.RDWR, mmap.ANON, 0)
if err != nil {
return nil, err
}
indexBytesBuff := mmapped.m[:indexSize]
offsets1Buff := mmapped.m[indexSize : indexSize+offsets1Size]

refCnt := new(int32)
*refCnt = 1
p, err := ti.Prefixes()
if err != nil {
return mmapTableIndex{}, err

return &mmapTableIndex{
refCnt: refCnt,
mmapped: mmapped,
indexDataBuff: indexBytesBuff,
offset1DataBuff: offsets1Buff}, nil
}

func (ti *mmapTableIndex) Clone() (tableIndex, error) {
cnt := atomic.AddInt32(ti.refCnt, 1)
if cnt == 1 {
panic("Clone() called after last Close(). This index is no longer valid.")
}
return mmapTableIndex{
ti.chunkCount,
ti.totalUncompressedData,
ti.TableFileSize(),
p,
arr,
refCnt,
}, nil
return ti, nil
}

func (i mmapTableIndex) ChunkCount() uint32 {
return i.chunkCount
}

func (i mmapTableIndex) EntrySuffixMatches(idx uint32, h *addr) (bool, error) {
mi := idx * mmapIndexEntrySize
e := mmapIndexEntry(i.data[mi : mi+mmapIndexEntrySize])
return bytes.Equal(e.suffix(), h[addrPrefixSize:]), nil
}

func (i mmapTableIndex) IndexEntry(idx uint32, a *addr) (indexEntry, error) {
mi := idx * mmapIndexEntrySize
e := mmapIndexEntry(i.data[mi : mi+mmapIndexEntrySize])
if a != nil {
binary.BigEndian.PutUint64(a[:], i.prefixes[idx])
copy(a[addrPrefixSize:], e.suffix())
}
return e, nil
}

func (i mmapTableIndex) Lookup(h *addr) (indexEntry, bool, error) {
prefix := binary.BigEndian.Uint64(h[:])
for idx := i.prefixIdx(prefix); idx < i.chunkCount && i.prefixes[idx] == prefix; idx++ {
mi := idx * mmapIndexEntrySize
e := mmapIndexEntry(i.data[mi : mi+mmapIndexEntrySize])
if bytes.Equal(e.suffix(), h[addrPrefixSize:]) {
return e, true, nil
}
}
return mmapIndexEntry{}, false, nil
}

func (i mmapTableIndex) Ordinals() ([]uint32, error) {
s := mmapOrdinalSlice(make([]mmapOrdinal, i.chunkCount))
for idx := 0; uint32(idx) < i.chunkCount; idx++ {
mi := idx * mmapIndexEntrySize
e := mmapIndexEntry(i.data[mi : mi+mmapIndexEntrySize])
s[idx] = mmapOrdinal{idx, e.Offset()}
}
sort.Sort(s)
res := make([]uint32, i.chunkCount)
for j, r := range s {
res[r.idx] = uint32(j)
}
return res, nil
}

func (i mmapTableIndex) Prefixes() ([]uint64, error) {
return i.prefixes, nil
}

func (i mmapTableIndex) TableFileSize() uint64 {
return i.fileSz
}

func (i mmapTableIndex) TotalUncompressedData() uint64 {
return i.totalUncompressedData
}

func (i mmapTableIndex) Close() error {
cnt := atomic.AddInt32(i.refCnt, -1)
// Close closes the underlying onHeapTableIndex and then unmaps the memory
// region.
func (ti *mmapTableIndex) Close() error {
cnt := atomic.AddInt32(ti.refCnt, -1)
if cnt == 0 {
return i.data.Unmap()
chunkCount := ti.chunkCount
// mmapTableIndex sets the quota provider for onHeapTableIndex to a
// noopQuotaProvider, so that we can release quota after the memory region
// is unmapped.
err := ti.onHeapTableIndex.Close()
if err != nil {
return err
}

ti.indexDataBuff = nil
ti.offset1DataBuff = nil
err = ti.mmapped.Unmap()
if err != nil {
return err
}

err = ti.q.ReleaseQuota(indexMemSize(chunkCount))
if err != nil {
return err
}
}
if cnt < 0 {
panic("Close() called and reduced ref count to < 0.")
}

return nil
}

func (i mmapTableIndex) Clone() (tableIndex, error) {
cnt := atomic.AddInt32(i.refCnt, 1)
if cnt == 1 {
panic("Clone() called after last Close(). This index is no longer valid.")
}
return i, nil
func (ti *mmapTableIndex) parseIndexBuffer(q MemoryQuotaProvider) (err error) {
ti.onHeapTableIndex, err = parseTableIndexWithOffsetBuff(ti.indexDataBuff, ti.offset1DataBuff, &noopQuotaProvider{})
ti.q = q
return err
}

func (i mmapTableIndex) prefixIdx(prefix uint64) (idx uint32) {
// NOTE: The golang impl of sort.Search is basically inlined here. This method can be called in
// an extremely tight loop and inlining the code was a significant perf improvement.
idx, j := 0, i.chunkCount
for idx < j {
h := idx + (j-idx)/2 // avoid overflow when computing h
// i ≤ h < j
if i.prefixes[h] < prefix {
idx = h + 1 // preserves f(i-1) == false
} else {
j = h // preserves f(j) == true
}
}
return
type notifyFunc func(n uint64, total uint64)

var noOpNotify = func(uint64, uint64) {}

type mmapStats struct {
mu sync.Mutex
totalUsed uint64
WillMmap notifyFunc
Mmapped notifyFunc
UnMapped notifyFunc
}

var GlobalMmapStats = &mmapStats{
sync.Mutex{},
0,
noOpNotify,
noOpNotify,
noOpNotify,
}

type mmapWStat struct {
m mmap.MMap
used uint64
}

func mmapWithStats(f *os.File, length int, prot, flags int, offset int64) (mmapWStat, error) {
GlobalMmapStats.mu.Lock()
defer GlobalMmapStats.mu.Unlock()
GlobalMmapStats.WillMmap(uint64(length), GlobalMmapStats.totalUsed)
mmap, err := mmap.MapRegion(f, length, prot, flags, offset)
if err != nil {
return mmapWStat{}, err
}
GlobalMmapStats.totalUsed += uint64(length)
GlobalMmapStats.Mmapped(uint64(length), GlobalMmapStats.totalUsed)
return mmapWStat{mmap, uint64(length)}, nil
}

func (m mmapWStat) Unmap() error {
GlobalMmapStats.mu.Lock()
defer GlobalMmapStats.mu.Unlock()
err := m.m.Unmap()
if err != nil {
return err
}
GlobalMmapStats.totalUsed -= m.used
GlobalMmapStats.UnMapped(m.used, GlobalMmapStats.totalUsed)
return nil
}
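
Since GlobalMmapStats exposes notify hooks, callers can observe mapping churn without touching the mmap code itself. A hedged usage sketch (the log destination is an assumption, not something this diff prescribes):

// Assumes a "log" import in the enclosing package.
func init() {
	GlobalMmapStats.Mmapped = func(n, total uint64) {
		log.Printf("mmapped %d bytes, %d total in use", n, total)
	}
	GlobalMmapStats.UnMapped = func(n, total uint64) {
		log.Printf("unmapped %d bytes, %d total in use", n, total)
	}
}

The hooks run while GlobalMmapStats.mu is held, so they should stay cheap and must not call back into mmapWithStats or Unmap, or they would deadlock.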

@@ -59,9 +59,13 @@ func TestMMapIndex(t *testing.T) {
idx, err := parseTableIndexByCopy(bs, &noopQuotaProvider{})
require.NoError(t, err)
defer idx.Close()
mmidx, err := newMmapTableIndex(idx, nil)
mmidx, err := newMmapTableIndex(idx.chunkCount)
require.NoError(t, err)
copy(mmidx.indexDataBuff, bs)
err = mmidx.parseIndexBuffer(&noopQuotaProvider{})
require.NoError(t, err)
defer mmidx.Close()

assert.Equal(t, idx.ChunkCount(), mmidx.ChunkCount())
seen := make(map[addr]bool)
for i := uint32(0); i < idx.ChunkCount(); i++ {

@@ -132,7 +132,6 @@ type tableReaderAt interface {
// more chunks together into a single read request to backing storage.
type tableReader struct {
tableIndex
prefixes []uint64
chunkCount uint32
totalUncompressedData uint64
r tableReaderAt
@@ -143,13 +142,8 @@ type tableReader struct {
// and footer, though it may contain an unspecified number of bytes before that data. r should allow
// retrieving any desired range of bytes from the table.
func newTableReader(index tableIndex, r tableReaderAt, blockSize uint64) (tableReader, error) {
p, err := index.Prefixes()
if err != nil {
return tableReader{}, err
}
return tableReader{
index,
p,
index.ChunkCount(),
index.TotalUncompressedData(),
r,
@@ -157,6 +151,10 @@ func newTableReader(index tableIndex, r tableReaderAt, blockSize uint64) (tableR
}, nil
}

func (tr tableReader) prefixAt(idx uint32) uint64 {
return tr.tableIndex.PrefixAt(idx)
}

// Scan across (logically) two ordered slices of address prefixes.
func (tr tableReader) hasMany(addrs []hasRecord) (bool, error) {
// TODO: Use findInIndex if (tr.chunkCount - len(addrs)*Log2(tr.chunkCount)) > (tr.chunkCount - len(addrs))
@@ -170,7 +168,7 @@ func (tr tableReader) hasMany(addrs []hasRecord) (bool, error) {
continue
}

for filterIdx < filterLen && addr.prefix > tr.prefixes[filterIdx] {
for filterIdx < filterLen && addr.prefix > tr.prefixAt(filterIdx) {
filterIdx++
}

@@ -178,13 +176,13 @@ func (tr tableReader) hasMany(addrs []hasRecord) (bool, error) {
return true, nil
}

if addr.prefix != tr.prefixes[filterIdx] {
if addr.prefix != tr.prefixAt(filterIdx) {
remaining = true
continue
}

// prefixes are equal, so locate and compare against the corresponding suffix
for j := filterIdx; j < filterLen && addr.prefix == tr.prefixes[j]; j++ {
for j := filterIdx; j < filterLen && addr.prefix == tr.prefixAt(j); j++ {
m, err := tr.EntrySuffixMatches(j, addr.a)
if err != nil {
return false, err
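
hasMany above is a classic merge scan over two sorted sequences; the prefixAt change only swaps direct slice indexing for an accessor without altering that shape. Stripped of the NBS details, the underlying algorithm looks like this (illustrative only, not the Dolt code):

// intersectSorted reports which sorted query values appear in the sorted
// index. The index cursor only ever moves forward, so the scan is O(n+m)
// instead of a binary search per query.
func intersectSorted(queries, index []uint64) []uint64 {
	var out []uint64
	j := 0
	for _, q := range queries {
		for j < len(index) && index[j] < q {
			j++ // advance past smaller index entries; never rewinds
		}
		if j < len(index) && index[j] == q {
			out = append(out, q)
		}
	}
	return out
}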
@@ -483,7 +481,7 @@ func (tr tableReader) getManyAtOffsetsWithReadFunc(
// order.
func (tr tableReader) findOffsets(reqs []getRecord) (ors offsetRecSlice, remaining bool, err error) {
filterIdx := uint32(0)
filterLen := uint32(len(tr.prefixes))
filterLen := tr.chunkCount
ors = make(offsetRecSlice, 0, len(reqs))

// Iterate over |reqs| and |tr.prefixes| (both sorted by address) and build the set
@@ -494,7 +492,7 @@ func (tr tableReader) findOffsets(reqs []getRecord) (ors offsetRecSlice, remaini
}

// advance within the prefixes until we reach one which is >= req.prefix
for filterIdx < filterLen && tr.prefixes[filterIdx] < req.prefix {
for filterIdx < filterLen && tr.prefixAt(filterIdx) < req.prefix {
filterIdx++
}

@@ -503,13 +501,13 @@ func (tr tableReader) findOffsets(reqs []getRecord) (ors offsetRecSlice, remaini
break
}

if req.prefix != tr.prefixes[filterIdx] {
if req.prefix != tr.prefixAt(filterIdx) {
remaining = true
continue
}

// record all offsets within the table which contain the data required.
for j := filterIdx; j < filterLen && req.prefix == tr.prefixes[j]; j++ {
for j := filterIdx; j < filterLen && req.prefix == tr.prefixAt(j); j++ {
m, err := tr.EntrySuffixMatches(j, req.a)
if err != nil {
return nil, false, err
@@ -660,7 +658,7 @@ func (tr tableReader) Clone() (tableReader, error) {
if err != nil {
return tableReader{}, err
}
return tableReader{ti, tr.prefixes, tr.chunkCount, tr.totalUncompressedData, tr.r, tr.blockSize}, nil
return tableReader{ti, tr.chunkCount, tr.totalUncompressedData, tr.r, tr.blockSize}, nil
}

type readerAdapter struct {

@@ -29,7 +29,6 @@ import (

"golang.org/x/sync/errgroup"

"github.com/dolthub/dolt/go/store/atomicerr"
"github.com/dolthub/dolt/go/store/chunks"
)

@@ -424,11 +423,10 @@ func (ts tableSet) Flatten(ctx context.Context) (tableSet, error) {
// those specified by |specs|.
func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
merged := tableSet{
novel: make(chunkSources, 0, len(ts.novel)),
upstream: make(chunkSources, 0, len(specs)),
p: ts.p,
q: ts.q,
rl: ts.rl,
novel: make(chunkSources, 0, len(ts.novel)),
p: ts.p,
q: ts.q,
rl: ts.rl,
}

// Rebase the novel tables, skipping those that are actually empty (usually due to de-duping during table compaction)
@@ -458,54 +456,80 @@ func (ts tableSet) Rebase(ctx context.Context, specs []tableSpec, stats *Stats)
}
}

// Open all the new upstream tables concurrently
var rp atomic.Value
ae := atomicerr.New()
merged.upstream = make(chunkSources, len(tablesToOpen))
wg := &sync.WaitGroup{}
wg.Add(len(tablesToOpen))
for i, spec := range tablesToOpen {
go func(idx int, spec tableSpec) {
defer wg.Done()
defer func() {
if r := recover(); r != nil {
rp.Store(r)
}
}()
if !ae.IsSet() {
for _, existing := range ts.upstream {
h, err := existing.hash()
if err != nil {
ae.SetIfError(err)
return
}
if spec.name == h {
c, err := existing.Clone()
if err != nil {
ae.SetIfError(err)
return
}
merged.upstream[idx] = c
return
}
}
err := ts.q.AcquireQuota(ctx, spec.GetMemorySize())
if err != nil {
ae.SetIfError(err)
return
}
merged.upstream[idx], err = ts.p.Open(ctx, spec.name, spec.chunkCount, stats)
ae.SetIfError(err)
merged.upstream = make([]chunkSource, len(tablesToOpen))

type openOp struct {
idx int
spec tableSpec
}
var openOps []openOp
var memoryNeeded uint64

// Clone tables that we have already opened
OUTER:
for idx, spec := range tablesToOpen {
for _, existing := range ts.upstream {
h, err := existing.hash()
if err != nil {
return tableSet{}, err
}
}(i, spec)
}
wg.Wait()

if r := rp.Load(); r != nil {
panic(r)
if spec.name == h {
c, err := existing.Clone()
if err != nil {
return tableSet{}, err
}
merged.upstream[idx] = c
continue OUTER
}
}
openOps = append(openOps, openOp{idx, spec})
memoryNeeded += indexMemSize(spec.chunkCount)
}

if err := ae.Get(); err != nil {
err := ts.q.AcquireQuota(ctx, memoryNeeded)
if err != nil {
return tableSet{}, err
}

var rp atomic.Value
group, ctx := errgroup.WithContext(ctx)

mu := sync.Mutex{}
var opened []chunkSource

for _, op := range openOps {
idx, spec := op.idx, op.spec
group.Go(
func() (err error) {
defer func() {
if r := recover(); r != nil {
rp.Store(r)
err = errors.New("panicked")
}
}()
cs, err := ts.p.Open(ctx, spec.name, spec.chunkCount, stats)
if err != nil {
return err
}
merged.upstream[idx] = cs
mu.Lock()
opened = append(opened, cs)
mu.Unlock()
return nil
},
)
}

err = group.Wait()
if err != nil {
// Close any opened chunkSources
for _, cs := range opened {
_ = cs.Close()
}

if r := rp.Load(); r != nil {
panic(r)
}
return tableSet{}, err
}
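
The rewritten Rebase follows a common errgroup shape: collect the work up front, run it concurrently, and on any failure close whatever partially succeeded. A generic sketch of that pattern (a sketch under those assumptions, not the Dolt code itself):

// openAll opens every name concurrently; if any open fails, everything
// that did open is closed before the error is returned.
func openAll(ctx context.Context, names []string, open func(context.Context, string) (io.Closer, error)) ([]io.Closer, error) {
	g, ctx := errgroup.WithContext(ctx)
	var mu sync.Mutex
	opened := make([]io.Closer, 0, len(names))
	for _, name := range names {
		name := name // capture the loop variable for the goroutine
		g.Go(func() error {
			c, err := open(ctx, name)
			if err != nil {
				return err
			}
			mu.Lock()
			opened = append(opened, c)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		for _, c := range opened {
			_ = c.Close() // release partial results on failure
		}
		return nil, err
	}
	return opened, nil
}

Tracking the opened sources in a separate slice, rather than scanning merged.upstream, is what makes the cleanup path safe: only fully opened sources ever get closed.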

@@ -23,6 +23,7 @@ package nbs

import (
"context"
"math/rand"
"testing"

"github.com/stretchr/testify/assert"
@@ -144,6 +145,15 @@ func TestTableSetExtract(t *testing.T) {
}
}

func persist(t *testing.T, p tablePersister, chunks ...[]byte) {
for _, c := range chunks {
mt := newMemTable(testMemTableSize)
mt.addChunk(computeAddr(c), c)
_, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
}
}

func TestTableSetRebase(t *testing.T) {
assert := assert.New(t)
q := NewUnlimitedMemQuotaProvider()
@@ -208,3 +218,34 @@ func TestTableSetPhysicalLen(t *testing.T) {

assert.True(mustUint64(ts.physicalLen()) > indexSize(mustUint32(ts.count())))
}

func TestTableSetClosesOpenedChunkSourcesOnErr(t *testing.T) {
q := NewUnlimitedMemQuotaProvider()
p := newFakeTablePersister(q)
persist(t, p, testChunks...)

var mem uint64 = 0
var sources []addr
for addr := range p.sources {
sources = append(sources, addr)
mem += indexMemSize(1)
}

idx := rand.Intn(len(testChunks))
addrToFail := sources[idx]
p.sourcesToFail[addrToFail] = true

var specs []tableSpec
for _, addr := range sources {
specs = append(specs, tableSpec{addr, 1})
}

ts := tableSet{p: p, q: q, rl: make(chan struct{}, 1)}
_, err := ts.Rebase(context.Background(), specs, &Stats{})
require.Error(t, err)

for _ = range p.opened {
mem -= indexMemSize(1)
}
require.EqualValues(t, mem, q.Usage())
}

@@ -64,10 +64,6 @@ func maxTableSize(numChunks, totalData uint64) uint64 {
return numChunks*(prefixTupleSize+lengthSize+addrSuffixSize+checksumSize+uint64(maxSnappySize)) + footerSize
}

func memSize(numChunks uint32) uint64 {
return indexSize(numChunks) + footerSize
}

func indexSize(numChunks uint32) uint64 {
return uint64(numChunks) * (addrSuffixSize + lengthSize + prefixTupleSize)
}

@@ -15,6 +15,8 @@
package prolly

import (
"encoding/hex"
"io"
"math"

fb "github.com/google/flatbuffers/go"
@@ -41,7 +43,7 @@ type Node struct {
count uint16
}

func mapNodeFromBytes(bb []byte) Node {
func MapNodeFromBytes(bb []byte) Node {
buf := serial.GetRootAsTupleMap(bb, 0)
return mapNodeFromFlatbuffer(*buf)
}
@@ -73,10 +75,12 @@ func (nd Node) hashOf() hash.Hash {
return hash.Of(nd.bytes())
}

// getKey returns the |ith| key of this node
func (nd Node) getKey(i int) nodeItem {
return nd.keys.GetSlice(i)
}

// getValue returns the |ith| value of this node. Only valid for leaf nodes.
func (nd Node) getValue(i int) nodeItem {
if nd.leafNode() {
return nd.values.GetSlice(i)
@@ -86,6 +90,12 @@ func (nd Node) getValue(i int) nodeItem {
}
}

// size returns the number of keys in this node
func (nd Node) size() int {
return nd.keys.Len()
}

// getRef returns the |ith| ref in this node. Only valid for internal nodes.
func (nd Node) getRef(i int) hash.Hash {
refs := nd.buf.RefArrayBytes()
start, stop := i*refSize, (i+1)*refSize
@@ -101,10 +111,12 @@ func (nd Node) getSubtreeCounts() subtreeCounts {
return readSubtreeCounts(int(nd.count), buf)
}

// level returns the tree level for this node
func (nd Node) level() int {
return int(nd.buf.TreeLevel())
}

// leafNode returns whether this node is a leaf
func (nd Node) leafNode() bool {
return nd.level() == 0
}
@@ -136,3 +148,47 @@ func getValueOffsetsVector(buf serial.TupleMap) []byte {

return tab.Bytes[start:stop]
}

// OutputProllyNode writes the node given to the writer given in a semi-human-readable format, where values are still
// displayed in hex-encoded byte strings, but are delineated into their fields. All nodes have keys displayed in this
// manner. Interior nodes have their child hash references spelled out, leaf nodes have value tuples delineated like
// the keys
func OutputProllyNode(w io.Writer, node Node) error {
w.Write([]byte("["))
for i := 0; i < node.size(); i++ {
k := node.getKey(i)
kt := val.Tuple(k)

w.Write([]byte("\n { key: "))
for j := 0; j < kt.Count(); j++ {
if j > 0 {
w.Write([]byte(", "))
}
w.Write([]byte(hex.EncodeToString(kt.GetField(j))))
}

if node.leafNode() {
v := node.getValue(i)
vt := val.Tuple(v)

w.Write([]byte(" value: "))
for j := 0; j < vt.Count(); j++ {
if j > 0 {
w.Write([]byte(", "))
}
w.Write([]byte(hex.EncodeToString(vt.GetField(j))))
}

w.Write([]byte(" }"))
} else {
ref := node.getRef(i)

w.Write([]byte(" ref: #"))
w.Write([]byte(ref.String()))
w.Write([]byte(" }"))
}
}

w.Write([]byte("\n]\n"))
return nil
}
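
A hypothetical call site for the new helper, where node is a Node obtained elsewhere (the surrounding names are assumptions for illustration):

// Dump a prolly tree node to stdout for debugging.
if err := OutputProllyNode(os.Stdout, node); err != nil {
	return err
}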

@@ -143,7 +143,7 @@ func (nb *nodeBuilder) build(pool pool.BuffPool) (node Node) {
b.Finish(serial.TupleMapEnd(b))

buf := b.FinishedBytes()
return mapNodeFromBytes(buf)
return MapNodeFromBytes(buf)
}

func newSubtreeCounts(count int) subtreeCounts {

@@ -70,7 +70,7 @@ func NewNodeStore(cs chunks.ChunkStore) NodeStore {
func (ns nodeStore) Read(ctx context.Context, ref hash.Hash) (Node, error) {
c, ok := ns.cache.get(ref)
if ok {
return mapNodeFromBytes(c.Data()), nil
return MapNodeFromBytes(c.Data()), nil
}

c, err := ns.store.Get(ctx, ref)
@@ -81,7 +81,7 @@ func (ns nodeStore) Read(ctx context.Context, ref hash.Hash) (Node, error) {

ns.cache.insert(c)

return mapNodeFromBytes(c.Data()), err
return MapNodeFromBytes(c.Data()), err
}

// Write implements NodeStore.

@@ -27,11 +27,11 @@ import (
)

func NodeFromValue(v types.Value) Node {
return mapNodeFromBytes(v.(types.InlineBlob))
return MapNodeFromBytes(v.(types.TupleRowStorage))
}

func ValueFromMap(m Map) types.Value {
return types.InlineBlob(m.root.bytes())
return types.TupleRowStorage(m.root.bytes())
}

func MapFromValue(v types.Value, sch schema.Schema, vrw types.ValueReadWriter) Map {

@@ -27,8 +27,6 @@ import (
"fmt"
"regexp"

"github.com/dolthub/dolt/go/store/d"

"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
@@ -103,11 +101,13 @@ func NewAbsolutePath(str string) (AbsolutePath, error) {
}

// Resolve returns the Value reachable by 'p' in 'db'.
func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database, vrw types.ValueReadWriter) (val types.Value) {
func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database, vrw types.ValueReadWriter) (val types.Value, err error) {
if len(p.Dataset) > 0 {
var ok bool
ds, err := db.GetDataset(ctx, p.Dataset)
d.PanicIfError(err)
if err != nil {
return nil, err
}

if val, ok = ds.MaybeHead(); !ok {
val = nil
@@ -115,7 +115,9 @@ func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database, vrw types.
} else if !p.Hash.IsEmpty() {
var err error
val, err = vrw.ReadValue(ctx, p.Hash)
d.PanicIfError(err)
if err != nil {
return nil, err
}
} else {
panic("Unreachable")
}
@@ -123,7 +125,9 @@ func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database, vrw types.
if val != nil && p.Path != nil {
var err error
val, err = p.Path.Resolve(ctx, val, vrw)
d.PanicIfError(err)
if err != nil {
return nil, err
}
}
return
}
@@ -159,7 +163,11 @@ func ReadAbsolutePaths(ctx context.Context, db datas.Database, vrw types.ValueRe
return nil, fmt.Errorf("invalid input path '%s'", ps)
}

v := p.Resolve(ctx, db, vrw)
v, err := p.Resolve(ctx, db, vrw)
if err != nil {
return nil, err
}

if v == nil {
return nil, fmt.Errorf("input path '%s' does not exist in database", ps)
}

@@ -81,7 +81,8 @@ func TestAbsolutePaths(t *testing.T) {
resolvesTo := func(exp types.Value, str string) {
p, err := NewAbsolutePath(str)
assert.NoError(err)
act := p.Resolve(context.Background(), db, vs)
act, err := p.Resolve(context.Background(), db, vs)
assert.NoError(err)
if exp == nil {
assert.Nil(act)
} else {

@@ -396,9 +396,12 @@ func (sp Spec) GetDataset(ctx context.Context) (ds datas.Dataset) {

// GetValue returns the Value at this Spec's Path within its Database, or nil
// if this isn't a Path Spec or if that path isn't found.
func (sp Spec) GetValue(ctx context.Context) (val types.Value) {
func (sp Spec) GetValue(ctx context.Context) (val types.Value, err error) {
if !sp.Path.IsEmpty() {
val = sp.Path.Resolve(ctx, sp.GetDatabase(ctx), sp.GetVRW(ctx))
val, err = sp.Path.Resolve(ctx, sp.GetDatabase(ctx), sp.GetVRW(ctx))
if err != nil {
return nil, err
}
}
return
}

@@ -131,7 +131,9 @@ func TestMemHashPathSpec(t *testing.T) {
// assert.Nil(spec.GetValue())

spec.GetVRW(context.Background()).WriteValue(context.Background(), s)
assert.Equal(s, spec.GetValue(context.Background()))
value, err := spec.GetValue(context.Background())
assert.NoError(err)
assert.Equal(s, value)
}

func TestMemDatasetPathSpec(t *testing.T) {
@@ -153,7 +155,9 @@ func TestMemDatasetPathSpec(t *testing.T) {
_, err = datas.CommitValue(context.Background(), db, ds, mustList(types.NewList(context.Background(), spec.GetVRW(context.Background()), types.Float(42))))
assert.NoError(err)

assert.Equal(types.Float(42), spec.GetValue(context.Background()))
value, err := spec.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), value)
}

func TestNBSDatabaseSpec(t *testing.T) {
@@ -428,15 +432,23 @@ func TestPinPathSpec(t *testing.T) {

assert.Equal(mustHash(head.Hash(types.Format_7_18)), pinned.Path.Hash)
assert.Equal(fmt.Sprintf("mem::#%s.value", mustHash(head.Hash(types.Format_7_18)).String()), pinned.String())
assert.Equal(types.Float(42), pinned.GetValue(context.Background()))
assert.Equal(types.Float(42), unpinned.GetValue(context.Background()))
pinnedValue, err := pinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), pinnedValue)
unpinnedValue, err := unpinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), unpinnedValue)

ds, err = db.GetDataset(context.Background(), "foo")
assert.NoError(err)
_, err = datas.CommitValue(context.Background(), db, ds, types.Float(43))
assert.NoError(err)
assert.Equal(types.Float(42), pinned.GetValue(context.Background()))
assert.Equal(types.Float(43), unpinned.GetValue(context.Background()))
pinnedValue, err = pinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), pinnedValue)
unpinnedValue, err = unpinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(43), unpinnedValue)
}

func TestPinDatasetSpec(t *testing.T) {
@@ -472,7 +484,9 @@ func TestPinDatasetSpec(t *testing.T) {

assert.Equal(mustHash(head.Hash(types.Format_7_18)), pinned.Path.Hash)
assert.Equal(fmt.Sprintf("mem::#%s", mustHash(head.Hash(types.Format_7_18)).String()), pinned.String())
assert.Equal(types.Float(42), commitValue(pinned.GetValue(context.Background())))
pinnedValue, err := pinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), commitValue(pinnedValue))
headVal, ok, err := unpinned.GetDataset(context.Background()).MaybeHeadValue()
assert.NoError(err)
assert.True(ok)
@@ -482,7 +496,9 @@ func TestPinDatasetSpec(t *testing.T) {
assert.NoError(err)
_, err = datas.CommitValue(context.Background(), db, ds, types.Float(43))
assert.NoError(err)
assert.Equal(types.Float(42), commitValue(pinned.GetValue(context.Background())))
pinnedValue, err = pinned.GetValue(context.Background())
assert.NoError(err)
assert.Equal(types.Float(42), commitValue(pinnedValue))
headVal, ok, err = unpinned.GetDataset(context.Background()).MaybeHeadValue()
assert.NoError(err)
assert.True(ok)

@@ -62,6 +62,7 @@ const (
PolygonKind

SerialMessageKind
TupleRowStorageKind

UnknownKind NomsKind = 255
)
@@ -94,6 +95,7 @@ func init() {
KindToType[LinestringKind] = Linestring{}
KindToType[PolygonKind] = Polygon{}
KindToType[SerialMessageKind] = SerialMessage{}
KindToType[TupleRowStorageKind] = TupleRowStorage{}

SupportedKinds[BlobKind] = true
SupportedKinds[BoolKind] = true
@@ -122,39 +124,41 @@ func init() {
SupportedKinds[LinestringKind] = true
SupportedKinds[PolygonKind] = true
SupportedKinds[SerialMessageKind] = true
SupportedKinds[TupleRowStorageKind] = true
}

var KindToTypeSlice []Value

var KindToString = map[NomsKind]string{
UnknownKind: "unknown",
BlobKind: "Blob",
BoolKind: "Bool",
CycleKind: "Cycle",
ListKind: "List",
MapKind: "Map",
FloatKind: "Float",
RefKind: "Ref",
SetKind: "Set",
StructKind: "Struct",
StringKind: "String",
TypeKind: "Type",
UnionKind: "Union",
ValueKind: "Value",
UUIDKind: "UUID",
IntKind: "Int",
UintKind: "Uint",
NullKind: "Null",
TupleKind: "Tuple",
InlineBlobKind: "InlineBlob",
TimestampKind: "Timestamp",
DecimalKind: "Decimal",
JSONKind: "JSON",
GeometryKind: "Geometry",
PointKind: "Point",
LinestringKind: "Linestring",
PolygonKind: "Polygon",
SerialMessageKind: "SerialMessage",
UnknownKind: "unknown",
BlobKind: "Blob",
BoolKind: "Bool",
CycleKind: "Cycle",
ListKind: "List",
MapKind: "Map",
FloatKind: "Float",
RefKind: "Ref",
SetKind: "Set",
StructKind: "Struct",
StringKind: "String",
TypeKind: "Type",
UnionKind: "Union",
ValueKind: "Value",
UUIDKind: "UUID",
IntKind: "Int",
UintKind: "Uint",
NullKind: "Null",
TupleKind: "Tuple",
InlineBlobKind: "InlineBlob",
TimestampKind: "Timestamp",
DecimalKind: "Decimal",
JSONKind: "JSON",
GeometryKind: "Geometry",
PointKind: "Point",
LinestringKind: "Linestring",
PolygonKind: "Polygon",
SerialMessageKind: "SerialMessage",
TupleRowStorageKind: "TupleRowStorage",
}

// String returns the name of the kind.
108
go/store/types/tuplerowstorage.go
Executable file
@@ -0,0 +1,108 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"math"
"strings"

"github.com/dolthub/dolt/go/store/hash"
)

// TupleRowStorage is a clone of InlineBlob. It only exists to be able to easily differentiate these two very different
// use cases during the migration from the old storage format to the new one.
type TupleRowStorage []byte

func (v TupleRowStorage) Value(ctx context.Context) (Value, error) {
return v, nil
}

func (v TupleRowStorage) Equals(other Value) bool {
v2, ok := other.(TupleRowStorage)
if !ok {
return false
}

return bytes.Equal(v, v2)
}

func (v TupleRowStorage) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(TupleRowStorage); ok {
return bytes.Compare(v, v2) == -1, nil
}
return TupleRowStorageKind < other.Kind(), nil
}

func (v TupleRowStorage) Hash(nbf *NomsBinFormat) (hash.Hash, error) {
return getHash(v, nbf)
}

func (v TupleRowStorage) isPrimitive() bool {
return true
}

func (v TupleRowStorage) WalkValues(ctx context.Context, cb ValueCallback) error {
return nil
}

func (v TupleRowStorage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
return nil
}

func (v TupleRowStorage) typeOf() (*Type, error) {
return PrimitiveTypeMap[TupleRowStorageKind], nil
}

func (v TupleRowStorage) Kind() NomsKind {
return TupleRowStorageKind
}

func (v TupleRowStorage) valueReadWriter() ValueReadWriter {
return nil
}

func (v TupleRowStorage) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
byteLen := len(v)
if byteLen > math.MaxUint16 {
return fmt.Errorf("TupleRowStorage has length %v when max is %v", byteLen, math.MaxUint16)
}

err := TupleRowStorageKind.writeTo(w, nbf)
if err != nil {
return err
}

w.writeUint16(uint16(byteLen))
w.writeRaw(v)
return nil
}

func (v TupleRowStorage) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, error) {
bytes := b.ReadInlineBlob()
return TupleRowStorage(bytes), nil
}

func (v TupleRowStorage) skip(nbf *NomsBinFormat, b *binaryNomsReader) {
size := uint32(b.readUint16())
b.skipBytes(size)
}

func (v TupleRowStorage) HumanReadableString() string {
return strings.ToUpper(hex.EncodeToString(v))
}
@@ -36,6 +36,11 @@ func (sb SlicedBuffer) GetSlice(i int) []byte {
return sb.Buf[start:stop]
}

func (sb SlicedBuffer) Len() int {
// offsets stored as uint16s with first offset omitted
return len(sb.Offs)/2 + 1
}
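
The Len arithmetic relies on the first offset being omitted: n slices need only n-1 uint16 boundaries, i.e. 2*(n-1) bytes of Offs. A quick illustrative check (not part of the diff):

// lenFromOffs inverts the encoding: 2*(n-1) bytes of offsets -> n items.
func lenFromOffs(offsBytes int) int {
	return offsBytes/2 + 1
}

// lenFromOffs(4) == 3: two uint16 boundaries delimit three slices.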
|
||||
type offsets []byte
|
||||
|
||||
// offsetsSize returns the number of bytes needed to
|
||||
|
||||
@@ -216,8 +216,8 @@ var CopiedNomsFiles []CopiedNomsFile = []CopiedNomsFile{
|
||||
{Path: "store/nbs/manifest_cache_test.go", NomsPath: "go/nbs/manifest_cache_test.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/mem_table.go", NomsPath: "go/nbs/mem_table.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/mem_table_test.go", NomsPath: "go/nbs/mem_table_test.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/mmap_table_reader.go", NomsPath: "go/nbs/mmap_table_reader.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/mmap_table_reader_test.go", NomsPath: "go/nbs/mmap_table_reader_test.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/file_table_reader.go", NomsPath: "go/nbs/mmap_table_reader.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/file_table_reader_test.go", NomsPath: "go/nbs/mmap_table_reader_test.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/persisting_chunk_source.go", NomsPath: "go/nbs/persisting_chunk_source.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/persisting_chunk_source_test.go", NomsPath: "go/nbs/persisting_chunk_source_test.go", HadCopyrightNotice: true},
|
||||
{Path: "store/nbs/root_tracker_test.go", NomsPath: "go/nbs/root_tracker_test.go", HadCopyrightNotice: true},
|
||||
|
||||
@@ -149,8 +149,7 @@ teardown() {
|
||||
[ "$status" -eq 0 ]
|
||||
}
|
||||
|
||||
@test "dump: SQL type - with views/trigger" {
|
||||
skip "dolt dump views/trigger NOT implemented"
|
||||
@test "dump: SQL type - with views/triggers and procedures" {
|
||||
dolt sql -q "CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 BIGINT);"
|
||||
dolt sql -q "CREATE TRIGGER trigger1 BEFORE INSERT ON test FOR EACH ROW SET new.v1 = -new.v1;"
|
||||
dolt sql -q "CREATE VIEW view1 AS SELECT v1 FROM test;"
|
||||
@@ -160,6 +159,71 @@ teardown() {
|
||||
dolt sql -q "CREATE VIEW view2 AS SELECT y FROM b;"
|
||||
dolt sql -q "CREATE TRIGGER trigger2 AFTER INSERT ON a FOR EACH ROW INSERT INTO b VALUES (new.x * 2);"
|
||||
dolt sql -q "INSERT INTO a VALUES (2);"
|
||||
dolt sql -q "CREATE TRIGGER trigger3 AFTER INSERT ON a FOR EACH ROW FOLLOWS trigger2 INSERT INTO b VALUES (new.x * 2);"
|
||||
dolt sql -q "CREATE TRIGGER trigger4 AFTER INSERT ON a FOR EACH ROW PRECEDES trigger3 INSERT INTO b VALUES (new.x * 2);"
|
||||
dolt sql -q "CREATE PROCEDURE p1 (in x int) select x from dual"
|
||||
|
||||
dolt add .
|
||||
dolt commit -m "create tables"
|
||||
|
||||
run dolt dump
|
||||
[ "$status" -eq 0 ]
|
||||
[ -f doltdump.sql ]
rm -rf ./.dolt
dolt init

run dolt sql < doltdump.sql
[ "$status" -eq 0 ]

run dolt sql -q "SELECT * from test"
[ "$status" -eq 0 ]
[[ "$output" =~ "1" ]] || false
[[ "$output" =~ "-1" ]] || false

run dolt sql -q "SELECT * from a"
[ "$status" -eq 0 ]
[[ "$output" =~ "2" ]] || false

run dolt sql -q "SELECT * from b"
[ "$status" -eq 0 ]
[[ "$output" =~ "4" ]] || false

run dolt sql -q "select * from view1"
[ "$status" -eq 0 ]
[[ "$output" =~ "-1" ]] || false

run dolt sql -q "select * from view2"
[ "$status" -eq 0 ]
[[ "$output" =~ "4" ]] || false

run dolt sql -q "show create view view1"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE VIEW `view1` AS select v1 from test' ]] || false

run dolt sql -q "show create view view2"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE VIEW `view2` AS select y from b' ]] || false

run dolt sql -q "show create trigger trigger1"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE TRIGGER `trigger1` before insert on test for each row set new.v1 = -new.v1' ]] || false

run dolt sql -q "show create trigger trigger2"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE TRIGGER `trigger2` after insert on a for each row insert into b values (new.x * 2)' ]] || false

run dolt sql -q "show create trigger trigger3"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE TRIGGER `trigger3` after insert on a for each row follows trigger2 insert into b values (new.x * 2)' ]] || false

run dolt sql -q "show create trigger trigger4"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE TRIGGER `trigger4` after insert on a for each row precedes trigger3 insert into b values (new.x * 2)' ]] || false

run dolt sql -q "show create procedure p1"
[ "$status" -eq 0 ]
[[ "$output" =~ 'CREATE PROCEDURE `p1` (in x int) select x from dual' ]] || false
}

@test "dump: SQL type - with keyless tables" {

@@ -25,6 +25,34 @@ teardown() {
ps -p $remotesrv_pid | grep remotesrv
}

@test "remotes: pull also fetches" {
mkdir remote
mkdir repo1

cd repo1
dolt init
dolt remote add origin file://../remote
dolt push origin main

cd ..
dolt clone file://./remote repo2

cd repo2
run dolt branch -va
[[ "$output" =~ "main" ]] || false
[[ ! "$output" =~ "other" ]] || false

cd ../repo1
dolt checkout -b other
dolt push origin other

cd ../repo2
dolt pull
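# the pull above should also fetch refs for branches we are not on,
# making 'other' visible to 'dolt branch -va' below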
run dolt branch -va
[[ "$output" =~ "main" ]] || false
[[ "$output" =~ "other" ]] || false
}

@test "remotes: add a remote using dolt remote" {
run dolt remote add test-remote http://localhost:50051/test-org/test-repo
[ "$status" -eq 0 ]
@@ -174,7 +202,7 @@ SQL
[[ "$output" =~ "v1" ]] || false
}

@test "remotes: tags are only pulled if their commit is pulled" {
@test "remotes: tags are fetched when pulling" {
dolt remote add test-remote http://localhost:50051/test-org/test-repo
dolt sql <<SQL
CREATE TABLE test (pk int PRIMARY KEY);
@@ -199,11 +227,6 @@ SQL
cd dolt-repo-clones/test-repo
run dolt pull
[ "$status" -eq 0 ]
run dolt tag
[ "$status" -eq 0 ]
[[ "$output" =~ "v1" ]] || false
[[ ! "$output" =~ "other_tag" ]] || false
dolt fetch
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "v1" ]] || false
@@ -1463,3 +1486,88 @@ setup_ref_test() {
dolt push --set-upstream origin feature
dolt push
}

@test "remotes: clone local repo with file url" {
mkdir repo1
cd repo1
dolt init
dolt commit --allow-empty -am "commit from repo1"

cd ..
dolt clone file://./repo1/.dolt/noms repo2
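# a file:// remote points directly at the noms storage directory inside .dolt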
cd repo2
run dolt log
[[ "$output" =~ "commit from repo1" ]] || false

run dolt status
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false

dolt commit --allow-empty -am "commit from repo2"
dolt push

cd ../repo1
run dolt log
[[ "$output" =~ "commit from repo1" ]]
[[ "$output" =~ "commit from repo2" ]]
}

@test "remotes: clone local repo with absolute file path" {
skiponwindows "absolute paths don't work on windows"
mkdir repo1
cd repo1
dolt init
dolt commit --allow-empty -am "commit from repo1"

cd ..
dolt clone file://$(pwd)/repo1/.dolt/noms repo2
cd repo2
run dolt log
[[ "$output" =~ "commit from repo1" ]] || false

run dolt status
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false

dolt commit --allow-empty -am "commit from repo2"
dolt push

cd ../repo1
run dolt log
[[ "$output" =~ "commit from repo1" ]]
[[ "$output" =~ "commit from repo2" ]]
}

@test "remotes: local clone does not contain working set changes" {
mkdir repo1
cd repo1
dolt init
run dolt sql -q "create table t (i int)"
[ "$status" -eq 0 ]
run dolt status
[[ "$output" =~ "new table:" ]] || false

cd ..
dolt clone file://./repo1/.dolt/noms repo2
cd repo2

run dolt status
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
}

@test "remotes: local clone pushes to other branch" {
mkdir repo1
cd repo1
dolt init

cd ..
dolt clone file://./repo1/.dolt/noms repo2
cd repo2
dolt checkout -b other
dolt sql -q "create table t (i int)"
dolt commit -am "adding table from other"
dolt push origin other

cd ../repo1
dolt checkout other
run dolt log
[[ "$output" =~ "adding table from other" ]]
}

@@ -393,3 +393,12 @@ teardown() {
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "t1" ]] || false
}

@test "replication: local clone" {
run dolt clone file://./repo1/.dolt/noms repo2
[ "$status" -eq 0 ]
cd repo2
run dolt ls
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 1 ]
}
@@ -145,6 +145,16 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "revert: Stored Procedure HEAD" {
dolt sql -q "CALL DOLT_REVERT('HEAD')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "2,2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@test "revert: SQL HEAD~1" {
dolt sql -q "SELECT DOLT_REVERT('HEAD~1')"
run dolt sql -q "SELECT * FROM test" -r=csv
@@ -155,6 +165,16 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "revert: Stored Procedure HEAD~1" {
dolt sql -q "CALL DOLT_REVERT('HEAD~1')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "3,3" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@test "revert: SQL HEAD & HEAD~1" {
dolt sql -q "SELECT DOLT_REVERT('HEAD', 'HEAD~1')"
run dolt sql -q "SELECT * FROM test" -r=csv
@@ -164,6 +184,15 @@ SQL
[[ "${#lines[@]}" = "2" ]] || false
}

@test "revert: Stored Procedure HEAD & HEAD~1" {
dolt sql -q "CALL DOLT_REVERT('HEAD', 'HEAD~1')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}

@test "revert: SQL has changes in the working set" {
dolt sql -q "INSERT INTO test VALUES (4, 4)"
run dolt sql -q "SELECT DOLT_REVERT('HEAD')"
@@ -171,6 +200,13 @@ SQL
[[ "$output" =~ "changes" ]] || false
}

@test "revert: Stored Procedure has changes in the working set" {
dolt sql -q "INSERT INTO test VALUES (4, 4)"
run dolt sql -q "CALL DOLT_REVERT('HEAD')"
[ "$status" -eq "1" ]
[[ "$output" =~ "changes" ]] || false
}

@test "revert: SQL conflicts" {
dolt sql -q "INSERT INTO test VALUES (4, 4)"
dolt add -A
@@ -183,6 +219,18 @@ SQL
[[ "$output" =~ "conflict" ]] || false
}

@test "revert: Stored Procedure conflicts" {
dolt sql -q "INSERT INTO test VALUES (4, 4)"
dolt add -A
dolt commit -m "Inserted 4"
dolt sql -q "REPLACE INTO test VALUES (4, 5)"
dolt add -A
dolt commit -m "Updated 4"
run dolt sql -q "CALL DOLT_REVERT('HEAD~1')"
[ "$status" -eq "1" ]
[[ "$output" =~ "conflict" ]] || false
}

@test "revert: SQL constraint violations" {
dolt sql <<"SQL"
CREATE TABLE parent (pk BIGINT PRIMARY KEY, v1 BIGINT, INDEX(v1));
@@ -203,12 +251,38 @@ SQL
[[ "$output" =~ "constraint violation" ]] || false
}

@test "revert: Stored Procedure constraint violations" {
dolt sql <<"SQL"
CREATE TABLE parent (pk BIGINT PRIMARY KEY, v1 BIGINT, INDEX(v1));
CREATE TABLE child (pk BIGINT PRIMARY KEY, v1 BIGINT, CONSTRAINT fk_name FOREIGN KEY (v1) REFERENCES parent (v1));
INSERT INTO parent VALUES (10, 1), (20, 2);
INSERT INTO child VALUES (1, 1), (2, 2);
SQL
dolt add -A
dolt commit -m "MC1"
dolt sql -q "DELETE FROM child WHERE pk = 2"
dolt add -A
dolt commit -m "MC2"
dolt sql -q "DELETE FROM parent WHERE pk = 20"
dolt add -A
dolt commit -m "MC3"
run dolt sql -q "CALL DOLT_REVERT('HEAD~1')"
[ "$status" -eq "1" ]
[[ "$output" =~ "constraint violation" ]] || false
}

@test "revert: SQL too far back" {
run dolt sql -q "SELECT DOLT_REVERT('HEAD~10')"
[ "$status" -eq "1" ]
[[ "$output" =~ "ancestor" ]] || false
}

@test "revert: Stored Procedure too far back" {
run dolt sql -q "CALL DOLT_REVERT('HEAD~10')"
[ "$status" -eq "1" ]
[[ "$output" =~ "ancestor" ]] || false
}

@test "revert: SQL no changes" {
dolt sql -q "SELECT DOLT_REVERT('HEAD~4')"
run dolt sql -q "SELECT * FROM test" -r=csv
@@ -220,12 +294,29 @@ SQL
[[ "${#lines[@]}" = "4" ]] || false
}

@test "revert: Stored Procedure no changes" {
dolt sql -q "CALL DOLT_REVERT('HEAD~4')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "2,2" ]] || false
[[ "$output" =~ "3,3" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
}

@test "revert: SQL invalid hash" {
run dolt sql -q "SELECT DOLT_REVERT('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')"
[ "$status" -eq "1" ]
[[ "$output" =~ "target commit not found" ]] || false
}

@test "revert: Stored Procedure invalid hash" {
run dolt sql -q "CALL DOLT_REVERT('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')"
[ "$status" -eq "1" ]
[[ "$output" =~ "target commit not found" ]] || false
}

@test "revert: SQL HEAD with author" {
dolt sql -q "SELECT DOLT_REVERT('HEAD', '--author', 'john doe <johndoe@gmail.com>')"
run dolt sql -q "SELECT * FROM test" -r=csv
@@ -239,6 +330,19 @@ SQL
[[ "$output" =~ "Author: john doe <johndoe@gmail.com>" ]] || false
}

@test "revert: Stored Procedure HEAD with author" {
dolt sql -q "CALL DOLT_REVERT('HEAD', '--author', 'john doe <johndoe@gmail.com>')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "2,2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false

run dolt log -n 1
[[ "$output" =~ "Author: john doe <johndoe@gmail.com>" ]] || false
}

@test "revert: SQL HEAD & HEAD~1 with author" {
dolt sql -q "SELECT DOLT_REVERT('HEAD', 'HEAD~1', '--author', 'john doe <johndoe@gmail.com>')"
run dolt sql -q "SELECT * FROM test" -r=csv
@@ -250,3 +354,15 @@ SQL
run dolt log -n 1
[[ "$output" =~ "Author: john doe <johndoe@gmail.com>" ]] || false
}

@test "revert: Stored Procedure HEAD & HEAD~1 with author" {
dolt sql -q "CALL DOLT_REVERT('HEAD', 'HEAD~1', '--author', 'john doe <johndoe@gmail.com>')"
run dolt sql -q "SELECT * FROM test" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "pk,v1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false

run dolt log -n 1
[[ "$output" =~ "Author: john doe <johndoe@gmail.com>" ]] || false
}

@@ -36,6 +36,22 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: CALL DOLT_ADD all flag works" {
run dolt sql -q "CALL DOLT_ADD('-A')"
run dolt sql -q "CALL DOLT_COMMIT('-m', 'Commit1')"

# Check that everything was added
run dolt diff
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: DOLT_ADD all w/ . works" {
run dolt sql -q "SELECT DOLT_ADD('.')"
run dolt sql -q "SELECT DOLT_COMMIT('-m', 'Commit1')"
@@ -52,6 +68,22 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: CALL DOLT_ADD all w/ . works" {
run dolt sql -q "CALL DOLT_ADD('.')"
run dolt sql -q "CALL DOLT_COMMIT('-m', 'Commit1')"

# Check that everything was added
run dolt diff
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: DOLT_ADD all w/ . combined with DOLT_COMMIT -a works" {
run dolt sql -q "SELECT DOLT_ADD('.')"
run dolt sql -q "SELECT DOLT_COMMIT('-a', '-m', 'Commit1')"
@@ -67,6 +99,21 @@ teardown() {
[[ "$output" =~ "Bats Tests <bats@email.fake>" ]] || false
}

@test "sql-add: CALL DOLT_ADD all w/ . combined with DOLT_COMMIT -a works" {
run dolt sql -q "CALL DOLT_ADD('.')"
run dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'Commit1')"

# Check that everything was added
run dolt diff
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
[[ "$output" =~ "Bats Tests <bats@email.fake>" ]] || false
}

@test "sql-add: DOLT_ADD can take in one table" {
dolt sql -q "SELECT DOLT_ADD('test')"
dolt sql -q "SELECT DOLT_COMMIT('-m', 'Commit1')"
@@ -84,6 +131,23 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: CALL DOLT_ADD can take in one table" {
dolt sql -q "CALL DOLT_ADD('test')"
dolt sql -q "CALL DOLT_COMMIT('-m', 'Commit1')"

# Check that just test was added and not test2.
run dolt status
[ "$status" -eq 0 ]
regex='test2'
[[ "$output" =~ "$regex" ]] || false

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: DOLT_ADD can take in multiple tables" {
run dolt sql -q "SELECT DOLT_ADD('test', 'test2')"
run dolt sql -q "SELECT DOLT_COMMIT('-m', 'Commit1')"
@@ -100,6 +164,22 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: CALL DOLT_ADD can take in multiple tables" {
run dolt sql -q "CALL DOLT_ADD('test', 'test2')"
run dolt sql -q "CALL DOLT_COMMIT('-m', 'Commit1')"

# Check that both test and test2 are added.
run dolt diff
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-add: Check that Dolt add works with docs" {
skip "We don't know if this use case makes sense or not"

@@ -35,6 +35,23 @@ teardown() {
[[ "$output" =~ "new-branch" ]] || false
}

@test "sql-branch: CALL DOLT_BRANCH works" {
run dolt branch
[[ ! "$output" =~ "new_branch" ]] || false

run dolt sql -q "CALL DOLT_BRANCH('new-branch')"
[ $status -eq 0 ]

# should create new branch and should not checkout the new branch
run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "main" ]] || false

run dolt branch
[ $status -eq 0 ]
[[ "$output" =~ "new-branch" ]] || false
}

@test "sql-branch: DOLT_BRANCH throws error" {
# branches that already exist
dolt branch existing_branch
@@ -48,6 +65,19 @@ teardown() {
[ "$output" = "error: cannot branch empty string" ]
}

@test "sql-branch: CALL DOLT_BRANCH throws error" {
# branches that already exist
dolt branch existing_branch
run dolt sql -q "CALL DOLT_BRANCH('existing_branch')"
[ $status -eq 1 ]
[[ "$output" =~ "fatal: A branch named 'existing_branch' already exists." ]]

# empty branch
run dolt sql -q "CALL DOLT_BRANCH('')"
[ $status -eq 1 ]
[[ "$output" =~ "error: cannot branch empty string" ]]
}

@test "sql-branch: DOLT_BRANCH -c copies not current branch and stays on current branch" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"
run dolt status
@@ -79,6 +109,37 @@ SQL
[[ "$output" =~ "4" ]] || false
}

@test "sql-branch: CALL DOLT_BRANCH -c copies not current branch and stays on current branch" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"
run dolt status
[[ "$output" =~ "main" ]] || false

dolt checkout -b original
dolt sql -q "insert into test values (4);"
dolt add .
dolt commit -m "add 4 in original"

dolt checkout main

# Current branch should be still main with test table without entry 4
run dolt sql << SQL
CALL DOLT_BRANCH('-c', 'original', 'copy');
SELECT * FROM test WHERE pk > 3;
SQL
[ $status -eq 0 ]
[[ ! "$output" =~ "4" ]] || false

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "main" ]] || false

run dolt checkout copy
[ $status -eq 0 ]

run dolt sql -q "SELECT * FROM test WHERE pk > 3;"
[[ "$output" =~ "4" ]] || false
}

@test "sql-branch: DOLT_BRANCH -c throws error on error cases" {
run dolt status
[[ "$output" =~ "main" ]] || false
@@ -110,6 +171,37 @@ SQL
[ "$output" = "fatal: A branch named 'existing_branch' already exists." ]
}

@test "sql-branch: CALL DOLT_BRANCH -c throws error on error cases" {
run dolt status
[[ "$output" =~ "main" ]] || false

# branch copying from is empty
run dolt sql -q "CALL DOLT_BRANCH('-c','','copy')"
[ $status -eq 1 ]
[[ "$output" =~ "error: cannot branch empty string" ]]

# branch copying to is empty
run dolt sql -q "CALL DOLT_BRANCH('-c','main','')"
[ $status -eq 1 ]
[[ "$output" =~ "error: cannot branch empty string" ]]

dolt branch 'existing_branch'
run dolt branch
[[ "$output" =~ "main" ]] || false
[[ "$output" =~ "existing_branch" ]] || false
[[ ! "$output" =~ "original" ]] || false

# branch copying from does not exist
run dolt sql -q "CALL DOLT_BRANCH('-c', 'original', 'copy');"
[ $status -eq 1 ]
[[ "$output" =~ "fatal: A branch named 'original' not found" ]]

# branch copying to that exists
run dolt sql -q "CALL DOLT_BRANCH('-c', 'main', 'existing_branch');"
[ $status -eq 1 ]
[[ "$output" =~ "fatal: A branch named 'existing_branch' already exists." ]]
}

@test "sql-branch: DOLT_BRANCH works as insert into dolt_branches table" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"

@@ -123,6 +215,19 @@ SQL
[ "$output" = "$mainhash" ]
}

@test "sql-branch: CALL DOLT_BRANCH works as insert into dolt_branches table" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"

run dolt sql -q "SELECT hash FROM dolt_branches WHERE name='main';"
[ $status -eq 0 ]
mainhash=$output

dolt sql -q "CALL DOLT_BRANCH('feature-branch');"
run dolt sql -q "SELECT hash FROM dolt_branches WHERE name='feature-branch';"
[ $status -eq 0 ]
[ "$output" = "$mainhash" ]
}

@test "sql-branch: asserts unsupported -m, -d, -D flags" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"
dolt branch new_branch
@@ -139,3 +244,20 @@ SQL
[ $status -eq 1 ]
[[ "$output" =~ "Deleting branches is not supported." ]] || false
}

@test "sql-branch: asserts unsupported -m, -d, -D flags on CALL" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"
dolt branch new_branch

run dolt sql -q "CALL DOLT_BRANCH('-m', 'new_branch', 'changed');"
[ $status -eq 1 ]
[[ "$output" =~ "Renaming a branch is not supported." ]] || false

run dolt sql -q "CALL DOLT_BRANCH('-d', 'new_branch');"
[ $status -eq 1 ]
[[ "$output" =~ "Deleting branches is not supported." ]] || false

run dolt sql -q "CALL DOLT_BRANCH('-D', 'new_branch');"
[ $status -eq 1 ]
[[ "$output" =~ "Deleting branches is not supported." ]] || false
}

@@ -41,21 +41,59 @@ teardown() {
[[ "$output" =~ "main" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT just works" {
run dolt sql -q "SELECT DOLT_CHECKOUT('-b', 'feature-branch')"
[ $status -eq 0 ]

# dolt sql -q "CALL DOLT_CHECKOUT(...)" should not change the branch
# It changes the branch for that session which ends after the SQL
# statements are executed.
run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "main" ]] || false

run dolt branch
[ $status -eq 0 ]
[[ "$output" =~ "feature-branch" ]] || false

run dolt sql -q "CALL DOLT_CHECKOUT('main');"
[ $status -eq 0 ]

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "main" ]] || false
}

@test "sql-checkout: DOLT_CHECKOUT -b throws error on branches that already exist" {
run dolt sql -q "SELECT DOLT_CHECKOUT('-b', 'main')"
[ $status -eq 1 ]
}

@test "sql-checkout: CALL DOLT_CHECKOUT -b throws error on branches that already exist" {
run dolt sql -q "CALL SELECT DOLT_CHECKOUT('-b', 'main')"
[ $status -eq 1 ]
}

@test "sql-checkout: DOLT_CHECKOUT throws error on branches that don't exist" {
run dolt sql -q "SELECT DOLT_CHECKOUT('feature-branch')"
[ $status -eq 1 ]
}

@test "sql-checkout: CALL DOLT_CHECKOUT throws error on branches that don't exist" {
run dolt sql -q "CALL DOLT_CHECKOUT('feature-branch')"
[ $status -eq 1 ]
}

@test "sql-checkout: DOLT_CHECKOUT -b throws error on empty branch" {
run dolt sql -q "SELECT DOLT_CHECKOUT('-b', '')"
[ $status -eq 1 ]
}

@test "sql-checkout: CALL DOLT_CHECKOUT -b throws error on empty branch" {
run dolt sql -q "CALL DOLT_CHECKOUT('-b', '')"
[ $status -eq 1 ]
}

@test "sql-checkout: DOLT_CHECKOUT updates the head ref session var" {
run dolt sql <<SQL
SELECT DOLT_CHECKOUT('-b', 'feature-branch');
@@ -66,6 +104,16 @@ SQL
[[ "$output" =~ "refs/heads/feature-branch" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT updates the head ref session var" {
run dolt sql <<SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
select @@dolt_repo_$$_head_ref;
SQL

[ $status -eq 0 ]
[[ "$output" =~ "refs/heads/feature-branch" ]] || false
}

@test "sql-checkout: DOLT_CHECKOUT changes branches, leaves behind working set unmodified." {
dolt add . && dolt commit -m "0, 1, and 2 in test table"
dolt sql -q "insert into test values (4);"
@@ -134,6 +182,74 @@ SQL
[ $status -eq 0 ]
}

@test "sql-checkout: CALL DOLT_CHECKOUT changes branches, leaves behind working set unmodified." {
dolt add . && dolt commit -m "0, 1, and 2 in test table"
dolt sql -q "insert into test values (4);"

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false

# After switching to a new branch, we don't see working set changes
run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
select * from test where pk > 3;
SQL
[ $status -eq 0 ]
[[ ! "$output" =~ "4" ]] || false

# the branch was created by dolt_checkout
run dolt branch
[ $status -eq 0 ]
[[ "$output" =~ "feature-branch" ]] || false

# but the shell is still on branch main, with the same changes as before
run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false

run dolt sql << SQL
select * from test where pk > 3;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "4" ]] || false

run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch2');
insert into test values (5);
select * from test where pk > 3;
SQL
[ $status -eq 0 ]
[[ ! "$output" =~ "4" ]] || false
[[ "$output" =~ "5" ]] || false

# working set from main has 4, but not 5
run dolt sql -q "select * from test where pk > 3"
[ $status -eq 0 ]
[[ "$output" =~ "4" ]] || false
[[ ! "$output" =~ "5" ]] || false

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false

# In a new session, the value inserted should still be there
run dolt sql << SQL
CALL DOLT_CHECKOUT('feature-branch2');
select * from test where pk > 3;
SQL
[ $status -eq 0 ]
[[ ! "$output" =~ "4" ]] || false
[[ "$output" =~ "5" ]] || false

# This is an error on the command line, but not in SQL
run dolt sql -q "SELECT DOLT_CHECKOUT('main')"
[ $status -eq 0 ]
}

@test "sql-checkout: DOLT_CHECKOUT works with dolt_diff tables" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"

@@ -177,6 +293,49 @@ SQL

}

@test "sql-checkout: CALL DOLT_CHECKOUT works with dolt_diff tables" {
dolt add . && dolt commit -m "1, 2, and 3 in test table"

run dolt sql -q "SELECT * FROM dolt_diff_test";
[ $status -eq 0 ]
emptydiff=$output

run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
SELECT * FROM dolt_diff_test;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "$emptydiff" ]] || false

run dolt sql << SQL
CALL DOLT_CHECKOUT('feature-branch');
SELECT * FROM dolt_diff_test;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "$emptydiff" ]] || false

# add some changes to the working set
dolt sql -q "insert into test values (4)"
run dolt sql -q "SELECT * FROM dolt_diff_test";
[ $status -eq 0 ]
[[ ! "$output" =~ "$emptydiff" ]] || false

run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch2');
SELECT * FROM dolt_diff_test;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "$emptydiff" ]] || false

run dolt sql << SQL
CALL DOLT_CHECKOUT('feature-branch2');
SELECT * FROM dolt_diff_test;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "$emptydiff" ]] || false

}

@test "sql-checkout: DOLT_CHECKOUT followed by DOLT_COMMIT" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"

@@ -210,6 +369,39 @@ SQL
[[ "$output" =~ "John Doe" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT followed by DOLT_COMMIT" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"

run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (4);
CALL DOLT_ADD('.');
CALL DOLT_COMMIT('-m', 'Added 4', '--author', 'John Doe <john@doe.com>');
SQL
[ $status -eq 0 ]

dolt status

# on branch main, no changes visible
run dolt log -n 1
[[ ! "$output" =~ "Added 4" ]] || false
[[ "$output" =~ "0, 1, and 2" ]] || false

dolt checkout feature-branch
run dolt log -n 1
[ $status -eq 0 ]
[[ "$output" =~ "Added 4" ]] || false
[[ "$output" =~ "John Doe" ]] || false

dolt checkout main
run dolt merge feature-branch

[ $status -eq 0 ]
run dolt log -n 1
[[ "$output" =~ "Added 4" ]] || false
[[ "$output" =~ "John Doe" ]] || false
}

@test "sql-checkout: DOLT_CHECKOUT with table name clears working set changes" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"

@@ -232,6 +424,28 @@ SQL
[[ ! "$output" =~ "4" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT with table name clears working set changes" {
dolt add . && dolt commit -m "0, 1, and 2 in test table"

run dolt sql << SQL
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (4);
select * from test where pk > 3;
SQL

[ $status -eq 0 ]
[[ "$output" =~ "4" ]] || false

run dolt sql << SQL
CALL DOLT_CHECKOUT('feature-branch');
CALL DOLT_CHECKOUT('test');
select * from test where pk > 3;
SQL

[ $status -eq 0 ]
[[ ! "$output" =~ "4" ]] || false
}

@test "sql-checkout: DOLT_CHECKOUT between branches operating on the same table works." {
run dolt sql << SQL
CREATE TABLE one_pk (
@@ -265,12 +479,51 @@ SQL
[[ ! "$output" =~ "0,0,0" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT between branches operating on the same table works." {
run dolt sql << SQL
CREATE TABLE one_pk (
pk1 BIGINT NOT NULL,
c1 BIGINT,
c2 BIGINT,
PRIMARY KEY (pk1)
);
CALL DOLT_COMMIT('-a', '-m', 'add tables');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
CALL DOLT_CHECKOUT('main');
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,0,0);
CALL DOLT_COMMIT('-a', '-m', 'changed main');
CALL DOLT_CHECKOUT('feature-branch');
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,1,1);
CALL dolt_commit('-a', '-m', "changed feature-branch");
SQL
[ $status -eq 0 ]

run dolt sql -q "SELECT * FROM one_pk" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "pk1,c1,c2" ]] || false
[[ ! "$output" =~ "0,1,1" ]] || false
[[ "$output" =~ "0,0,0" ]] || false

dolt checkout feature-branch
run dolt sql -q "SELECT * FROM one_pk" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "pk1,c1,c2" ]] || false
[[ "$output" =~ "0,1,1" ]] || false
[[ ! "$output" =~ "0,0,0" ]] || false
}

@test "sql-checkout: DOLT_CHECKOUT does not throw an error when checking out to the same branch" {
run dolt sql -q "SELECT DOLT_CHECKOUT('main')"
[ $status -eq 0 ]
[[ "$output" =~ "0" ]] || false
}

@test "sql-checkout: CALL DOLT_CHECKOUT does not throw an error when checking out to the same branch" {
run dolt sql -q "CALL DOLT_CHECKOUT('main')"
[ $status -eq 0 ]
[[ "$output" =~ "0" ]] || false
}

get_head_commit() {
dolt log -n 1 | grep -m 1 commit | cut -c 8-
}

@@ -30,6 +30,18 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT without a message throws error" {
run dolt sql -q "CALL DOLT_ADD('.')"
[ $status -eq 0 ]

run dolt sql -q "CALL DOLT_COMMIT()"
[ $status -eq 1 ]
run dolt log
[ $status -eq 0 ]
regex='Initialize'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: DOLT_COMMIT with just a message reads session parameters" {
run dolt sql -q "SELECT DOLT_ADD('.')"
[ $status -eq 0 ]
@@ -43,6 +55,19 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT with just a message reads session parameters" {
run dolt sql -q "CALL DOLT_ADD('.')"
[ $status -eq 0 ]

run dolt sql -q "CALL DOLT_COMMIT('-m', 'Commit1')"
[ $status -eq 0 ]
run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: DOLT_COMMIT with the all flag performs properly" {
run dolt sql -q "SELECT DOLT_COMMIT('-a', '-m', 'Commit1')"

@@ -58,6 +83,21 @@ teardown() {
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT with the all flag performs properly" {
run dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'Commit1')"

# Check that everything was added
run dolt diff
[ "$status" -eq 0 ]
[ "$output" = "" ]

run dolt log
[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
regex='Bats Tests <bats@email.fake>'
[[ "$output" =~ "$regex" ]] || false
}

@test "sql-commit: DOLT_COMMIT with all flag, message and author" {
run dolt sql -r csv -q "SELECT DOLT_COMMIT('-a', '-m', 'Commit1', '--author', 'John Doe <john@doe.com>') as commit_hash"
[ $status -eq 0 ]
@@ -124,6 +164,16 @@ SQL
[[ "$output" =~ "Commit1" ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT immediately updates dolt log system table." {
run dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Commit1');
SELECT * FROM dolt_log;
SQL

[ $status -eq 0 ]
[[ "$output" =~ "Commit1" ]] || false
}

@test "sql-commit: DOLT_COMMIT immediately updates dolt diff system table." {
original_hash=$(get_head_commit)
run dolt sql << SQL
@@ -136,6 +186,18 @@ SQL
[[ "$output" =~ $original_hash ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT immediately updates dolt diff system table." {
original_hash=$(get_head_commit)
run dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Commit1');
SELECT from_commit FROM dolt_diff_test WHERE to_commit = hashof('head');
SQL

[ $status -eq 0 ]
# Represents that the diff table marks a change from the recent commit.
[[ "$output" =~ $original_hash ]] || false
}

@test "sql-commit: DOLT_COMMIT updates session variables" {
head_variable=@@dolt_repo_$$_head
head_commit=$(get_head_commit)
@@ -156,6 +218,26 @@ SQL
[[ "$output" =~ $head_commit ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT updates session variables" {
head_variable=@@dolt_repo_$$_head
head_commit=$(get_head_commit)
run dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Commit1');
SELECT $head_variable = HASHOF('head');
SELECT $head_variable;
SQL

[ $status -eq 0 ]
[[ "$output" =~ "true" ]] || false

# Verify that the head commit changes.
[[ ! "$output" =~ $head_commit ]] || false

# Verify that head on log matches the new session variable.
head_commit=$(get_head_commit)
[[ "$output" =~ $head_commit ]] || false
}

@test "sql-commit: DOLT_COMMIT with unstaged tables leaves them in the working set" {
head_variable=@@dolt_repo_$$_head

@@ -215,6 +297,65 @@ SQL
[[ "$output" =~ 'test,false,modified' ]] || false
}

@test "sql-commit: CALL DOLT_COMMIT with unstaged tables leaves them in the working set" {
head_variable=@@dolt_repo_$$_head

run dolt sql << SQL
CREATE TABLE test2 (
pk int primary key
);
CALL DOLT_ADD('test');
CALL DOLT_COMMIT('-m', '0, 1, 2 in test');
SELECT $head_variable = HASHOF('head');
SQL

[ $status -eq 0 ]
[[ "$output" =~ "true" ]] || false

run dolt log -n1
[ $status -eq 0 ]
[[ "$output" =~ "0, 1, 2" ]] || false

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ ([[:space:]]*new table:[[:space:]]*test2) ]] || false

run dolt sql -r csv -q "show tables"
[ $status -eq 0 ]
[[ "$output" =~ 'test2' ]] || false

run dolt sql -r csv -q "select * from dolt_status;"
[ $status -eq 0 ]
[[ "$output" =~ 'test2,false,new table' ]] || false

# Now another partial commit
run dolt sql << SQL
CALL DOLT_ADD('test2');
insert into test values (20);
CALL DOLT_COMMIT('-m', 'added test2 table');
SELECT $head_variable = HASHOF('head');
SQL

[ $status -eq 0 ]
[[ "$output" =~ "true" ]] || false

run dolt log -n1
[ $status -eq 0 ]
[[ "$output" =~ "added test2 table" ]] || false

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false

run dolt diff
[ $status -eq 0 ]
[[ "$output" =~ "20" ]] || false

run dolt sql -r csv -q "select * from dolt_status;"
[ $status -eq 0 ]
[[ "$output" =~ 'test,false,modified' ]] || false
}

@test "sql-commit: The -f parameter is properly parsed and executes" {
run dolt sql <<SQL
SET FOREIGN_KEY_CHECKS=0;
@@ -250,6 +391,41 @@ SQL
[[ "$output" =~ '2' ]] || false
}

@test "sql-commit: The -f parameter is properly parsed and executes on CALL" {
run dolt sql <<SQL
SET FOREIGN_KEY_CHECKS=0;
CREATE TABLE colors (
id INT NOT NULL,
color VARCHAR(32) NOT NULL,

PRIMARY KEY (id),
INDEX color_index(color)
);
CREATE TABLE objects (
id INT NOT NULL,
name VARCHAR(64) NOT NULL,
color VARCHAR(32),

PRIMARY KEY(id),
FOREIGN KEY (color) REFERENCES colors(color)
);

INSERT INTO objects (id,name,color) VALUES (1,'truck','red'),(2,'ball','green'),(3,'shoe','blue');

CALL DOLT_COMMIT('-fam', 'Commit1');
SQL

[ $status -eq 0 ]

run dolt sql -r csv -q "select COUNT(*) from objects;"
[ $status -eq 0 ]
[[ "$output" =~ '3' ]] || false

run dolt sql -r csv -q "select COUNT(*) from dolt_log;"
[ $status -eq 0 ]
[[ "$output" =~ '2' ]] || false
}

@test "sql-commit: missing message does not panic and throws an error" {
run dolt sql -q "SELECT DOLT_COMMIT('--allow-empty', '-fam')"
[ $status -eq 1 ]
@@ -257,6 +433,13 @@ SQL
[[ "$output" =~ 'error: no value for option `message' ]] || false
}

@test "sql-commit: missing message does not panic and throws an error on CALL" {
run dolt sql -q "CALL DOLT_COMMIT('--allow-empty', '-fam')"
[ $status -eq 1 ]
! [[ "$output" =~ 'panic' ]] || false
[[ "$output" =~ 'error: no value for option `message' ]] || false
}

get_head_commit() {
dolt log -n 1 | grep -m 1 commit | cut -c 13-44
}

@@ -50,6 +50,19 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch default" {
cd repo2
dolt sql -q "CALL dolt_fetch()"

run dolt diff main origin/main
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('origin/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch origin" {
cd repo2
@@ -65,6 +78,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch origin" {
cd repo2
dolt sql -q "CALL dolt_fetch('origin')"

run dolt diff main origin/main
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('origin/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch main" {
cd repo2
dolt sql -q "select dolt_fetch('origin', 'main')"
@@ -79,6 +106,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch main" {
cd repo2
dolt sql -q "CALL dolt_fetch('origin', 'main')"

run dolt diff main origin/main
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('origin/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch custom remote" {
cd repo2
dolt sql -q "select dolt_fetch('test-remote')"
@@ -93,6 +134,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch custom remote" {
cd repo2
dolt sql -q "CALL dolt_fetch('test-remote')"

run dolt diff main test-remote/main
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('test-remote/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch specific ref" {
cd repo2
dolt sql -q "select dolt_fetch('test-remote', 'refs/heads/main:refs/remotes/test-remote/main')"
@@ -107,6 +162,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch specific ref" {
cd repo2
dolt sql -q "CALL dolt_fetch('test-remote', 'refs/heads/main:refs/remotes/test-remote/main')"

run dolt diff main test-remote/main
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('test-remote/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch feature branch" {
cd repo1
dolt push origin feature
@@ -124,6 +193,23 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch feature branch" {
cd repo1
dolt push origin feature

cd ../repo2
dolt sql -q "CALL dolt_fetch('origin', 'feature')"

run dolt diff main origin/feature
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('origin/feature')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch tag" {
cd repo1
dolt tag v1
@@ -142,6 +228,24 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch tag" {
cd repo1
dolt tag v1
dolt push origin v1

cd ../repo2
dolt sql -q "CALL dolt_fetch('origin', 'main')"

run dolt diff main v1
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('v1')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch only tag" {
skip "todo tag refspec support, and/or --tags option"
cd repo1
@@ -161,6 +265,25 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch only tag" {
skip "todo tag refspec support, and/or --tags option"
cd repo1
dolt tag v1
dolt push origin v1

cd ../repo2
dolt sql -q "CALL dolt_fetch('origin', 'refs/tags/v1:refs/tags/v1')"

run dolt diff main origin/v1
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('v1')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch rename ref" {
cd repo2
dolt sql -q "select dolt_fetch('test-remote', 'refs/heads/main:refs/remotes/test-remote/other')"
@@ -175,6 +298,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch rename ref" {
cd repo2
dolt sql -q "CALL dolt_fetch('test-remote', 'refs/heads/main:refs/remotes/test-remote/other')"

run dolt diff main test-remote/other
[ "$status" -eq 0 ]
[[ "$output" =~ "added table" ]] || false

run dolt sql -q "show tables as of hashof('test-remote/other')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch override local branch" {
skip "todo more flexible refspec support"
cd repo2
@@ -190,6 +327,21 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: CALL dolt_fetch override local branch" {
skip "todo more flexible refspec support"
cd repo2
dolt sql -q "CALL dolt_fetch('origin', 'main:refs/heads/main')"

dolt diff main origin/main
[ "$status" -eq 0 ]
[[ ! "$output" =~ "removed table" ]] || false

run dolt sql -q "show tables as of hashof('main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-fetch: dolt_fetch --force" {
# reverse information flow for force fetch repo1->rem1->repo2
cd repo2
@@ -214,6 +366,30 @@ teardown() {
[[ "$output" =~ "t2" ]] || false
}

@test "sql-fetch: CALL dolt_fetch --force" {
# reverse information flow for force fetch repo1->rem1->repo2
cd repo2
dolt sql -q "create table t2 (a int)"
dolt commit -am "forced commit"
dolt push --force origin main

cd ../repo1
run dolt sql -q "CALL dolt_fetch('origin', 'main')"
[ "$status" -eq 1 ]
[[ "$output" =~ "fetch failed: can't fast forward merge" ]] || false
dolt sql -q "CALL dolt_fetch('--force', 'origin', 'main')"

run dolt diff main origin/main
[ "$status" -eq 0 ]
[[ "$output" =~ "deleted table" ]] || false

run dolt sql -q "show tables as of hashof('origin/main')" -r csv
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t2" ]] || false
}

@test "sql-fetch: dolt_fetch unknown remote fails" {
cd repo2
dolt remote remove origin
@@ -222,6 +398,14 @@ teardown() {
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: CALL dolt_fetch unknown remote fails" {
cd repo2
dolt remote remove origin
run dolt sql -q "CALL dolt_fetch('unknown')"
[ "$status" -eq 1 ]
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: dolt_fetch unknown remote with fetchspec fails" {
cd repo2
dolt remote remove origin
@@ -230,6 +414,14 @@ teardown() {
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: CALL dolt_fetch unknown remote with fetchspec fails" {
cd repo2
dolt remote remove origin
run dolt sql -q "CALL dolt_fetch('unknown', 'main')"
[ "$status" -eq 1 ]
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: dolt_fetch unknown ref fails" {
cd repo2
run dolt sql -q "select dolt_fetch('origin', 'unknown')"
@@ -237,6 +429,13 @@ teardown() {
[[ "$output" =~ "invalid ref spec: 'unknown'" ]] || false
}

@test "sql-fetch: CALL dolt_fetch unknown ref fails" {
cd repo2
run dolt sql -q "CALL dolt_fetch('origin', 'unknown')"
[ "$status" -eq 1 ]
[[ "$output" =~ "invalid ref spec: 'unknown'" ]] || false
}

@test "sql-fetch: dolt_fetch empty remote fails" {
cd repo2
dolt remote remove origin
@@ -245,9 +444,24 @@ teardown() {
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: CALL dolt_fetch empty remote fails" {
cd repo2
dolt remote remove origin
run dolt sql -q "CALL dolt_fetch('')"
[ "$status" -eq 1 ]
[[ "$output" =~ "unknown remote" ]] || false
}

@test "sql-fetch: dolt_fetch empty ref fails" {
cd repo2
run dolt sql -q "select dolt_fetch('origin', '')"
[ "$status" -eq 1 ]
[[ "$output" =~ "invalid fetch spec: ''" ]] || false
}

@test "sql-fetch: CALL dolt_fetch empty ref fails" {
cd repo2
run dolt sql -q "CALL dolt_fetch('origin', '')"
[ "$status" -eq 1 ]
[[ "$output" =~ "invalid fetch spec: ''" ]] || false
}

@@ -25,6 +25,13 @@ teardown() {
[ $status -eq 1 ]
}

@test "sql-merge: CALL DOLT_MERGE with unknown branch name throws an error" {
dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'Step 1');"

run dolt sql -q "CALL DOLT_MERGE('feature-branch');"
[ $status -eq 1 ]
}

@test "sql-merge: DOLT_MERGE works with ff" {
dolt sql <<SQL
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
@@ -62,6 +69,43 @@ SQL
[[ "$output" =~ "4" ]] || false
}

@test "sql-merge: CALL DOLT_MERGE works with ff" {
dolt sql <<SQL
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
UPDATE test SET pk=1000 WHERE pk=0;
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
CALL DOLT_CHECKOUT('main');
SQL
run dolt sql -q "CALL DOLT_MERGE('feature-branch');"
[ $status -eq 0 ]

run dolt log -n 1
[ $status -eq 0 ]
[[ "$output" =~ "this is a ff" ]] || false

run dolt sql -q "SELECT COUNT(*) FROM dolt_log"
[ $status -eq 0 ]
[[ "$output" =~ "3" ]] || false

run dolt status
[ $status -eq 0 ]
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false

run dolt sql -q "SELECT * FROM test;" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "pk" ]] || false
[[ "$output" =~ "1" ]] || false
[[ "$output" =~ "2" ]] || false
[[ "$output" =~ "3" ]] || false
[[ "$output" =~ "1000" ]] || false

run dolt sql -q "SELECT COUNT(*) FROM test;" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "4" ]] || false
}

@test "sql-merge: DOLT_MERGE works in the session for fastforward." {
run dolt sql << SQL
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
@@ -88,6 +132,32 @@ SQL
[ "$MAIN_HASH" = "$FB_HASH" ]
}

@test "sql-merge: CALL DOLT_MERGE works in the session for fastforward." {
run dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
CALL DOLT_CHECKOUT('main');
CALL DOLT_MERGE('feature-branch');
SELECT COUNT(*) > 0 FROM test WHERE pk=3;
SQL
[ $status -eq 0 ]
[[ "$output" =~ "true" ]] || false

run dolt sql -r csv -q "select count(*) from dolt_status"
[ "${#lines[@]}" -eq 2 ]
[ "${lines[1]}" = "0" ]

run dolt sql -r csv -q "select hash from dolt_branches where branch='main'"
MAIN_HASH=${lines[1]}

run dolt sql -r csv -q "select hash from dolt_branches where branch='feature-branch'"
FB_HASH=${lines[1]}

[ "$MAIN_HASH" = "$FB_HASH" ]
}

@test "sql-merge: DOLT_MERGE with autocommit off works in fast-forward." {
dolt sql << SQL
set autocommit = off;
@@ -108,6 +178,26 @@ SQL
[ "${lines[4]}" = "3" ]
}

@test "sql-merge: CALL DOLT_MERGE with autocommit off works in fast-forward." {
dolt sql << SQL
set autocommit = off;
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
CALL DOLT_CHECKOUT('main');
CALL DOLT_MERGE('feature-branch');
SELECT DOLT_CHECKOUT('-b', 'new-branch');
SQL

run dolt sql -r csv -q "select * from test order by pk"
[ "${#lines[@]}" -eq 5 ]
[ "${lines[1]}" = "0" ]
[ "${lines[2]}" = "1" ]
[ "${lines[3]}" = "2" ]
[ "${lines[4]}" = "3" ]
}

@test "sql-merge: DOLT_MERGE no-ff works with autocommit off." {
dolt sql << SQL
set autocommit = off;
@@ -128,6 +218,26 @@ SQL
[ "${lines[4]}" = "3" ]
}

@test "sql-merge: CALL DOLT_MERGE no-ff works with autocommit off." {
dolt sql << SQL
set autocommit = off;
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
CALL DOLT_CHECKOUT('main');
CALL DOLT_MERGE('feature-branch', '-no-ff');
COMMIT;
SQL

run dolt sql -r csv -q "select * from test order by pk"
[ "${#lines[@]}" -eq 5 ]
[ "${lines[1]}" = "0" ]
[ "${lines[2]}" = "1" ]
[ "${lines[3]}" = "2" ]
[ "${lines[4]}" = "3" ]
}

@test "sql-merge: End to End Conflict Resolution with autocommit off." {
dolt sql << SQL
CREATE TABLE test2 (pk int primary key, val int);
@@ -152,6 +262,30 @@ SQL
[ "${lines[2]}" = "1,1" ]
}

@test "sql-merge: CALL End to End Conflict Resolution with autocommit off." {
dolt sql << SQL
CREATE TABLE test2 (pk int primary key, val int);
INSERT INTO test2 VALUES (0, 0);
SET autocommit = 0;
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test2 VALUES (1, 1);
UPDATE test2 SET val=1000 WHERE pk=0;
CALL DOLT_COMMIT('-a', '-m', 'this is a normal commit');
CALL DOLT_CHECKOUT('main');
UPDATE test2 SET val=1001 WHERE pk=0;
CALL DOLT_COMMIT('-a', '-m', 'update a value');
CALL DOLT_MERGE('feature-branch', '-m', 'this is a merge');
DELETE FROM dolt_conflicts_test2;
CALL DOLT_COMMIT('-a', '-m', 'remove conflicts');
SQL

run dolt sql -r csv -q "select * from test2 order by pk"
[ "${#lines[@]}" -eq 3 ]
[ "${lines[1]}" = "0,1001" ]
[ "${lines[2]}" = "1,1" ]
}

@test "sql-merge: DOLT_MERGE works with autocommit off." {
dolt sql << SQL
set autocommit = off;
@@ -175,6 +309,29 @@ SQL
[ "${lines[5]}" = "5" ]
}

@test "sql-merge: CALL DOLT_MERGE works with autocommit off." {
dolt sql << SQL
set autocommit = off;
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'this is a normal commit');
CALL DOLT_CHECKOUT('main');
INSERT INTO test VALUES (5);
CALL DOLT_COMMIT('-a', '-m', 'this is a normal commit');
CALL DOLT_MERGE('feature-branch');
COMMIT;
SQL

run dolt sql -r csv -q "select * from test order by pk"
[ "${#lines[@]}" -eq 6 ]
[ "${lines[1]}" = "0" ]
[ "${lines[2]}" = "1" ]
[ "${lines[3]}" = "2" ]
[ "${lines[4]}" = "3" ]
[ "${lines[5]}" = "5" ]
}

@test "sql-merge: DOLT_MERGE correctly returns head and working session variables." {
dolt sql << SQL
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
@@ -202,6 +359,33 @@ SQL
[[ "$output" =~ $head_hash ]] || false
}

@test "sql-merge: CALL DOLT_MERGE correctly returns head and working session variables." {
dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
SQL
head_variable=@@dolt_repo_$$_head

dolt checkout feature-branch
head_hash=$(get_head_commit)

dolt sql << SQL
CALL DOLT_CHECKOUT('main');
CALL DOLT_MERGE('feature-branch');
SQL

run dolt sql -q "SELECT $head_variable"
[ $status -eq 0 ]
[[ "$output" =~ $head_hash ]] || false

dolt checkout main
run dolt sql -q "SELECT $head_variable"
[ $status -eq 0 ]
[[ "$output" =~ $head_hash ]] || false
}

@test "sql-merge: DOLT_MERGE correctly merges branches with differing content in same table without conflicts" {
dolt sql << SQL
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
@@ -259,6 +443,59 @@ SQL
[[ "$output" =~ "Finish up Merge" ]] || false
}

@test "sql-merge: CALL DOLT_MERGE correctly merges branches with differing content in same table without conflicts" {
dolt sql << SQL
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
CALL DOLT_CHECKOUT('-b', 'feature-branch');
INSERT INTO test VALUES (3);
CALL DOLT_COMMIT('-a', '-m', 'Insert 3');
CALL DOLT_CHECKOUT('main');
INSERT INTO test VALUES (10000);
CALL DOLT_COMMIT('-a', '-m', 'Insert 10000');
SQL

run dolt sql << SQL
CALL DOLT_MERGE('feature-branch');
SELECT COUNT(*) = 2 FROM test WHERE pk > 2;
SQL

[ $status -eq 0 ]
! [[ "$output" =~ "Updating" ]] || false

run dolt sql -q "SELECT * FROM test" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "pk" ]] || false
[[ "$output" =~ "0" ]] || false
[[ "$output" =~ "1" ]] || false
[[ "$output" =~ "2" ]] || false
[[ "$output" =~ "3" ]] || false
[[ "$output" =~ "10000" ]] || false

run dolt log -n 1
[ $status -eq 0 ]
[[ "$output" =~ "Insert 10000" ]] || false

run dolt sql -q "SELECT COUNT(*) FROM dolt_log"
[ $status -eq 0 ]
[[ "$output" =~ "3" ]] || false

run dolt status
[[ "$output" =~ "All conflicts and constraint violations fixed but you are still merging" ]] || false
[[ "$output" =~ "Changes to be committed:" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
||||
run dolt sql -q "SELECT DOLT_COMMIT('-a', '-m', 'Finish up Merge')";
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
|
||||
run dolt log -n 1
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Finish up Merge" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE works with no-ff" {
|
||||
run dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -277,6 +514,24 @@ SQL
|
||||
[[ "$output" =~ "this is a no-ff" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE works with no-ff" {
|
||||
run dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'update feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch', '-no-ff', '-m', 'this is a no-ff');
|
||||
SELECT COUNT(*) = 4 FROM dolt_log
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "true" ]] || false
|
||||
|
||||
run dolt log -n 1
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "this is a no-ff" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE -no-ff correctly changes head and working session variables." {
|
||||
dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -302,6 +557,31 @@ SQL
|
||||
[[ ! "$output" =~ $working_hash ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE -no-ff correctly changes head and working session variables." {
|
||||
dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'update feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
SQL
|
||||
head_variable=@@dolt_repo_$$_head
|
||||
head_hash=$(get_head_commit)
|
||||
working_variable=@@dolt_repo_$$_working
|
||||
working_hash=$(get_working_hash)
|
||||
|
||||
run dolt sql -q "CALL DOLT_MERGE('feature-branch', '-no-ff', '-m', 'this is a no-ff');"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt sql -q "SELECT $head_variable"
|
||||
[ $status -eq 0 ]
|
||||
[[ ! "$output" =~ $head_hash ]] || false
|
||||
|
||||
run dolt sql -q "SELECT $working_variable"
|
||||
[ $status -eq 0 ]
|
||||
[[ ! "$output" =~ $working_hash ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE detects merge conflicts, fails to commit and leaves working set clean when dolt_allow_commit_conflicts = 0" {
|
||||
# The dolt_merge fails here, and leaves the working set clean, no conflicts, no merge in progress
|
||||
run dolt sql << SQL
|
||||
@@ -353,6 +633,57 @@ SQL
|
||||
[[ "$output" =~ "0,1,1" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE detects merge conflicts, fails to commit and leaves working set clean when dolt_allow_commit_conflicts = 0" {
|
||||
# The dolt_merge fails here, and leaves the working set clean, no conflicts, no merge in progress
|
||||
run dolt sql << SQL
|
||||
SET dolt_allow_commit_conflicts = 0;
|
||||
CREATE TABLE one_pk (
|
||||
pk1 BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk1)
|
||||
);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'add tables');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,0,0);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed main');
|
||||
CALL DOLT_CHECKOUT('feature-branch');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,1,1);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed feature branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SQL
|
||||
[ $status -eq 1 ]
|
||||
[[ $output =~ "merge has unresolved conflicts" ]] || false
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ $output =~ "working tree clean" ]] || false
|
||||
|
||||
run dolt merge --abort
|
||||
[ $status -eq 1 ]
|
||||
[[ $output =~ "no merge to abort" ]] || false
|
||||
|
||||
# make sure a clean SQL session doesn't have any merge going
|
||||
run dolt sql -q "CALL DOLT_MERGE('--abort');"
|
||||
[ $status -eq 1 ]
|
||||
[[ $output =~ "no merge to abort" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT * FROM one_pk;" -r csv
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "pk1,c1,c2" ]] || false
|
||||
[[ "$output" =~ "0,0,0" ]] || false
|
||||
[[ ! "$output" =~ "0,1,1" ]] || false
|
||||
|
||||
dolt checkout feature-branch
|
||||
run dolt sql -q "SELECT * FROM one_pk;" -r csv
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "pk1,c1,c2" ]] || false
|
||||
[[ ! "$output" =~ "0,0,0" ]] || false
|
||||
[[ "$output" =~ "0,1,1" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE detects conflicts, returns them in dolt_conflicts table" {
|
||||
run dolt sql << SQL
|
||||
SET dolt_allow_commit_conflicts = 0;
|
||||
@@ -457,6 +788,45 @@ SQL
|
||||
[[ "$output" =~ "9,9,9" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE(--abort) clears session state and allows additional edits" {
|
||||
run dolt sql << SQL
|
||||
set autocommit = off;
|
||||
CREATE TABLE one_pk (
|
||||
pk1 BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk1)
|
||||
);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'add tables');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,0,0);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed main');
|
||||
CALL DOLT_CHECKOUT('feature-branch');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,1,1);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed feature branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
CALL DOLT_MERGE('--abort');
|
||||
insert into one_pk values (9,9,9);
|
||||
commit;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# We can see the latest inserted row back on the command line
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*one_pk) ]] || false
|
||||
|
||||
run dolt diff
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "9" ]] || false
|
||||
|
||||
run dolt sql -r csv -q "select * from one_pk where pk1 > 3";
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "9,9,9" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE(--abort) clears index state" {
|
||||
run dolt sql << SQL
|
||||
set autocommit = off;
|
||||
@@ -487,6 +857,36 @@ SQL
|
||||
[[ "${lines[1]}" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE(--abort) clears index state" {
|
||||
run dolt sql << SQL
|
||||
set autocommit = off;
|
||||
CREATE TABLE one_pk (
|
||||
pk1 BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk1)
|
||||
);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'add tables');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,0,0);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed main');
|
||||
CALL DOLT_CHECKOUT('feature-branch');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,1,1);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed feature branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
CALL DOLT_MERGE('--abort');
|
||||
commit;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "${lines[0]}" =~ "On branch main" ]] || false
|
||||
[[ "${lines[1]}" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE can correctly commit unresolved conflicts" {
|
||||
dolt sql << SQL
|
||||
CREATE TABLE one_pk (
|
||||
@@ -515,6 +915,34 @@ SQL
|
||||
[[ $output =~ "merge has unresolved conflicts" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE can correctly commit unresolved conflicts" {
|
||||
dolt sql << SQL
|
||||
CREATE TABLE one_pk (
|
||||
pk1 BIGINT NOT NULL,
|
||||
c1 BIGINT,
|
||||
c2 BIGINT,
|
||||
PRIMARY KEY (pk1)
|
||||
);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'add tables');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,0,0);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed main');
|
||||
CALL DOLT_CHECKOUT('feature-branch');
|
||||
INSERT INTO one_pk (pk1,c1,c2) VALUES (0,1,1);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'changed feature branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SQL
|
||||
|
||||
run dolt sql -r csv -q "SELECT count(*) from dolt_conflicts"
|
||||
[[ "$output" =~ "1" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT DOLT_MERGE('feature-branch');"
|
||||
[ $status -eq 1 ]
|
||||
[[ $output =~ "merge has unresolved conflicts" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE during an active merge throws an error" {
|
||||
run dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -532,6 +960,23 @@ SQL
|
||||
[[ $output =~ "merging is not possible because you have not committed an active merge" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE during an active merge throws an error" {
|
||||
run dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 3');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO test VALUES (500000);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 500000');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SQL
|
||||
|
||||
[ $status -eq 1 ]
|
||||
[[ $output =~ "merging is not possible because you have not committed an active merge" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE works with ff and squash" {
|
||||
run dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -567,6 +1012,41 @@ SQL
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE works with ff and squash" {
|
||||
run dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CALL DOLT_MERGE('feature-branch', '--squash');
|
||||
SELECT COUNT(*) > 0 FROM test WHERE pk=3;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "true" ]] || false
|
||||
|
||||
run dolt log -n 1
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Step 1" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT COUNT(*) FROM dolt_log"
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "2" ]] || false
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "On branch main" ]] || false
|
||||
[[ "$output" =~ "Changes to be committed:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
run dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'hi');"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE with no-ff and squash works." {
|
||||
dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -597,6 +1077,36 @@ SQL
|
||||
[[ "$output" =~ "Finish up Merge" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE with no-ff and squash works." {
|
||||
dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 3');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO test VALUES (500000);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 500000');
|
||||
CALL DOLT_MERGE('feature-branch', '--squash');
|
||||
SQL
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "On branch main" ]] || false
|
||||
[[ "$output" =~ "Changes to be committed:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
run dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'Finish up Merge')";
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
|
||||
run dolt log -n 1
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Finish up Merge" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE throws errors with working set changes." {
|
||||
run dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -613,6 +1123,22 @@ SQL
|
||||
[[ "$output" =~ "cannot merge with uncommitted changes" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE throws errors with working set changes." {
|
||||
run dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'this is a ff');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
CREATE TABLE tbl (
|
||||
pk int primary key
|
||||
);
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SQL
|
||||
[ $status -eq 1 ]
|
||||
[[ "$output" =~ "cannot merge with uncommitted changes" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE with a long series of changing operations works." {
|
||||
dolt sql << SQL
|
||||
SELECT DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
@@ -665,6 +1191,58 @@ SQL
|
||||
[[ "$output" =~ "6" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: CALL DOLT_MERGE with a long series of changing operations works." {
|
||||
dolt sql << SQL
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Step 1');
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
INSERT INTO test VALUES (3);
|
||||
INSERT INTO test VALUES (4);
|
||||
INSERT INTO test VALUES (21232);
|
||||
DELETE FROM test WHERE pk=4;
|
||||
UPDATE test SET pk=21 WHERE pk=21232;
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 3');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO test VALUES (500000);
|
||||
INSERT INTO test VALUES (500001);
|
||||
DELETE FROM test WHERE pk=500001;
|
||||
UPDATE test SET pk=60 WHERE pk=500000;
|
||||
CALL DOLT_COMMIT('-a', '-m', 'Insert 60');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SQL
|
||||
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "On branch main" ]] || false
|
||||
[[ "$output" =~ "Changes to be committed:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
run dolt sql -q "CALL DOLT_COMMIT('-a', '-m', 'Finish up Merge')";
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
|
||||
run dolt log -n 1
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "Finish up Merge" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT * FROM test;" -r csv
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "pk" ]] || false
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
[[ "$output" =~ "1" ]] || false
|
||||
[[ "$output" =~ "2" ]] || false
|
||||
[[ "$output" =~ "3" ]] || false
|
||||
[[ "$output" =~ "21" ]] || false
|
||||
[[ "$output" =~ "60" ]] || false
|
||||
|
||||
run dolt sql -q "SELECT COUNT(*) FROM test;" -r csv
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "6" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: DOLT_MERGE with conflicts renders the dolt_conflicts table" {
|
||||
run dolt sql --continue << SQL
|
||||
set autocommit = off;
|
||||
@@ -741,6 +1319,20 @@ SQL
|
||||
[[ "$output" =~ "current fast forward from a to b. a is ahead of b already" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: up-to-date branch does not error on CALL" {
|
||||
dolt commit -am "commit all changes"
|
||||
run dolt sql << SQL
|
||||
CALL DOLT_CHECKOUT('-b', 'feature-branch');
|
||||
CALL DOLT_CHECKOUT('main');
|
||||
INSERT INTO test VALUES (3);
|
||||
CALL DOLT_COMMIT('-a', '-m', 'a commit');
|
||||
CALL DOLT_MERGE('feature-branch');
|
||||
SHOW WARNINGS;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "current fast forward from a to b. a is ahead of b already" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-merge: adding and dropping primary keys any number of times not produce schema merge conflicts" {
|
||||
dolt commit -am "commit all changes"
|
||||
dolt sql -q "create table test_null (i int)"
|
||||
|
||||
@@ -47,6 +47,16 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull main" {
cd repo2
dolt sql -q "CALL dolt_pull('origin')"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull custom remote" {
cd repo2
dolt sql -q "select dolt_pull('test-remote')"
@@ -57,6 +67,16 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull custom remote" {
cd repo2
dolt sql -q "CALL dolt_pull('test-remote')"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull default origin" {
cd repo2
dolt remote remove test-remote
@@ -68,6 +88,17 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull default origin" {
cd repo2
dolt remote remove test-remote
dolt sql -q "CALL dolt_pull()"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull default custom remote" {
cd repo2
dolt remote remove origin
@@ -79,6 +110,17 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull default custom remote" {
cd repo2
dolt remote remove origin
dolt sql -q "CALL dolt_pull()"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull up to date does not error" {
cd repo2
dolt sql -q "select dolt_pull('origin')"
@@ -90,6 +132,17 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull up to date does not error" {
cd repo2
dolt sql -q "CALL dolt_pull('origin')"
dolt sql -q "CALL dolt_pull('origin')"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull unknown remote fails" {
cd repo2
run dolt sql -q "select dolt_pull('unknown')"
@@ -97,6 +150,15 @@ teardown() {
[[ "$output" =~ "unknown remote" ]] || false
[[ ! "$output" =~ "panic" ]] || false
}

@test "sql-pull: CALL dolt_pull unknown remote fails" {
cd repo2
run dolt sql -q "CALL dolt_pull('unknown')"
[ "$status" -eq 1 ]
[[ "$output" =~ "unknown remote" ]] || false
[[ ! "$output" =~ "panic" ]] || false
}

@test "sql-pull: dolt_pull unknown feature branch fails" {
cd repo2
dolt checkout feature
@@ -106,6 +168,15 @@ teardown() {
[[ ! "$output" =~ "panic" ]] || false
}

@test "sql-pull: CALL dolt_pull unknown feature branch fails" {
cd repo2
dolt checkout feature
run dolt sql -q "CALL dolt_pull('origin')"
[ "$status" -eq 1 ]
[[ "$output" =~ "branch not found" ]] || false
[[ ! "$output" =~ "panic" ]] || false
}

@test "sql-pull: dolt_pull feature branch" {
cd repo1
dolt checkout feature
@@ -122,6 +193,22 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull feature branch" {
cd repo1
dolt checkout feature
dolt merge main
dolt push origin feature

cd ../repo2
dolt checkout feature
dolt sql -q "CALL dolt_pull('origin')"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull force" {
skip "todo: support dolt pull --force (cli too)"
cd repo2
@@ -152,6 +239,36 @@ teardown() {
[[ "$output" =~ "t3" ]] || false
}

@test "sql-pull: CALL dolt_pull force" {
skip "todo: support dolt pull --force (cli too)"
cd repo2
dolt sql -q "create table t2 (a int)"
dolt commit -am "2.0 commit"
dolt push origin main

cd ../repo1
dolt sql -q "create table t2 (a int primary key)"
dolt sql -q "create table t3 (a int primary key)"
dolt commit -am "2.1 commit"
dolt push -f origin main

cd ../repo2
run dolt sql -q "CALL dolt_pull('origin')"
[ "$status" -eq 1 ]
[[ ! "$output" =~ "panic" ]] || false
[[ "$output" =~ "fetch failed; dataset head is not ancestor of commit" ]] || false

dolt sql -q "CALL dolt_pull('-f', 'origin')"

run dolt log -n 1
[ "$status" -eq 0 ]
[[ "$output" =~ "2.1 commit" ]] || false

run dolt sql -q "show tables" -r csv
[ "${#lines[@]}" -eq 4 ]
[[ "$output" =~ "t3" ]] || false
}

@test "sql-pull: dolt_pull squash" {
skip "todo: support dolt pull --squash (cli too)"
cd repo2
@@ -163,6 +280,17 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull squash" {
skip "todo: support dolt pull --squash (cli too)"
cd repo2
dolt sql -q "CALL dolt_pull('--squash', 'origin')"
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: dolt_pull --noff flag" {
cd repo2
dolt sql -q "select dolt_pull('--no-ff', 'origin')"
@@ -179,11 +307,32 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: CALL dolt_pull --noff flag" {
cd repo2
dolt sql -q "CALL dolt_pull('--no-ff', 'origin')"
dolt status
run dolt log -n 1
[ "$status" -eq 0 ]
# TODO change the default message name
[[ "$output" =~ "automatic SQL merge" ]] || false

run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-pull: empty remote name does not panic" {
cd repo2
dolt sql -q "select dolt_pull('')"
}

@test "sql-pull: empty remote name does not panic on CALL" {
cd repo2
dolt sql -q "CALL dolt_pull('')"
}

@test "sql-pull: dolt_pull dirty working set fails" {
cd repo2
dolt sql -q "create table t2 (a int)"
@@ -192,6 +341,14 @@ teardown() {
[[ "$output" =~ "cannot merge with uncommitted changes" ]] || false
}

@test "sql-pull: CALL dolt_pull dirty working set fails" {
cd repo2
dolt sql -q "create table t2 (a int)"
run dolt sql -q "CALL dolt_pull('origin')"
[ "$status" -eq 1 ]
[[ "$output" =~ "cannot merge with uncommitted changes" ]] || false
}

@test "sql-pull: dolt_pull tag" {
cd repo1
dolt tag v1
@@ -205,6 +362,19 @@ teardown() {
[[ "$output" =~ "v1" ]] || false
}

@test "sql-pull: CALL dolt_pull tag" {
cd repo1
dolt tag v1
dolt push origin v1
dolt tag

cd ../repo2
dolt sql -q "CALL dolt_pull('origin')"
run dolt tag
[ "$status" -eq 0 ]
[[ "$output" =~ "v1" ]] || false
}

@test "sql-pull: dolt_pull tags only for resolved commits" {
cd repo1
dolt tag v1 head
@@ -227,3 +397,24 @@ teardown() {
[[ ! "$output" =~ "v3" ]] || false
}

@test "sql-pull: CALL dolt_pull tags only for resolved commits" {
cd repo1
dolt tag v1 head
dolt tag v2 head^
dolt push origin v1
dolt push origin v2

dolt checkout feature
dolt sql -q "create table t2 (a int)"
dolt commit -am "feature commit"
dolt tag v3
dolt push origin v3

cd ../repo2
dolt sql -q "CALL dolt_pull('origin')"
run dolt tag
[ "$status" -eq 0 ]
[[ "$output" =~ "v1" ]] || false
[[ "$output" =~ "v2" ]] || false
[[ ! "$output" =~ "v3" ]] || false
}

@@ -47,6 +47,19 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: CALL dolt_push origin" {
cd repo1
dolt sql -q "CALL dolt_push('origin', 'main')"

cd ../repo2
dolt pull origin
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: dolt_push custom remote" {
cd repo1
dolt sql -q "select dolt_push('test-remote', 'main')"
@@ -60,6 +73,19 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: CALL dolt_push custom remote" {
cd repo1
dolt sql -q "CALL dolt_push('test-remote', 'main')"

cd ../repo2
dolt pull origin
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: dolt_push active branch" {
skip "upstream state lost between sessions"
cd repo1
@@ -74,6 +100,20 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: CALL dolt_push active branch" {
skip "upstream state lost between sessions"
cd repo1
dolt sql -q "CALL dolt_push('origin')"

cd ../repo2
dolt pull origin
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: dolt_push feature branch" {
cd repo1
dolt checkout -b feature
@@ -89,6 +129,21 @@ teardown() {
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: CALL dolt_push feature branch" {
cd repo1
dolt checkout -b feature
dolt sql -q "CALL dolt_push('origin', 'feature')"

cd ../repo2
dolt fetch origin feature
dolt checkout feature
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false
}

@test "sql-push: dolt_push --set-upstream transient outside of session" {
cd repo1
dolt sql -q "select dolt_push('-u', 'origin', 'main')"
@@ -108,6 +163,25 @@ teardown() {
[[ "$output" =~ "the current branch has no upstream branch" ]] || false
}

@test "sql-push: CALL dolt_push --set-upstream transient outside of session" {
cd repo1
dolt sql -q "CALL dolt_push('-u', 'origin', 'main')"

cd ../repo2
dolt pull origin
run dolt sql -q "show tables" -r csv
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ "$output" =~ "Table" ]] || false
[[ "$output" =~ "t1" ]] || false

cd ../repo1
# TODO persist branch config?
run dolt sql -q "CALL dolt_push()"
[ "$status" -eq 1 ]
[[ "$output" =~ "the current branch has no upstream branch" ]] || false
}

@test "sql-push: dolt_push --force flag" {
cd repo2
dolt sql -q "create table t2 (a int)"
@@ -123,6 +197,21 @@ teardown() {
dolt sql -q "select dolt_push('--force', 'origin', 'main')"
}

@test "sql-push: CALL dolt_push --force flag" {
cd repo2
dolt sql -q "create table t2 (a int)"
dolt commit -am "commit to override"
dolt push origin main

cd ../repo1
run dolt sql -q "CALL dolt_push('origin', 'main')"
[ "$status" -eq 1 ]
[[ "$output" =~ "the tip of your current branch is behind its remote counterpart" ]] || false


dolt sql -q "CALL dolt_push('--force', 'origin', 'main')"
}

@test "sql-push: push to unknown remote" {
cd repo1
run dolt sql -q "select dolt_push('unknown', 'main')"
@@ -130,6 +219,13 @@ teardown() {
[[ "$output" =~ "unknown remote: 'unknown'" ]] || false
}

@test "sql-push: push to unknown remote on CALL" {
cd repo1
run dolt sql -q "CALL dolt_push('unknown', 'main')"
[ "$status" -eq 1 ]
[[ "$output" =~ "unknown remote: 'unknown'" ]] || false
}

@test "sql-push: push unknown branch" {
cd repo1
run dolt sql -q "select dolt_push('origin', 'unknown')"
@@ -137,6 +233,13 @@ teardown() {
[[ "$output" =~ "refspec not found: 'unknown'" ]] || false
}

@test "sql-push: push unknown branch on CALL" {
cd repo1
run dolt sql -q "CALL dolt_push('origin', 'unknown')"
[ "$status" -eq 1 ]
[[ "$output" =~ "refspec not found: 'unknown'" ]] || false
}

@test "sql-push: not specifying a branch throws an error" {
cd repo1
run dolt sql -q "select dolt_push('-u', 'origin')"
@@ -144,6 +247,13 @@ teardown() {
[[ "$output" =~ "invalid set-upstream arguments" ]] || false
}

@test "sql-push: not specifying a branch throws an error on CALL" {
cd repo1
run dolt sql -q "CALL dolt_push('-u', 'origin')"
[ "$status" -eq 1 ]
[[ "$output" =~ "invalid set-upstream arguments" ]] || false
}

@test "sql-push: pushing empty branch does not panic" {
cd repo1
run dolt sql -q "select dolt_push('origin', '')"
@@ -151,3 +261,9 @@ teardown() {
[[ "$output" =~ "invalid ref spec: ''" ]] || false
}

@test "sql-push: pushing empty branch does not panic on CALL" {
cd repo1
run dolt sql -q "CALL dolt_push('origin', '')"
[ "$status" -eq 1 ]
[[ "$output" =~ "invalid ref spec: ''" ]] || false
}

@@ -53,6 +53,41 @@ teardown() {
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
}

@test "sql-reset: CALL DOLT_RESET --hard works on unstaged and staged table changes" {
dolt sql -q "INSERT INTO test VALUES (1)"

run dolt sql -q "CALL DOLT_RESET('--hard')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false

dolt sql -q "INSERT INTO test VALUES (1)"

dolt add .

run dolt sql -q "CALL DOLT_RESET('--hard')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false

dolt sql -q "INSERT INTO test VALUES (1)"

# Reset to head results in clean main.
run dolt sql -q "CALL DOLT_RESET('--hard', 'head');"
[ "$status" -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "On branch main" ]] || false
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
}

@test "sql-reset: DOLT_RESET --hard does not ignore staged docs" {
# New docs get treated as untracked files.
echo ~license~ > LICENSE.md
@@ -81,6 +116,34 @@ teardown() {
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*LICENSE.md) ]] || false
}

@test "sql-reset: CALL DOLT_RESET --hard does not ignore staged docs" {
# New docs get treated as untracked files.
echo ~license~ > LICENSE.md
dolt add .

run dolt sql -q "CALL DOLT_RESET('--hard')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "Untracked files:" ]] || false
[[ "$output" =~ ([[:space:]]*new doc:[[:space:]]*LICENSE.md) ]] || false

# Tracked file gets reset
dolt commit -a -m "Add the license file"
echo ~edited-license~ > LICENSE.md

dolt add .

run dolt sql -q "CALL DOLT_RESET('--hard')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "Changes not staged for commit:" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*LICENSE.md) ]] || false
}

@test "sql-reset: DOLT_RESET --soft works on unstaged and staged table changes" {
dolt sql -q "INSERT INTO test VALUES (1)"

@@ -104,6 +167,29 @@ teardown() {
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
}

@test "sql-reset: CALL DOLT_RESET --soft works on unstaged and staged table changes" {
dolt sql -q "INSERT INTO test VALUES (1)"

# Table should still be unstaged
run dolt sql -q "CALL DOLT_RESET('--soft')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "Changes not staged for commit:" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false

dolt add .

run dolt sql -q "CALL DOLT_RESET('--soft')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "Changes not staged for commit:" ]] || false
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
}

@test "sql-reset: DOLT_RESET --soft ignores staged docs" {
echo ~license~ > LICENSE.md
dolt add .
@@ -122,6 +208,24 @@ teardown() {
[[ "$output" =~ ("error: the table(s) LICENSE.md do not exist") ]] || false
}

@test "sql-reset: CALL DOLT_RESET --soft ignores staged docs" {
echo ~license~ > LICENSE.md
dolt add .

run dolt sql -q "CALL DOLT_RESET('--soft')"
[ $status -eq 0 ]

run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "Changes to be committed:" ]] || false
[[ "$output" =~ ([[:space:]]*new doc:[[:space:]]*LICENSE.md) ]] || false

# Explicitly naming the doc file errors, since docs are not tables.
run dolt sql -q "CALL DOLT_RESET('LICENSE.md')"
|
||||
[ "$status" -eq 1 ]
|
||||
[[ "$output" =~ ("error: the table(s) LICENSE.md do not exist") ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: DOLT_RESET works on specific tables" {
|
||||
dolt sql -q "INSERT INTO test VALUES (1)"
|
||||
|
||||
@@ -145,6 +249,29 @@ teardown() {
|
||||
[[ "$output" =~ ([[:space:]]*new table:[[:space:]]*test2) ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: CALL DOLT_RESET works on specific tables" {
|
||||
dolt sql -q "INSERT INTO test VALUES (1)"
|
||||
|
||||
# Table should still be unstaged
|
||||
run dolt sql -q "CALL DOLT_RESET('test')"
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "Changes not staged for commit:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
dolt sql -q "CREATE TABLE test2 (pk int primary key);"
|
||||
|
||||
dolt add .
|
||||
run dolt sql -q "CALL DOLT_RESET('test', 'test2')"
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "Changes not staged for commit:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*new table:[[:space:]]*test2) ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: DOLT_RESET --soft and --hard on the same table" {
|
||||
# Make a change to the table and do a soft reset
|
||||
dolt sql -q "INSERT INTO test VALUES (1)"
|
||||
@@ -184,6 +311,45 @@ teardown() {
|
||||
[[ "$output" != 1 ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: CALL DOLT_RESET --soft and --hard on the same table" {
|
||||
# Make a change to the table and do a soft reset
|
||||
dolt sql -q "INSERT INTO test VALUES (1)"
|
||||
run dolt sql -q "CALL DOLT_RESET('test')"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "Changes not staged for commit:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
# Add and unstage the table with a soft reset. Make sure the same data exists.
|
||||
dolt add .
|
||||
|
||||
run dolt sql -q "CALL DOLT_RESET('test')"
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "Changes not staged for commit:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*modified:[[:space:]]*test) ]] || false
|
||||
|
||||
run dolt sql -r csv -q "select * from test"
|
||||
[[ "$output" =~ pk ]] || false
|
||||
[[ "$output" =~ 1 ]] || false
|
||||
|
||||
# Do a hard reset and validate the insert was wiped properly
|
||||
run dolt sql -q "CALL DOLT_RESET('--hard')"
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "On branch main" ]] || false
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
|
||||
run dolt sql -r csv -q "select * from test"
|
||||
[[ "$output" =~ pk ]] || false
|
||||
[[ "$output" != 1 ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: DOLT_RESET('--hard') doesn't remove newly created table." {
|
||||
dolt sql << SQL
|
||||
CREATE TABLE test2 (
|
||||
@@ -206,6 +372,28 @@ SQL
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: CALL DOLT_RESET('--hard') doesn't remove newly created table." {
|
||||
dolt sql << SQL
|
||||
CREATE TABLE test2 (
|
||||
pk int primary key
|
||||
);
|
||||
SQL
|
||||
dolt sql -q "CALL DOLT_RESET('--hard');"
|
||||
|
||||
run dolt status
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "Untracked files:" ]] || false
|
||||
[[ "$output" =~ ([[:space:]]*new table:[[:space:]]*test2) ]] || false
|
||||
|
||||
dolt add .
|
||||
dolt sql -q "CALL DOLT_RESET('--hard');"
|
||||
run dolt status
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "On branch main" ]] || false
|
||||
[[ "$output" =~ "nothing to commit, working tree clean" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: No rows in dolt_diff table after DOLT_RESET('--hard') on committed table." {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
@@ -217,6 +405,17 @@ SQL
|
||||
[[ "$output" =~ "true" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: No rows in dolt_diff table after CALL DOLT_RESET('--hard') on committed table." {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
CALL DOLT_RESET('--hard');
|
||||
SELECT count(*)=0 FROM dolt_diff_test;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
# Confirms the dolt_diff table has no rows after the hard reset.
[[ "$output" =~ "true" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: No rows in dolt_status table after DOLT_RESET('--hard') on committed table." {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
@@ -227,6 +426,16 @@ SQL
|
||||
[[ "$output" =~ "true" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: No rows in dolt_status table after CALL DOLT_RESET('--hard') on committed table." {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
CALL DOLT_RESET('--hard');
|
||||
SELECT count(*)=0 FROM dolt_status;
|
||||
SQL
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "true" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: DOLT_RESET --hard properly maintains session variables." {
|
||||
head_variable=@@dolt_repo_$$_head
|
||||
head_hash=$(get_head_commit)
|
||||
@@ -240,6 +449,19 @@ SQL
|
||||
[[ "$output" =~ $head_hash ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: CALL DOLT_RESET --hard properly maintains session variables." {
|
||||
head_variable=@@dolt_repo_$$_head
|
||||
head_hash=$(get_head_commit)
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
CALL DOLT_RESET('--hard');
|
||||
SELECT $head_variable;
|
||||
SQL
|
||||
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ $head_hash ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: dolt_status still has the same information in the face of a DOLT_RESET" {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
@@ -252,6 +474,18 @@ SQL
|
||||
[[ "$output" =~ "false" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: dolt_status still has the same information in the face of a CALL DOLT_RESET" {
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
SQL
|
||||
|
||||
dolt sql -q "CALL DOLT_RESET('test');"
|
||||
run dolt sql -q "SELECT * FROM dolt_status;"
|
||||
|
||||
[ $status -eq 0 ]
|
||||
[[ "$output" =~ "false" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: DOLT_RESET soft maintains staged session variable" {
|
||||
working_hash_var=@@dolt_repo_$$_working
|
||||
run dolt sql -q "SELECT $working_hash_var"
|
||||
@@ -279,6 +513,33 @@ SQL
|
||||
[[ "$output" = "$working_hash" ]] || false
|
||||
}
|
||||
|
||||
@test "sql-reset: CALL DOLT_RESET soft maintains staged session variable" {
|
||||
working_hash_var=@@dolt_repo_$$_working
|
||||
run dolt sql -q "SELECT $working_hash_var"
|
||||
working_hash=$output
|
||||
|
||||
run dolt sql << SQL
|
||||
INSERT INTO test VALUES (1);
|
||||
SELECT DOLT_ADD('.');
|
||||
SELECT DOLT_RESET('test');
|
||||
SELECT $working_hash_var
|
||||
SQL
|
||||
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# These should not match as @@_working should become a new staged hash different from the original working.
|
||||
[[ ! "$output" =~ $working_hash ]] || false
|
||||
|
||||
run dolt sql -q "CALL DOLT_RESET('--hard');"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
run dolt sql -q "SELECT $working_hash_var"
|
||||
[ $status -eq 0 ]
|
||||
|
||||
# Matches exactly.
|
||||
[[ "$output" = "$working_hash" ]] || false
|
||||
}
|
||||
|
||||
get_head_commit() {
|
||||
dolt log -n 1 | grep -m 1 commit | cut -c 13-44
|
||||
}
|
||||
|
||||
@@ -1404,50 +1404,50 @@ CREATE PROCEDURE p2() SELECT 6*6;
SQL
# We're excluding timestamps in these statements
# Initial look
run dolt sql -q "SELECT * FROM dolt_procedures" -r=csv
run dolt sql -b -q "SET @@show_external_procedures = 0;SELECT * FROM dolt_procedures" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "name,create_stmt,created_at,modified_at" ]] || false
[[ "$output" =~ 'p1,CREATE PROCEDURE p1() SELECT 5*5' ]] || false
[[ "$output" =~ 'p2,CREATE PROCEDURE p2() SELECT 6*6' ]] || false
[[ "${#lines[@]}" = "3" ]] || false
run dolt sql -q "SHOW PROCEDURE STATUS" -r=csv
[[ "${#lines[@]}" = "4" ]] || false
run dolt sql -b -q "SET @@show_external_procedures = 0;SHOW PROCEDURE STATUS" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "Db,Name,Type,Definer,Modified,Created,Security_type,Comment,character_set_client,collation_connection,Database Collation" ]] || false
[[ "$output" =~ ',p1,PROCEDURE,' ]] || false
[[ "$output" =~ ',p2,PROCEDURE,' ]] || false
[[ "${#lines[@]}" = "3" ]] || false
[[ "${#lines[@]}" = "4" ]] || false
# Drop p2
dolt sql -q "DROP PROCEDURE p2"
run dolt sql -q "SELECT * FROM dolt_procedures" -r=csv
run dolt sql -b -q "SET @@show_external_procedures = 0;SELECT * FROM dolt_procedures" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "name,create_stmt,created_at,modified_at" ]] || false
[[ "$output" =~ 'p1,CREATE PROCEDURE p1() SELECT 5*5' ]] || false
[[ ! "$output" =~ 'p2,CREATE PROCEDURE p2() SELECT 6*6' ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SHOW PROCEDURE STATUS" -r=csv
[[ "${#lines[@]}" = "3" ]] || false
run dolt sql -b -q "SET @@show_external_procedures = 0;SHOW PROCEDURE STATUS" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "Db,Name,Type,Definer,Modified,Created,Security_type,Comment,character_set_client,collation_connection,Database Collation" ]] || false
[[ "$output" =~ ',p1,PROCEDURE,' ]] || false
[[ ! "$output" =~ ',p2,PROCEDURE,' ]] || false
[[ "${#lines[@]}" = "2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
# Drop p2 again and error
run dolt sql -q "DROP PROCEDURE p2"
[ "$status" -eq "1" ]
[[ "$output" =~ '"p2" does not exist' ]] || false
# Drop p1 using if exists
dolt sql -q "DROP PROCEDURE IF EXISTS p1"
run dolt sql -q "SELECT * FROM dolt_procedures" -r=csv
run dolt sql -b -q "SET @@show_external_procedures = 0;SELECT * FROM dolt_procedures" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "name,create_stmt,created_at,modified_at" ]] || false
[[ ! "$output" =~ 'p1,CREATE PROCEDURE p1() SELECT 5*5' ]] || false
[[ ! "$output" =~ 'p2,CREATE PROCEDURE p2() SELECT 6*6' ]] || false
[[ "${#lines[@]}" = "1" ]] || false
run dolt sql -q "SHOW PROCEDURE STATUS" -r=csv
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -b -q "SET @@show_external_procedures = 0;SHOW PROCEDURE STATUS" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "Db,Name,Type,Definer,Modified,Created,Security_type,Comment,character_set_client,collation_connection,Database Collation" ]] || false
[[ ! "$output" =~ ',p1,PROCEDURE,' ]] || false
[[ ! "$output" =~ ',p2,PROCEDURE,' ]] || false
[[ "${#lines[@]}" = "1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}

@test "sql: active_branch() func" {

@@ -203,6 +203,28 @@ SQL
[[ "${#lines[@]}" = "1" ]] || false
}

@test "verify-constraints: Stored Procedure no violations" {
run dolt sql -q "CALL DOLT_VERIFY_CONSTRAINTS('child1')" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "no_violations" ]] || false
[[ "$output" =~ "1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ ! "$output" =~ "child1_parent1" ]] || false
[[ ! "$output" =~ "child1_parent2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false

run dolt sql -q "CALL DOLT_VERIFY_ALL_CONSTRAINTS('child1')" -r=csv
[ "$status" -eq "0" ]
[[ "$output" =~ "no_violations" ]] || false
[[ "$output" =~ "1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ ! "$output" =~ "child1_parent1" ]] || false
[[ ! "$output" =~ "child1_parent2" ]] || false
[[ "${#lines[@]}" = "1" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY() no named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY();" -r=json
[ "$status" -eq "0" ]
@@ -213,6 +235,15 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY() no named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_CONSTRAINTS();" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,1" ]] || false
[[ "$output" =~ "child4,1" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY() named table" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY('child3');" -r=json
[ "$status" -eq "0" ]
@@ -223,6 +254,15 @@ SQL
[[ "${#lines[@]}" = "2" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY() named table" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_CONSTRAINTS('child3');" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,1" ]] || false
[[ ! "$output" =~ "child4,1" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY() named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY('child3', 'child4');" -r=json
[ "$status" -eq "0" ]
@@ -233,6 +273,15 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY() named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_CONSTRAINTS('child3', 'child4');" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,1" ]] || false
[[ "$output" =~ "child4,1" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY_ALL() no named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY_ALL();" -r=json
[ "$status" -eq "0" ]
@@ -243,6 +292,15 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY_ALL() no named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_ALL_CONSTRAINTS();" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,2" ]] || false
[[ "$output" =~ "child4,2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY_ALL() named table" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY_ALL('child3');" -r=json
[ "$status" -eq "0" ]
@@ -253,6 +311,15 @@ SQL
[[ "${#lines[@]}" = "2" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY_ALL() named table" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_ALL_CONSTRAINTS('child3');" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,2" ]] || false
[[ ! "$output" =~ "child4,2" ]] || false
[[ "${#lines[@]}" = "2" ]] || false
}

@test "verify-constraints: SQL CONSTRAINTS_VERIFY_ALL() named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;SELECT CONSTRAINTS_VERIFY_ALL('child3', 'child4');" -r=json
[ "$status" -eq "0" ]
@@ -263,3 +330,11 @@ SQL
[[ "${#lines[@]}" = "3" ]] || false
}

@test "verify-constraints: Stored Procedure CONSTRAINTS_VERIFY_ALL() named tables" {
run dolt sql -b -q "SET dolt_force_transaction_commit = 1;CALL DOLT_VERIFY_ALL_CONSTRAINTS('child3', 'child4');" -r=json
[ "$status" -eq "0" ]
run dolt sql -q "SELECT * FROM dolt_constraint_violations" -r=csv
[[ "$output" =~ "child3,2" ]] || false
[[ "$output" =~ "child4,2" ]] || false
[[ "${#lines[@]}" = "3" ]] || false
}

@@ -49,10 +49,10 @@ if (rowsAff != 1) {
}

got <- dbGetQuery(conn, "select * from test where pk = 1")
want = data.frame(pk = c(1), value = c(1))
want = data.frame(pk = c(0, 1), value = c(0, 1))
if (!isTRUE(all.equal(want, got))) {
print("unexpected prepared statement result")
print(rows)
print(got)
quit(1)
}
