Merge remote-tracking branch 'origin/main' into aaron/run-bats-in-lambdabats

This commit is contained in:
Aaron Son
2023-12-15 14:57:10 -08:00
99 changed files with 2401 additions and 1565 deletions
@@ -15,7 +15,7 @@ actorprefix="$5"
format="$6"
nomsBinFormat="$7"
precision="4"
precision="6"
if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
+68 -29
@@ -1996,35 +1996,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE 3f09359866e587619921288cca4607374451bbd3b3f3806bc70598b6 =
================================================================================
================================================================================
= github.com/cespare/xxhash licensed under: =
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE.txt 726f1b8f64f7e439b1b12c7cbde7b1427752a00ddea15019e4156465 =
================================================================================
================================================================================
= github.com/cespare/xxhash/v2 licensed under: =
@@ -5034,6 +5005,74 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE bda64ae869be18b50125d9cfe5c370eb7248e84a2324823e4d7f2295 =
================================================================================
================================================================================
= github.com/google/go-github/v57 licensed under: =
Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 4f95ee9c8c81d66113b4c4fe66b684ae243884b5947ee854319dd9cc =
================================================================================
================================================================================
= github.com/google/go-querystring licensed under: =
Copyright (c) 2013 Google. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 7b97c9585df42dc638169348f6350b491fc35fe50884a7e6cf41aa58 =
================================================================================
================================================================================
= github.com/google/s2a-go licensed under: =
+1
@@ -150,6 +150,7 @@ func CreateCleanArgParser() *argparser.ArgParser {
func CreateCheckoutArgParser() *argparser.ArgParser {
ap := argparser.NewArgParserWithVariableArgs("checkout")
ap.SupportsString(CheckoutCreateBranch, "", "branch", "Create a new branch named {{.LessThan}}new_branch{{.GreaterThan}} and start it at {{.LessThan}}start_point{{.GreaterThan}}.")
ap.SupportsString(CreateResetBranch, "", "branch", "Similar to '-b'. If the branch already exists, it is forcibly reset to {{.LessThan}}start_point{{.GreaterThan}}.")
ap.SupportsFlag(ForceFlag, "f", "If there are any changes in the working set, the force flag will wipe out the current changes and check out the new branch.")
ap.SupportsString(TrackFlag, "t", "", "When creating a new branch, set up 'upstream' configuration.")
return ap
+1
@@ -25,6 +25,7 @@ const (
BranchParam = "branch"
CachedFlag = "cached"
CheckoutCreateBranch = "b"
CreateResetBranch = "B"
CommitFlag = "commit"
CopyFlag = "copy"
DateParam = "date"
+6 -1
@@ -106,15 +106,20 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin
return 1
}
branchOrTrack := apr.Contains(cli.CheckoutCreateBranch) || apr.Contains(cli.TrackFlag)
// Argument validation in the CLI is strictly a nice-to-have. The stored procedure will do the same, but its errors
// won't be as nice.
branchOrTrack := apr.Contains(cli.CheckoutCreateBranch) || apr.Contains(cli.CreateResetBranch) || apr.Contains(cli.TrackFlag)
if (branchOrTrack && apr.NArg() > 1) || (!branchOrTrack && apr.NArg() == 0) {
usagePrt()
return 1
}
// Branch name retrieval here is strictly for messages. The dolt_checkout procedure is the authority on validation logic.
var branchName string
if apr.Contains(cli.CheckoutCreateBranch) {
branchName, _ = apr.GetValue(cli.CheckoutCreateBranch)
} else if apr.Contains(cli.CreateResetBranch) {
branchName, _ = apr.GetValue(cli.CreateResetBranch)
} else if apr.Contains(cli.TrackFlag) {
if apr.NArg() > 0 {
usagePrt()
+4 -1
@@ -243,7 +243,10 @@ func loginWithCreds(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds,
func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
url := fmt.Sprintf("%s#%s", loginUrl, dc.PubKeyBase32Str())
cli.Printf("Opening a browser to:\n\t%s\nPlease associate your key with your account.\n", url)
cli.Println("Attempting to automatically open the credentials page in your default browser.")
cli.Println("If the browser does not open or you wish to use a different device to authorize this request, open the following URL:")
cli.Printf("\t%s\n", url)
cli.Println("Please associate your key with your account.")
open.Start(url)
}
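For headless environments, a hypothetical variant of the final open.Start(url) call above would surface the launch error rather than discard it; since the URL is printed first, a failure here is only informational:

    // Sketch (hypothetical): report a failed browser launch instead of
    // ignoring it. The URL has already been printed, so the user can
    // always proceed manually.
    if err := open.Start(url); err != nil {
        cli.Println("Could not open a browser automatically; use the URL above.")
    }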
+16 -8
@@ -83,6 +83,22 @@ func Serve(
controller = svcs.NewController()
}
ConfigureServices(serverConfig, controller, version, dEnv)
go controller.Start(ctx)
err := controller.WaitForStart()
if err != nil {
return err, nil
}
return nil, controller.WaitForStop()
}
func ConfigureServices(
serverConfig ServerConfig,
controller *svcs.Controller,
version string,
dEnv *env.DoltEnv,
) {
ValidateConfigStep := &svcs.AnonService{
InitF: func(context.Context) error {
return ValidateConfig(serverConfig)
@@ -202,7 +218,6 @@ func Serve(
controller.Register(LoadServerConfig)
// Create SQL Engine with users
var config *engine.SqlEngineConfig
InitSqlEngineConfig := &svcs.AnonService{
InitF: func(context.Context) error {
@@ -562,13 +577,6 @@ func Serve(
},
}
controller.Register(RunSQLServer)
go controller.Start(ctx)
err := controller.WaitForStart()
if err != nil {
return err, nil
}
return nil, controller.WaitForStop()
}
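This split leaves Serve with only lifecycle work: ConfigureServices registers every service, then the controller is started and awaited. A condensed sketch of that pattern, using only controller methods that appear in this diff (runServer is a hypothetical wrapper name):

    // Hypothetical wrapper showing the controller lifecycle after the split:
    // register all services, start them, then block until shutdown.
    func runServer(ctx context.Context, serverConfig ServerConfig, version string, dEnv *env.DoltEnv) error {
        controller := svcs.NewController()
        ConfigureServices(serverConfig, controller, version, dEnv) // registers every service
        go controller.Start(ctx)                                   // begins initializing and running services
        if err := controller.WaitForStart(); err != nil {          // surfaces any service init error
            return err
        }
        return controller.WaitForStop() // blocks until the server is stopped
    }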
// heartbeatService is a service that sends a heartbeat event to the metrics server once a day
@@ -68,7 +68,7 @@ func TestServerArgs(t *testing.T) {
assert.NoError(t, dEnv.DoltDB.Close())
}()
go func() {
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
StartServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"-H", "localhost",
"-P", "15200",
"-u", "username",
@@ -115,7 +115,7 @@ listener:
go func() {
dEnv.FS.WriteFile("config.yaml", []byte(yamlConfig), os.ModePerm)
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
StartServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"--config", "config.yaml",
}, dEnv, controller)
}()
@@ -150,7 +150,7 @@ func TestServerBadArgs(t *testing.T) {
t.Run(strings.Join(test, " "), func(t *testing.T) {
controller := svcs.NewController()
go func() {
startServer(context.Background(), "test", "dolt sql-server", test, env, controller)
StartServer(context.Background(), "test", "dolt sql-server", test, env, controller)
}()
if !assert.Error(t, controller.WaitForStart()) {
controller.Stop()
@@ -277,7 +277,7 @@ func TestServerFailsIfPortInUse(t *testing.T) {
server.ListenAndServe()
}()
go func() {
startServer(context.Background(), "test", "dolt sql-server", []string{
StartServer(context.Background(), "test", "dolt sql-server", []string{
"-H", "localhost",
"-P", "15200",
"-u", "username",
+49 -35
@@ -66,13 +66,13 @@ func indentLines(s string) string {
var sqlServerDocs = cli.CommandDocumentationContent{
ShortDesc: "Start a MySQL-compatible server.",
LongDesc: "By default, starts a MySQL-compatible server on the dolt database in the current directory. " +
"Databases are named after the directories they appear in, with all non-alphanumeric characters replaced by the _ character. " +
"Databases are named after the directories they appear in." +
"Parameters can be specified using a yaml configuration file passed to the server via " +
"{{.EmphasisLeft}}--config <file>{{.EmphasisRight}}, or by using the supported switches and flags to configure " +
"the server directly on the command line. If {{.EmphasisLeft}}--config <file>{{.EmphasisRight}} is provided all" +
" other command line arguments are ignored.\n\nThis is an example yaml configuration file showing all supported" +
" items and their default values:\n\n" +
indentLines(serverConfigAsYAMLConfig(DefaultServerConfig()).String()) + "\n\n" + `
indentLines(ServerConfigAsYAMLConfig(DefaultServerConfig()).String()) + "\n\n" + `
SUPPORTED CONFIG FILE FIELDS:
{{.EmphasisLeft}}data_dir{{.EmphasisRight}}: A directory where the server will load dolt databases to serve, and create new ones. Defaults to the current directory.
@@ -202,7 +202,17 @@ func (cmd SqlServerCmd) Exec(ctx context.Context, commandStr string, args []stri
cancelF()
}
}()
return startServer(newCtx, cmd.VersionStr, commandStr, args, dEnv, controller)
// We need a username and password for many SQL commands, so set defaults if they don't exist
dEnv.Config.SetFailsafes(env.DefaultFailsafeConfig)
err := StartServer(newCtx, cmd.VersionStr, commandStr, args, dEnv, controller)
if err != nil {
cli.Println(color.RedString(err.Error()))
return 1
}
return 0
}
func validateSqlServerArgs(apr *argparser.ArgParseResults) error {
@@ -217,48 +227,52 @@ func validateSqlServerArgs(apr *argparser.ArgParseResults) error {
return nil
}
func startServer(ctx context.Context, versionStr, commandStr string, args []string, dEnv *env.DoltEnv, controller *svcs.Controller) int {
// StartServer starts the sql server with the controller provided and blocks until the server is stopped.
func StartServer(ctx context.Context, versionStr, commandStr string, args []string, dEnv *env.DoltEnv, controller *svcs.Controller) error {
ap := SqlServerCmd{}.ArgParser()
help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, sqlServerDocs, ap))
// We need a username and password for many SQL commands, so set defaults if they don't exist
dEnv.Config.SetFailsafes(env.DefaultFailsafeConfig)
apr := cli.ParseArgsOrDie(ap, args, help)
if err := validateSqlServerArgs(apr); err != nil {
cli.PrintErrln(color.RedString(err.Error()))
return 1
}
serverConfig, err := GetServerConfig(dEnv.FS, apr)
serverConfig, err := ServerConfigFromArgs(ap, help, args, dEnv)
if err != nil {
cli.PrintErrln(color.RedString("Failed to start server. Bad Configuration"))
cli.PrintErrln(err.Error())
return 1
}
if err = SetupDoltConfig(dEnv, apr, serverConfig); err != nil {
cli.PrintErrln(color.RedString("Failed to start server. Bad Configuration"))
cli.PrintErrln(err.Error())
return 1
return err
}
cli.PrintErrf("Starting server with Config %v\n", ConfigInfo(serverConfig))
if startError, closeError := Serve(ctx, versionStr, serverConfig, controller, dEnv); startError != nil || closeError != nil {
if startError != nil {
cli.PrintErrln(startError)
}
if closeError != nil {
cli.PrintErrln(closeError)
}
return 1
startError, closeError := Serve(ctx, versionStr, serverConfig, controller, dEnv)
if startError != nil {
return startError
}
if closeError != nil {
return closeError
}
return 0
return nil
}
// GetServerConfig returns ServerConfig that is set either from yaml file if given, if not it is set with values defined
// ServerConfigFromArgs returns a ServerConfig from the given args
func ServerConfigFromArgs(ap *argparser.ArgParser, help cli.UsagePrinter, args []string, dEnv *env.DoltEnv) (ServerConfig, error) {
apr := cli.ParseArgsOrDie(ap, args, help)
if err := validateSqlServerArgs(apr); err != nil {
cli.PrintErrln(color.RedString(err.Error()))
return nil, err
}
serverConfig, err := getServerConfig(dEnv.FS, apr)
if err != nil {
return nil, fmt.Errorf("bad configuration: %w", err)
}
if err = setupDoltConfig(dEnv, apr, serverConfig); err != nil {
return nil, fmt.Errorf("bad configuration: %w", err)
}
return serverConfig, nil
}
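With StartServer exported, another Go program can launch a dolt sql-server directly. A minimal sketch reusing the StartServer signature above and the controller pattern from the tests later in this commit (startEmbedded, host, and port are hypothetical):

    // Sketch: start a dolt sql-server from Go and block until it's up.
    // dEnv is assumed to be an initialized *env.DoltEnv for the database dir.
    func startEmbedded(dEnv *env.DoltEnv) (*svcs.Controller, error) {
        controller := svcs.NewController()
        go func() {
            _ = StartServer(context.Background(), "0.0.0", "dolt sql-server",
                []string{"-H", "localhost", "-P", "15200"}, dEnv, controller)
        }()
        if err := controller.WaitForStart(); err != nil {
            return nil, err // the server failed to start
        }
        return controller, nil // caller runs controller.Stop() to shut down
    }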
// getServerConfig returns a ServerConfig populated from the yaml file if one is given; otherwise it is populated
// from values defined on the command line. Server config variables left undefined are set to default values.
func GetServerConfig(cwdFS filesys.Filesys, apr *argparser.ArgParseResults) (ServerConfig, error) {
func getServerConfig(cwdFS filesys.Filesys, apr *argparser.ArgParseResults) (ServerConfig, error) {
var yamlCfg YAMLConfig
if cfgFile, ok := apr.GetValue(configFileFlag); ok {
cfg, err := getYAMLServerConfig(cwdFS, cfgFile)
@@ -287,7 +301,7 @@ func GetServerConfig(cwdFS filesys.Filesys, apr *argparser.ArgParseResults) (Ser
// GetClientConfig returns configuration which is suitable for a client to use. The fact that it returns a ServerConfig
// is a little confusing, but it is because the client and server use the same configuration struct. The main difference
// between this method and GetServerConfig is that this method requires a cli.UserPassword argument. It is created by
// between this method and getServerConfig is that this method requires a cli.UserPassword argument. It is created by
// prompting the user, and we don't want the server to follow that code path.
func GetClientConfig(cwdFS filesys.Filesys, creds *cli.UserPassword, apr *argparser.ArgParseResults) (ServerConfig, error) {
cfgFile, hasCfgFile := apr.GetValue(configFileFlag)
@@ -317,8 +331,8 @@ func GetClientConfig(cwdFS filesys.Filesys, creds *cli.UserPassword, apr *argpar
return yamlCfg, nil
}
// SetupDoltConfig updates the given server config with where to create .doltcfg directory
func SetupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config ServerConfig) error {
// setupDoltConfig updates the given server config with where to create .doltcfg directory
func setupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config ServerConfig) error {
if _, ok := apr.GetValue(configFileFlag); ok {
return nil
}
@@ -174,7 +174,7 @@ func NewYamlConfig(configFileData []byte) (YAMLConfig, error) {
return cfg, err
}
func serverConfigAsYAMLConfig(cfg ServerConfig) YAMLConfig {
func ServerConfigAsYAMLConfig(cfg ServerConfig) YAMLConfig {
return YAMLConfig{
LogLevelStr: strPtr(string(cfg.LogLevel())),
MaxQueryLenInLogs: nillableIntPtr(cfg.MaxLoggedQueryLen()),
@@ -88,7 +88,7 @@ jwks:
field1: a
fields_to_log:
`
expected := serverConfigAsYAMLConfig(DefaultServerConfig())
expected := ServerConfigAsYAMLConfig(DefaultServerConfig())
expected.BehaviorConfig.DoltTransactionCommit = &trueValue
expected.CfgDirStr = nillableStrPtr("")
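ServerConfigAsYAMLConfig is now exported, so external callers can render a config the same way the help text above embeds the default listing; a one-line sketch:

    // Sketch: print the default server configuration as YAML, exactly as
    // the sql-server help text embeds it (minus the indentation pass).
    fmt.Println(ServerConfigAsYAMLConfig(DefaultServerConfig()).String())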
+20 -3
@@ -482,6 +482,8 @@ func newImportSqlEngineMover(ctx context.Context, dEnv *env.DoltEnv, rdSchema sc
return nil, dmce
}
tableSchemaDiff := tableSchema.GetAllCols().NameToCol
var rowOperationDiff []string
// construct the schema of the set of column to be updated.
rowOperationColColl := schema.NewColCollection()
rdSchema.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
@@ -489,6 +491,9 @@ func newImportSqlEngineMover(ctx context.Context, dEnv *env.DoltEnv, rdSchema sc
wrCol, ok := tableSchema.GetAllCols().GetByName(wrColName)
if ok {
rowOperationColColl = rowOperationColColl.Append(wrCol)
delete(tableSchemaDiff, wrColName)
} else {
rowOperationDiff = append(rowOperationDiff, wrColName)
}
return false, nil
@@ -499,10 +504,22 @@ func newImportSqlEngineMover(ctx context.Context, dEnv *env.DoltEnv, rdSchema sc
return nil, &mvdata.DataMoverCreationError{ErrType: mvdata.SchemaErr, Cause: err}
}
// Leave a warning if the import operation is operating on fewer columns than the relevant table's schema.
// Leave a warning if the import operation has a different schema than the relevant table's schema.
// This can certainly be intentional, but it is often due to typos in the header of a csv file.
if rowOperationSchema.GetAllCols().Size() < tableSchema.GetAllCols().Size() {
cli.PrintErrln(color.YellowString("Warning: There are fewer columns in the import file's schema than the table's schema.\nIf unintentional, check for any typos in the import file's header."))
if len(tableSchemaDiff) != 0 || len(rowOperationDiff) != 0 {
cli.PrintErrln(color.YellowString("Warning: The import file's schema does not match the table's schema.\nIf unintentional, check for any typos in the import file's header."))
if len(tableSchemaDiff) != 0 {
cli.Printf("Missing columns in %s:\n", imOpts.destTableName)
for _, col := range tableSchemaDiff {
cli.Println("\t" + col.Name)
}
}
if len(rowOperationDiff) != 0 {
cli.Println("Extra columns in import file:")
for _, col := range rowOperationDiff {
cli.Println("\t" + col)
}
}
}
mv, err := mvdata.NewSqlEngineTableWriter(ctx, dEnv, tableSchema, rowOperationSchema, moveOps, importStatsCB)
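The warning logic starts from the table's full column map, deletes every column the import file matches, and collects file columns that match nothing. A self-contained sketch of that bookkeeping with hypothetical columns (table id/name/age, csv header id/nme):

    package main

    import "fmt"

    func main() {
        // Hypothetical table columns; "nme" below is a typo'd csv header.
        tableSchemaDiff := map[string]bool{"id": true, "name": true, "age": true}
        var rowOperationDiff []string
        for _, fileCol := range []string{"id", "nme"} {
            if tableSchemaDiff[fileCol] {
                delete(tableSchemaDiff, fileCol) // matched: not missing
            } else {
                rowOperationDiff = append(rowOperationDiff, fileCol) // extra in file
            }
        }
        fmt.Println("missing from file:", tableSchemaDiff) // name, age -> "Missing columns" warning
        fmt.Println("extra in file:", rowOperationDiff)    // nme -> "Extra columns" warning
    }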
+147 -7
@@ -16,9 +16,19 @@ package commands
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/google/go-github/v57/github"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
@@ -27,8 +37,17 @@ import (
const (
featureVersionFlag = "feature"
verboseFlag = "verbose"
versionCheckFile = "version_check.txt"
)
var versionDocs = cli.CommandDocumentationContent{
ShortDesc: "Displays the version for the Dolt binary.",
LongDesc: `Displays the version for the Dolt binary.`,
Synopsis: []string{
`[--verbose] [--feature]`,
},
}
type VersionCmd struct {
VersionStr string
}
@@ -40,7 +59,7 @@ func (cmd VersionCmd) Name() string {
// Description returns a description of the command
func (cmd VersionCmd) Description() string {
return "Displays the current Dolt cli version."
return versionDocs.ShortDesc
}
// RequiresRepo should return false if this interface is implemented, and the command does not have the requirement
@@ -50,7 +69,8 @@ func (cmd VersionCmd) RequiresRepo() bool {
}
func (cmd VersionCmd) Docs() *cli.CommandDocumentation {
return nil
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(versionDocs, ap)
}
func (cmd VersionCmd) ArgParser() *argparser.ArgParser {
@@ -63,11 +83,18 @@ func (cmd VersionCmd) ArgParser() *argparser.ArgParser {
// Version displays the version of the running dolt client
// Exec executes the command
func (cmd VersionCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, versionDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
cli.Println("dolt version", cmd.VersionStr)
usage := func() {}
ap := cmd.ArgParser()
apr := cli.ParseArgsOrDie(ap, args, usage)
var verr errhand.VerboseError
verr = checkAndPrintVersionOutOfDateWarning(cmd.VersionStr, dEnv)
if verr != nil {
// print error but don't fail
cli.PrintErrf(color.YellowString(verr.Verbose()))
}
if apr.Contains(verboseFlag) {
if dEnv.HasDoltDir() && dEnv.RSLoadErr == nil && !cli.CheckEnvIsValid(dEnv) {
@@ -78,7 +105,6 @@ func (cmd VersionCmd) Exec(ctx context.Context, commandStr string, args []string
}
}
var verr errhand.VerboseError
if apr.Contains(featureVersionFlag) {
if !cli.CheckEnvIsValid(dEnv) {
return 2
@@ -92,12 +118,126 @@ func (cmd VersionCmd) Exec(ctx context.Context, commandStr string, args []string
fv, ok, err := wr.GetFeatureVersion(ctx)
if err != nil {
verr = errhand.BuildDError("error reading feature version").AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
} else if !ok {
verr = errhand.BuildDError("the current head does not have a feature version").Build()
return HandleVErrAndExitCode(verr, usage)
} else {
cli.Println("feature version:", fv)
}
}
return HandleVErrAndExitCode(verr, usage)
return HandleVErrAndExitCode(nil, usage)
}
// checkAndPrintVersionOutOfDateWarning checks if the current version of Dolt is out of date and prints a warning if it
// is. Restricts this check to at most once per week unless the build version is ahead of the stored latest release version.
func checkAndPrintVersionOutOfDateWarning(curVersion string, dEnv *env.DoltEnv) errhand.VerboseError {
var latestRelease string
var verr errhand.VerboseError
homeDir, err := dEnv.GetUserHomeDir()
if err != nil {
return errhand.BuildDError("error: failed to get user home directory").AddCause(err).Build()
}
path := filepath.Join(homeDir, dbfactory.DoltDir, versionCheckFile)
if exists, _ := dEnv.FS.Exists(path); exists {
vCheck, err := dEnv.FS.ReadFile(path)
if err != nil {
return errhand.BuildDError("error: failed to read version check file").AddCause(err).Build()
}
// The file stores "<version>,<date>" (see getLatestDoltReleaseAndRecord), so keep only the version part.
latestRelease = strings.Split(strings.ReplaceAll(string(vCheck), "\n", ""), ",")[0]
lastCheckDate, _ := dEnv.FS.LastModified(path)
if lastCheckDate.Before(time.Now().AddDate(0, 0, -7)) {
latestRelease, verr = getLatestDoltReleaseAndRecord(path, dEnv)
if verr != nil {
return verr
}
} else {
if !isVersionFormattedCorrectly(latestRelease) {
latestRelease, verr = getLatestDoltReleaseAndRecord(path, dEnv)
if verr != nil {
return verr
}
}
}
} else {
latestRelease, verr = getLatestDoltReleaseAndRecord(path, dEnv)
if verr != nil {
return verr
}
}
// if there were new releases in the last week, the latestRelease stored might be behind the current version built
isOutOfDate, verr := isOutOfDate(curVersion, latestRelease)
if verr != nil {
return verr
}
if isOutOfDate {
cli.Printf(color.YellowString("Warning: you are on an old version of Dolt. The newest version is %s.\n", latestRelease))
}
return nil
}
// getLatestDoltReleaseAndRecord returns the latest release of Dolt from GitHub and records the release and current date in the
// version check file.
func getLatestDoltReleaseAndRecord(path string, dEnv *env.DoltEnv) (string, errhand.VerboseError) {
client := github.NewClient(nil)
release, resp, err := client.Repositories.GetLatestRelease(context.Background(), "dolthub", "dolt")
if err != nil || resp.StatusCode != 200 {
return "", errhand.BuildDError("error: failed to verify latest release").AddCause(err).Build()
}
releaseName := strings.TrimPrefix(*release.TagName, "v")
err = dEnv.FS.WriteFile(path, []byte(fmt.Sprintf("%s,%s", releaseName, time.Now().UTC().Format(time.DateOnly))), os.ModePerm)
if err != nil {
return "", errhand.BuildDError("error: failed to update version check file").AddCause(err).Build()
}
return releaseName, nil
}
// isOutOfDate compares the current version of Dolt to the given latest release version and returns true if the current
// version is out of date.
func isOutOfDate(curVersion, latestRelease string) (bool, errhand.VerboseError) {
curVersionParts := strings.Split(curVersion, ".")
latestReleaseParts := strings.Split(latestRelease, ".")
for i := 0; i < len(curVersionParts) && i < len(latestReleaseParts); i++ {
curPart, err := strconv.Atoi(curVersionParts[i])
if err != nil {
return false, errhand.BuildDError("error: failed to parse version number").AddCause(err).Build()
}
latestPart, err := strconv.Atoi(latestReleaseParts[i])
if err != nil {
return false, errhand.BuildDError("error: failed to parse version number").AddCause(err).Build()
}
if latestPart > curPart {
return true, nil
} else if curPart > latestPart {
return false, nil
}
}
return false, nil
}
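A few illustrative cases for the part-by-part comparison above, as a test sketch (hypothetical version strings; assumes the standard testing package):

    func TestIsOutOfDate(t *testing.T) {
        cases := []struct {
            cur, latest string
            want        bool
        }{
            {"1.29.0", "1.29.5", true},  // latest patch is ahead
            {"1.30.0", "1.29.5", false}, // current minor is ahead
            {"1.29.5", "1.29.5", false}, // equal versions
        }
        for _, c := range cases {
            got, verr := isOutOfDate(c.cur, c.latest)
            if verr != nil {
                t.Fatalf("isOutOfDate(%q, %q): %v", c.cur, c.latest, verr)
            }
            if got != c.want {
                t.Errorf("isOutOfDate(%q, %q) = %v, want %v", c.cur, c.latest, got, c.want)
            }
        }
    }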
// isVersionFormattedCorrectly checks if the given version string is formatted correctly, i.e. is of the form
// major.minor.patch where each part is an integer.
func isVersionFormattedCorrectly(version string) bool {
versionParts := strings.Split(version, ".")
if len(versionParts) != 3 {
return false
}
for _, part := range versionParts {
if _, err := strconv.Atoi(part); err != nil {
return false
}
}
return true
}
+1 -1
@@ -65,7 +65,7 @@ import (
)
const (
Version = "1.29.0"
Version = "1.29.5"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+6 -18
@@ -26,11 +26,7 @@ type AddressMap struct {
func InitAddressMapRoot(o *AddressMap, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if AddressMapNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) (*AddressMap, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) (*AddressMa
return x, InitAddressMapRoot(x, buf, offset)
}
func GetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
x := &AddressMap{}
InitAddressMapRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) (*AddressMap, error) {
x := &AddressMap{}
return x, InitAddressMapRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
x := &AddressMap{}
InitAddressMapRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *AddressMap) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *AddressMap) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if AddressMapNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
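Since the generated Init methods can now fail with flatbuffers.ErrTableHasUnknownFields, callers should prefer the Try accessors and treat that error as data written by a newer format revision. A minimal sketch, assuming the generated code lives in a serial package and that the error is a sentinel suitable for errors.Is:

    // Sketch: decode a root AddressMap, distinguishing a forward-compat
    // failure from other decode errors.
    func decodeAddressMap(buf []byte) (*serial.AddressMap, error) {
        am, err := serial.TryGetRootAsAddressMap(buf, 0)
        if errors.Is(err, flatbuffers.ErrTableHasUnknownFields) {
            return nil, fmt.Errorf("data written by a newer format revision: %w", err)
        }
        return am, err
    }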
func (rcv *AddressMap) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type Blob struct {
func InitBlobRoot(o *Blob, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BlobNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) (*Blob, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) (*Blob, error) {
return x, InitBlobRoot(x, buf, offset)
}
func GetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
x := &Blob{}
InitBlobRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) (*Blob, error) {
x := &Blob{}
return x, InitBlobRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
x := &Blob{}
InitBlobRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Blob) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Blob) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BlobNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Blob) Table() flatbuffers.Table {
+48 -328
@@ -26,11 +26,7 @@ type BranchControl struct {
func InitBranchControlRoot(o *BranchControl, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControl(buf []byte, offset flatbuffers.UOffsetT) (*BranchControl, error) {
@@ -38,45 +34,24 @@ func TryGetRootAsBranchControl(buf []byte, offset flatbuffers.UOffsetT) (*Branch
return x, InitBranchControlRoot(x, buf, offset)
}
func GetRootAsBranchControl(buf []byte, offset flatbuffers.UOffsetT) *BranchControl {
x := &BranchControl{}
InitBranchControlRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControl(buf []byte, offset flatbuffers.UOffsetT) (*BranchControl, error) {
x := &BranchControl{}
return x, InitBranchControlRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControl(buf []byte, offset flatbuffers.UOffsetT) *BranchControl {
x := &BranchControl{}
InitBranchControlRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControl) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControl) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControl) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *BranchControl) AccessTbl(obj *BranchControlAccess) *BranchControlAccess {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(BranchControlAccess)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *BranchControl) TryAccessTbl(obj *BranchControlAccess) (*BranchControlAccess, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -93,19 +68,6 @@ func (rcv *BranchControl) TryAccessTbl(obj *BranchControlAccess) (*BranchControl
return nil, nil
}
func (rcv *BranchControl) NamespaceTbl(obj *BranchControlNamespace) *BranchControlNamespace {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(BranchControlNamespace)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *BranchControl) TryNamespaceTbl(obj *BranchControlNamespace) (*BranchControlNamespace, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
@@ -143,11 +105,7 @@ type BranchControlAccess struct {
func InitBranchControlAccessRoot(o *BranchControlAccess, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlAccessNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlAccess(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlAccess, error) {
@@ -155,45 +113,24 @@ func TryGetRootAsBranchControlAccess(buf []byte, offset flatbuffers.UOffsetT) (*
return x, InitBranchControlAccessRoot(x, buf, offset)
}
func GetRootAsBranchControlAccess(buf []byte, offset flatbuffers.UOffsetT) *BranchControlAccess {
x := &BranchControlAccess{}
InitBranchControlAccessRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlAccess(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlAccess, error) {
x := &BranchControlAccess{}
return x, InitBranchControlAccessRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlAccess(buf []byte, offset flatbuffers.UOffsetT) *BranchControlAccess {
x := &BranchControlAccess{}
InitBranchControlAccessRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlAccess) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlAccess) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlAccessNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlAccess) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *BranchControlAccess) Binlog(obj *BranchControlBinlog) *BranchControlBinlog {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(BranchControlBinlog)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *BranchControlAccess) TryBinlog(obj *BranchControlBinlog) (*BranchControlBinlog, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -210,18 +147,6 @@ func (rcv *BranchControlAccess) TryBinlog(obj *BranchControlBinlog) (*BranchCont
return nil, nil
}
func (rcv *BranchControlAccess) Databases(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlAccess) TryDatabases(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
@@ -245,18 +170,6 @@ func (rcv *BranchControlAccess) DatabasesLength() int {
return 0
}
func (rcv *BranchControlAccess) Branches(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlAccess) TryBranches(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
@@ -280,18 +193,6 @@ func (rcv *BranchControlAccess) BranchesLength() int {
return 0
}
func (rcv *BranchControlAccess) Users(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlAccess) TryUsers(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
@@ -315,18 +216,6 @@ func (rcv *BranchControlAccess) UsersLength() int {
return 0
}
func (rcv *BranchControlAccess) Hosts(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlAccess) TryHosts(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
@@ -350,18 +239,6 @@ func (rcv *BranchControlAccess) HostsLength() int {
return 0
}
func (rcv *BranchControlAccess) Values(obj *BranchControlAccessValue, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlAccess) TryValues(obj *BranchControlAccessValue, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
@@ -433,11 +310,7 @@ type BranchControlAccessValue struct {
func InitBranchControlAccessValueRoot(o *BranchControlAccessValue, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlAccessValueNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlAccessValue(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlAccessValue, error) {
@@ -445,26 +318,18 @@ func TryGetRootAsBranchControlAccessValue(buf []byte, offset flatbuffers.UOffset
return x, InitBranchControlAccessValueRoot(x, buf, offset)
}
func GetRootAsBranchControlAccessValue(buf []byte, offset flatbuffers.UOffsetT) *BranchControlAccessValue {
x := &BranchControlAccessValue{}
InitBranchControlAccessValueRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlAccessValue(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlAccessValue, error) {
x := &BranchControlAccessValue{}
return x, InitBranchControlAccessValueRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlAccessValue(buf []byte, offset flatbuffers.UOffsetT) *BranchControlAccessValue {
x := &BranchControlAccessValue{}
InitBranchControlAccessValueRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlAccessValue) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlAccessValue) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlAccessValueNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlAccessValue) Table() flatbuffers.Table {
@@ -545,11 +410,7 @@ type BranchControlNamespace struct {
func InitBranchControlNamespaceRoot(o *BranchControlNamespace, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlNamespaceNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlNamespace(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlNamespace, error) {
@@ -557,45 +418,24 @@ func TryGetRootAsBranchControlNamespace(buf []byte, offset flatbuffers.UOffsetT)
return x, InitBranchControlNamespaceRoot(x, buf, offset)
}
func GetRootAsBranchControlNamespace(buf []byte, offset flatbuffers.UOffsetT) *BranchControlNamespace {
x := &BranchControlNamespace{}
InitBranchControlNamespaceRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlNamespace(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlNamespace, error) {
x := &BranchControlNamespace{}
return x, InitBranchControlNamespaceRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlNamespace(buf []byte, offset flatbuffers.UOffsetT) *BranchControlNamespace {
x := &BranchControlNamespace{}
InitBranchControlNamespaceRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlNamespace) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlNamespace) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlNamespaceNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlNamespace) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *BranchControlNamespace) Binlog(obj *BranchControlBinlog) *BranchControlBinlog {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(BranchControlBinlog)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *BranchControlNamespace) TryBinlog(obj *BranchControlBinlog) (*BranchControlBinlog, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -612,18 +452,6 @@ func (rcv *BranchControlNamespace) TryBinlog(obj *BranchControlBinlog) (*BranchC
return nil, nil
}
func (rcv *BranchControlNamespace) Databases(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlNamespace) TryDatabases(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
@@ -647,18 +475,6 @@ func (rcv *BranchControlNamespace) DatabasesLength() int {
return 0
}
func (rcv *BranchControlNamespace) Branches(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlNamespace) TryBranches(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
@@ -682,18 +498,6 @@ func (rcv *BranchControlNamespace) BranchesLength() int {
return 0
}
func (rcv *BranchControlNamespace) Users(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlNamespace) TryUsers(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
@@ -717,18 +521,6 @@ func (rcv *BranchControlNamespace) UsersLength() int {
return 0
}
func (rcv *BranchControlNamespace) Hosts(obj *BranchControlMatchExpression, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlNamespace) TryHosts(obj *BranchControlMatchExpression, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
@@ -752,18 +544,6 @@ func (rcv *BranchControlNamespace) HostsLength() int {
return 0
}
func (rcv *BranchControlNamespace) Values(obj *BranchControlNamespaceValue, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlNamespace) TryValues(obj *BranchControlNamespaceValue, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
@@ -835,11 +615,7 @@ type BranchControlNamespaceValue struct {
func InitBranchControlNamespaceValueRoot(o *BranchControlNamespaceValue, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlNamespaceValueNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlNamespaceValue(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlNamespaceValue, error) {
@@ -847,26 +623,18 @@ func TryGetRootAsBranchControlNamespaceValue(buf []byte, offset flatbuffers.UOff
return x, InitBranchControlNamespaceValueRoot(x, buf, offset)
}
func GetRootAsBranchControlNamespaceValue(buf []byte, offset flatbuffers.UOffsetT) *BranchControlNamespaceValue {
x := &BranchControlNamespaceValue{}
InitBranchControlNamespaceValueRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlNamespaceValue(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlNamespaceValue, error) {
x := &BranchControlNamespaceValue{}
return x, InitBranchControlNamespaceValueRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlNamespaceValue(buf []byte, offset flatbuffers.UOffsetT) *BranchControlNamespaceValue {
x := &BranchControlNamespaceValue{}
InitBranchControlNamespaceValueRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlNamespaceValue) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlNamespaceValue) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlNamespaceValueNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlNamespaceValue) Table() flatbuffers.Table {
@@ -932,11 +700,7 @@ type BranchControlBinlog struct {
func InitBranchControlBinlogRoot(o *BranchControlBinlog, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlBinlogNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlBinlog(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlBinlog, error) {
@@ -944,44 +708,24 @@ func TryGetRootAsBranchControlBinlog(buf []byte, offset flatbuffers.UOffsetT) (*
return x, InitBranchControlBinlogRoot(x, buf, offset)
}
func GetRootAsBranchControlBinlog(buf []byte, offset flatbuffers.UOffsetT) *BranchControlBinlog {
x := &BranchControlBinlog{}
InitBranchControlBinlogRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlBinlog(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlBinlog, error) {
x := &BranchControlBinlog{}
return x, InitBranchControlBinlogRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlBinlog(buf []byte, offset flatbuffers.UOffsetT) *BranchControlBinlog {
x := &BranchControlBinlog{}
InitBranchControlBinlogRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlBinlog) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlBinlog) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlBinlogNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlBinlog) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *BranchControlBinlog) Rows(obj *BranchControlBinlogRow, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *BranchControlBinlog) TryRows(obj *BranchControlBinlogRow, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -1026,11 +770,7 @@ type BranchControlBinlogRow struct {
func InitBranchControlBinlogRowRoot(o *BranchControlBinlogRow, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlBinlogRowNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlBinlogRow(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlBinlogRow, error) {
@@ -1038,26 +778,18 @@ func TryGetRootAsBranchControlBinlogRow(buf []byte, offset flatbuffers.UOffsetT)
return x, InitBranchControlBinlogRowRoot(x, buf, offset)
}
func GetRootAsBranchControlBinlogRow(buf []byte, offset flatbuffers.UOffsetT) *BranchControlBinlogRow {
x := &BranchControlBinlogRow{}
InitBranchControlBinlogRowRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlBinlogRow(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlBinlogRow, error) {
x := &BranchControlBinlogRow{}
return x, InitBranchControlBinlogRowRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlBinlogRow(buf []byte, offset flatbuffers.UOffsetT) *BranchControlBinlogRow {
x := &BranchControlBinlogRow{}
InitBranchControlBinlogRowRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlBinlogRow) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlBinlogRow) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlBinlogRowNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlBinlogRow) Table() flatbuffers.Table {
@@ -1153,11 +885,7 @@ type BranchControlMatchExpression struct {
func InitBranchControlMatchExpressionRoot(o *BranchControlMatchExpression, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BranchControlMatchExpressionNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsBranchControlMatchExpression(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlMatchExpression, error) {
@@ -1165,26 +893,18 @@ func TryGetRootAsBranchControlMatchExpression(buf []byte, offset flatbuffers.UOf
return x, InitBranchControlMatchExpressionRoot(x, buf, offset)
}
func GetRootAsBranchControlMatchExpression(buf []byte, offset flatbuffers.UOffsetT) *BranchControlMatchExpression {
x := &BranchControlMatchExpression{}
InitBranchControlMatchExpressionRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsBranchControlMatchExpression(buf []byte, offset flatbuffers.UOffsetT) (*BranchControlMatchExpression, error) {
x := &BranchControlMatchExpression{}
return x, InitBranchControlMatchExpressionRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBranchControlMatchExpression(buf []byte, offset flatbuffers.UOffsetT) *BranchControlMatchExpression {
x := &BranchControlMatchExpression{}
InitBranchControlMatchExpressionRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *BranchControlMatchExpression) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *BranchControlMatchExpression) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if BranchControlMatchExpressionNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *BranchControlMatchExpression) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type Commit struct {
func InitCommitRoot(o *Commit, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CommitNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) (*Commit, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) (*Commit, error
return x, InitCommitRoot(x, buf, offset)
}
func GetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
x := &Commit{}
InitCommitRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) (*Commit, error) {
x := &Commit{}
return x, InitCommitRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
x := &Commit{}
InitCommitRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Commit) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Commit) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if CommitNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Commit) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type CommitClosure struct {
func InitCommitClosureRoot(o *CommitClosure, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CommitClosureNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) (*CommitClosure, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) (*Commit
return x, InitCommitClosureRoot(x, buf, offset)
}
func GetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
x := &CommitClosure{}
InitCommitClosureRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) (*CommitClosure, error) {
x := &CommitClosure{}
return x, InitCommitClosureRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
x := &CommitClosure{}
InitCommitClosureRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *CommitClosure) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *CommitClosure) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if CommitClosureNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *CommitClosure) Table() flatbuffers.Table {
+12 -48
@@ -61,11 +61,7 @@ type ForeignKeyCollection struct {
func InitForeignKeyCollectionRoot(o *ForeignKeyCollection, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ForeignKeyCollectionNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKeyCollection, error) {
@@ -73,44 +69,24 @@ func TryGetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) (
return x, InitForeignKeyCollectionRoot(x, buf, offset)
}
func GetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
x := &ForeignKeyCollection{}
InitForeignKeyCollectionRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKeyCollection, error) {
x := &ForeignKeyCollection{}
return x, InitForeignKeyCollectionRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
x := &ForeignKeyCollection{}
InitForeignKeyCollectionRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *ForeignKeyCollection) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *ForeignKeyCollection) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if ForeignKeyCollectionNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *ForeignKeyCollection) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *ForeignKeyCollection) ForeignKeys(obj *ForeignKey, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *ForeignKeyCollection) TryForeignKeys(obj *ForeignKey, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -155,11 +131,7 @@ type ForeignKey struct {
func InitForeignKeyRoot(o *ForeignKey, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ForeignKeyNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKey, error) {
@@ -167,26 +139,18 @@ func TryGetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKe
return x, InitForeignKeyRoot(x, buf, offset)
}
func GetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
x := &ForeignKey{}
InitForeignKeyRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKey, error) {
x := &ForeignKey{}
return x, InitForeignKeyRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
x := &ForeignKey{}
InitForeignKeyRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *ForeignKey) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *ForeignKey) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if ForeignKeyNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *ForeignKey) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type MergeArtifacts struct {
func InitMergeArtifactsRoot(o *MergeArtifacts, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if MergeArtifactsNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) (*MergeArtifacts, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) (*Merge
return x, InitMergeArtifactsRoot(x, buf, offset)
}
func GetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
x := &MergeArtifacts{}
InitMergeArtifactsRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) (*MergeArtifacts, error) {
x := &MergeArtifacts{}
return x, InitMergeArtifactsRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
x := &MergeArtifacts{}
InitMergeArtifactsRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *MergeArtifacts) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *MergeArtifacts) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if MergeArtifactsNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *MergeArtifacts) Table() flatbuffers.Table {
+6 -18
@@ -52,11 +52,7 @@ type ProllyTreeNode struct {
func InitProllyTreeNodeRoot(o *ProllyTreeNode, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ProllyTreeNodeNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) (*ProllyTreeNode, error) {
@@ -64,26 +60,18 @@ func TryGetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) (*Proll
return x, InitProllyTreeNodeRoot(x, buf, offset)
}
func GetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
x := &ProllyTreeNode{}
InitProllyTreeNodeRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) (*ProllyTreeNode, error) {
x := &ProllyTreeNode{}
return x, InitProllyTreeNodeRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
x := &ProllyTreeNode{}
InitProllyTreeNodeRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *ProllyTreeNode) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *ProllyTreeNode) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if ProllyTreeNodeNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *ProllyTreeNode) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type RootValue struct {
func InitRootValueRoot(o *RootValue, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if RootValueNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) (*RootValue, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) (*RootValue,
return x, InitRootValueRoot(x, buf, offset)
}
func GetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
x := &RootValue{}
InitRootValueRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) (*RootValue, error) {
x := &RootValue{}
return x, InitRootValueRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
x := &RootValue{}
InitRootValueRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *RootValue) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *RootValue) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if RootValueNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *RootValue) Table() flatbuffers.Table {
+30 -152
@@ -26,11 +26,7 @@ type TableSchema struct {
func InitTableSchemaRoot(o *TableSchema, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TableSchemaNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) (*TableSchema, error) {
@@ -38,44 +34,24 @@ func TryGetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) (*TableSch
return x, InitTableSchemaRoot(x, buf, offset)
}
func GetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
x := &TableSchema{}
InitTableSchemaRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) (*TableSchema, error) {
x := &TableSchema{}
return x, InitTableSchemaRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
x := &TableSchema{}
InitTableSchemaRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *TableSchema) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *TableSchema) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if TableSchemaNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *TableSchema) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *TableSchema) Columns(obj *Column, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *TableSchema) TryColumns(obj *Column, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -99,19 +75,6 @@ func (rcv *TableSchema) ColumnsLength() int {
return 0
}
func (rcv *TableSchema) ClusteredIndex(obj *Index) *Index {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Index)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *TableSchema) TryClusteredIndex(obj *Index) (*Index, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
@@ -128,18 +91,6 @@ func (rcv *TableSchema) TryClusteredIndex(obj *Index) (*Index, error) {
return nil, nil
}
func (rcv *TableSchema) SecondaryIndexes(obj *Index, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *TableSchema) TrySecondaryIndexes(obj *Index, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
@@ -163,18 +114,6 @@ func (rcv *TableSchema) SecondaryIndexesLength() int {
return 0
}
func (rcv *TableSchema) Checks(obj *CheckConstraint, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
return true
}
return false
}
func (rcv *TableSchema) TryChecks(obj *CheckConstraint, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
@@ -249,11 +188,7 @@ type Column struct {
func InitColumnRoot(o *Column, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ColumnNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) (*Column, error) {
@@ -261,26 +196,18 @@ func TryGetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) (*Column, error
return x, InitColumnRoot(x, buf, offset)
}
func GetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
x := &Column{}
InitColumnRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) (*Column, error) {
x := &Column{}
return x, InitColumnRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
x := &Column{}
InitColumnRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Column) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Column) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if ColumnNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Column) Table() flatbuffers.Table {
@@ -481,11 +408,7 @@ type Index struct {
func InitIndexRoot(o *Index, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if IndexNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) (*Index, error) {
@@ -493,26 +416,18 @@ func TryGetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) (*Index, error)
return x, InitIndexRoot(x, buf, offset)
}
func GetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
x := &Index{}
InitIndexRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) (*Index, error) {
x := &Index{}
return x, InitIndexRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
x := &Index{}
InitIndexRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Index) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Index) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if IndexNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Index) Table() flatbuffers.Table {
@@ -699,19 +614,6 @@ func (rcv *Index) MutateFulltextKey(n bool) bool {
return rcv._tab.MutateBoolSlot(24, n)
}
func (rcv *Index) FulltextInfo(obj *FulltextInfo) *FulltextInfo {
o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(FulltextInfo)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *Index) TryFulltextInfo(obj *FulltextInfo) (*FulltextInfo, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(26))
if o != 0 {
@@ -791,11 +693,7 @@ type FulltextInfo struct {
func InitFulltextInfoRoot(o *FulltextInfo, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if FulltextInfoNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsFulltextInfo(buf []byte, offset flatbuffers.UOffsetT) (*FulltextInfo, error) {
@@ -803,26 +701,18 @@ func TryGetRootAsFulltextInfo(buf []byte, offset flatbuffers.UOffsetT) (*Fulltex
return x, InitFulltextInfoRoot(x, buf, offset)
}
func GetRootAsFulltextInfo(buf []byte, offset flatbuffers.UOffsetT) *FulltextInfo {
x := &FulltextInfo{}
InitFulltextInfoRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsFulltextInfo(buf []byte, offset flatbuffers.UOffsetT) (*FulltextInfo, error) {
x := &FulltextInfo{}
return x, InitFulltextInfoRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsFulltextInfo(buf []byte, offset flatbuffers.UOffsetT) *FulltextInfo {
x := &FulltextInfo{}
InitFulltextInfoRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *FulltextInfo) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *FulltextInfo) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if FulltextInfoNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *FulltextInfo) Table() flatbuffers.Table {
@@ -957,11 +847,7 @@ type CheckConstraint struct {
func InitCheckConstraintRoot(o *CheckConstraint, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CheckConstraintNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) (*CheckConstraint, error) {
@@ -969,26 +855,18 @@ func TryGetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) (*Chec
return x, InitCheckConstraintRoot(x, buf, offset)
}
func GetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
x := &CheckConstraint{}
InitCheckConstraintRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) (*CheckConstraint, error) {
x := &CheckConstraint{}
return x, InitCheckConstraintRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
x := &CheckConstraint{}
InitCheckConstraintRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *CheckConstraint) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *CheckConstraint) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if CheckConstraintNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *CheckConstraint) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type Stash struct {
func InitStashRoot(o *Stash, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if StashNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsStash(buf []byte, offset flatbuffers.UOffsetT) (*Stash, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsStash(buf []byte, offset flatbuffers.UOffsetT) (*Stash, error)
return x, InitStashRoot(x, buf, offset)
}
func GetRootAsStash(buf []byte, offset flatbuffers.UOffsetT) *Stash {
x := &Stash{}
InitStashRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsStash(buf []byte, offset flatbuffers.UOffsetT) (*Stash, error) {
x := &Stash{}
return x, InitStashRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsStash(buf []byte, offset flatbuffers.UOffsetT) *Stash {
x := &Stash{}
InitStashRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Stash) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Stash) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if StashNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Stash) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type StashList struct {
func InitStashListRoot(o *StashList, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if StashListNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsStashList(buf []byte, offset flatbuffers.UOffsetT) (*StashList, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsStashList(buf []byte, offset flatbuffers.UOffsetT) (*StashList,
return x, InitStashListRoot(x, buf, offset)
}
func GetRootAsStashList(buf []byte, offset flatbuffers.UOffsetT) *StashList {
x := &StashList{}
InitStashListRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsStashList(buf []byte, offset flatbuffers.UOffsetT) (*StashList, error) {
x := &StashList{}
return x, InitStashListRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsStashList(buf []byte, offset flatbuffers.UOffsetT) *StashList {
x := &StashList{}
InitStashListRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *StashList) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *StashList) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if StashListNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *StashList) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type StoreRoot struct {
func InitStoreRootRoot(o *StoreRoot, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if StoreRootNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) (*StoreRoot, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) (*StoreRoot,
return x, InitStoreRootRoot(x, buf, offset)
}
func GetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
x := &StoreRoot{}
InitStoreRootRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) (*StoreRoot, error) {
x := &StoreRoot{}
return x, InitStoreRootRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
x := &StoreRoot{}
InitStoreRootRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *StoreRoot) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *StoreRoot) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if StoreRootNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *StoreRoot) Table() flatbuffers.Table {
+12 -49
@@ -26,11 +26,7 @@ type Table struct {
func InitTableRoot(o *Table, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TableNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) (*Table, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) (*Table, error)
return x, InitTableRoot(x, buf, offset)
}
func GetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
x := &Table{}
InitTableRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsTable(buf []byte, offset flatbuffers.UOffsetT) (*Table, error) {
x := &Table{}
return x, InitTableRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
x := &Table{}
InitTableRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Table) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Table) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if TableNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Table) Table() flatbuffers.Table {
@@ -178,19 +166,6 @@ func (rcv *Table) MutateAutoIncrementValue(n uint64) bool {
return rcv._tab.MutateUint64Slot(10, n)
}
func (rcv *Table) Conflicts(obj *Conflicts) *Conflicts {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Conflicts)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *Table) TryConflicts(obj *Conflicts) (*Conflicts, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
@@ -326,11 +301,7 @@ type Conflicts struct {
func InitConflictsRoot(o *Conflicts, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ConflictsNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) (*Conflicts, error) {
@@ -338,26 +309,18 @@ func TryGetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) (*Conflicts,
return x, InitConflictsRoot(x, buf, offset)
}
func GetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
x := &Conflicts{}
InitConflictsRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) (*Conflicts, error) {
x := &Conflicts{}
return x, InitConflictsRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
x := &Conflicts{}
InitConflictsRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Conflicts) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Conflicts) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if ConflictsNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Conflicts) Table() flatbuffers.Table {
+6 -18
@@ -26,11 +26,7 @@ type Tag struct {
func InitTagRoot(o *Tag, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TagNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) (*Tag, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) (*Tag, error) {
return x, InitTagRoot(x, buf, offset)
}
func GetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
x := &Tag{}
InitTagRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsTag(buf []byte, offset flatbuffers.UOffsetT) (*Tag, error) {
x := &Tag{}
return x, InitTagRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
x := &Tag{}
InitTagRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *Tag) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *Tag) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if TagNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *Tag) Table() flatbuffers.Table {
+12 -49
@@ -26,11 +26,7 @@ type WorkingSet struct {
func InitWorkingSetRoot(o *WorkingSet, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if WorkingSetNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) (*WorkingSet, error) {
@@ -38,26 +34,18 @@ func TryGetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) (*WorkingSe
return x, InitWorkingSetRoot(x, buf, offset)
}
func GetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
x := &WorkingSet{}
InitWorkingSetRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) (*WorkingSet, error) {
x := &WorkingSet{}
return x, InitWorkingSetRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
x := &WorkingSet{}
InitWorkingSetRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *WorkingSet) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *WorkingSet) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if WorkingSetNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *WorkingSet) Table() flatbuffers.Table {
@@ -168,19 +156,6 @@ func (rcv *WorkingSet) MutateTimestampMillis(n uint64) bool {
return rcv._tab.MutateUint64Slot(14, n)
}
func (rcv *WorkingSet) MergeState(obj *MergeState) *MergeState {
o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(MergeState)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *WorkingSet) TryMergeState(obj *MergeState) (*MergeState, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
@@ -239,11 +214,7 @@ type MergeState struct {
func InitMergeStateRoot(o *MergeState, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if MergeStateNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
return o.Init(buf, n+offset)
}
func TryGetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) (*MergeState, error) {
@@ -251,26 +222,18 @@ func TryGetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) (*MergeStat
return x, InitMergeStateRoot(x, buf, offset)
}
func GetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
x := &MergeState{}
InitMergeStateRoot(x, buf, offset)
return x
}
func TryGetSizePrefixedRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) (*MergeState, error) {
x := &MergeState{}
return x, InitMergeStateRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
x := &MergeState{}
InitMergeStateRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
func (rcv *MergeState) Init(buf []byte, i flatbuffers.UOffsetT) {
func (rcv *MergeState) Init(buf []byte, i flatbuffers.UOffsetT) error {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
if MergeStateNumFields < rcv.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func (rcv *MergeState) Table() flatbuffers.Table {
+5 -3
@@ -15,7 +15,7 @@ require (
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20231127171856-2466012fb61f
github.com/dolthub/vitess v0.0.0-20231207010700-88fb35413580
github.com/dustin/go-humanize v1.0.1
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -57,9 +57,10 @@ require (
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.17.1-0.20231205222834-2eb85072ed9d
github.com/dolthub/go-mysql-server v0.17.1-0.20231214194603-39acc5e6f988
github.com/dolthub/swiss v0.1.0
github.com/goccy/go-json v0.10.2
github.com/google/go-github/v57 v57.0.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/hashicorp/golang-lru/v2 v2.0.2
github.com/jmoiron/sqlx v1.3.4
@@ -113,7 +114,8 @@ require (
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
github.com/googleapis/gax-go/v2 v2.11.0 // indirect
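For reference, a dependency addition like github.com/google/go-github/v57 above can be reproduced with the standard module workflow (go get github.com/google/go-github/v57@v57.0.0, then go mod tidy); the go-cmp bump to v0.6.0 and the new go-querystring indirect are most likely transitive requirements pulled in by that module.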
+11 -6
@@ -181,8 +181,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e h1:kPsT4a47cw1+y/N5SSCkma7FhAPw7KeGmD6c9PBZW9Y=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e/go.mod h1:KPUcpx070QOfJK1gNe0zx4pA5sicIK1GMikIGLKC168=
github.com/dolthub/go-mysql-server v0.17.1-0.20231205222834-2eb85072ed9d h1:DBMlz2ONWPx6qZhUps8qwlvGa2QwsDSBKbOHxhr55Gc=
github.com/dolthub/go-mysql-server v0.17.1-0.20231205222834-2eb85072ed9d/go.mod h1:vXlRKS39WHav9N51VsfYphKhmSA2t5FkhHmW3BtwH5I=
github.com/dolthub/go-mysql-server v0.17.1-0.20231214194603-39acc5e6f988 h1:oYhHtAZFJujmu6NeHfjliwNaVrbaq5pYbl9Pu84Bq2I=
github.com/dolthub/go-mysql-server v0.17.1-0.20231214194603-39acc5e6f988/go.mod h1:zJCyPiYe9VZ9xIQTv7S1OFKwyoVQoeGxZXNtkFxTcOI=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.2-0.20230525180605-8dc13778fd72 h1:NfWmngMi1CYUWU4Ix8wM+USEhjc+mhPlT9JUR/anvbQ=
@@ -193,8 +193,8 @@ github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9X
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/swiss v0.1.0 h1:EaGQct3AqeP/MjASHLiH6i4TAmgbG/c4rA6a1bzCOPc=
github.com/dolthub/swiss v0.1.0/go.mod h1:BeucyB08Vb1G9tumVN3Vp/pyY4AMUnr9p7Rz7wJ7kAQ=
github.com/dolthub/vitess v0.0.0-20231127171856-2466012fb61f h1:I480LKHhb4usnF3dYhp6J4ORKMrncNKaWYZvIZwlK+U=
github.com/dolthub/vitess v0.0.0-20231127171856-2466012fb61f/go.mod h1:IwjNXSQPymrja5pVqmfnYdcy7Uv7eNJNBPK/MEh9OOw=
github.com/dolthub/vitess v0.0.0-20231207010700-88fb35413580 h1:OSp1g3tRBMVIyxza4LN20rZ6yYEKqjf5hNNisVg/Lns=
github.com/dolthub/vitess v0.0.0-20231207010700-88fb35413580/go.mod h1:IwjNXSQPymrja5pVqmfnYdcy7Uv7eNJNBPK/MEh9OOw=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@@ -321,13 +321,18 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs=
github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85PimQh4oygmLIxHw=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -126,7 +126,10 @@ func (binlog *Binlog) Deserialize(fb *serial.BranchControlBinlog) error {
// Read the rows
for i := 0; i < fb.RowsLength(); i++ {
serialBinlogRow := &serial.BranchControlBinlogRow{}
fb.Rows(serialBinlogRow, i)
_, err := fb.TryRows(serialBinlogRow, i)
if err != nil {
return fmt.Errorf("cannot deserialize binlog, it was created with a later version of Dolt")
}
binlog.rows[i] = BinlogRow{
IsInsert: serialBinlogRow.IsInsert(),
Database: string(serialBinlogRow.Database()),
@@ -233,31 +233,46 @@ func (tbl *Namespace) Deserialize(fb *serial.BranchControlNamespace) error {
// Read the databases
for i := 0; i < fb.DatabasesLength(); i++ {
serialMatchExpr := &serial.BranchControlMatchExpression{}
fb.Databases(serialMatchExpr, i)
_, err = fb.TryDatabases(serialMatchExpr, i)
if err != nil {
return err
}
tbl.Databases[i] = deserializeMatchExpression(serialMatchExpr)
}
// Read the branches
for i := 0; i < fb.BranchesLength(); i++ {
serialMatchExpr := &serial.BranchControlMatchExpression{}
fb.Branches(serialMatchExpr, i)
_, err = fb.TryBranches(serialMatchExpr, i)
if err != nil {
return err
}
tbl.Branches[i] = deserializeMatchExpression(serialMatchExpr)
}
// Read the users
for i := 0; i < fb.UsersLength(); i++ {
serialMatchExpr := &serial.BranchControlMatchExpression{}
fb.Users(serialMatchExpr, i)
_, err = fb.TryUsers(serialMatchExpr, i)
if err != nil {
return err
}
tbl.Users[i] = deserializeMatchExpression(serialMatchExpr)
}
// Read the hosts
for i := 0; i < fb.HostsLength(); i++ {
serialMatchExpr := &serial.BranchControlMatchExpression{}
fb.Hosts(serialMatchExpr, i)
_, err = fb.TryHosts(serialMatchExpr, i)
if err != nil {
return err
}
tbl.Hosts[i] = deserializeMatchExpression(serialMatchExpr)
}
// Read the values
for i := 0; i < fb.ValuesLength(); i++ {
serialNamespaceValue := &serial.BranchControlNamespaceValue{}
fb.Values(serialNamespaceValue, i)
_, err = fb.TryValues(serialNamespaceValue, i)
if err != nil {
return err
}
tbl.Values[i] = NamespaceValue{
Database: string(serialNamespaceValue.Database()),
Branch: string(serialNamespaceValue.Branch()),
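Every Deserialize loop above follows the same mechanical migration: the old void vector accessors (fb.Databases, fb.Branches, fb.Users, fb.Hosts, fb.Values) become Try* variants whose error marks data serialized by a newer Dolt. A hypothetical helper, not part of this commit, showing how the repeated shape could be factored:

	// forEach runs a generated Try* accessor over a vector of |length|
	// elements, stopping at the first decode error; tryGet is expected to
	// wrap a call such as fb.TryDatabases(expr, i).
	func forEach(length int, tryGet func(i int) (bool, error), visit func(i int)) error {
		for i := 0; i < length; i++ {
			if _, err := tryGet(i); err != nil {
				return err
			}
			visit(i)
		}
		return nil
	}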
@@ -0,0 +1,293 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cherry_pick
import (
"errors"
"fmt"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
// ErrCherryPickUncommittedChanges is returned when a cherry-pick is attempted without a clean working set.
var ErrCherryPickUncommittedChanges = errors.New("cannot cherry-pick with uncommitted changes")
// CherryPickOptions specifies optional parameters that control how a cherry-pick is performed.
type CherryPickOptions struct {
// Amend controls whether the commit at HEAD is amended and combined with the commit to be cherry-picked.
Amend bool
// CommitMessage is optional, and controls the message for the new commit.
CommitMessage string
}
// CherryPick replays a commit, specified by |commit|, and applies it as a new commit to the current HEAD. If
// successful, the hash of the new commit is returned. If the cherry-pick results in merge conflicts, the merge result
// is returned. If any unexpected error occurs, it is returned.
func CherryPick(ctx *sql.Context, commit string, options CherryPickOptions) (string, *merge.Result, error) {
doltSession := dsess.DSessFromSess(ctx.Session)
dbName := ctx.GetCurrentDatabase()
roots, ok := doltSession.GetRoots(ctx, dbName)
if !ok {
return "", nil, fmt.Errorf("failed to get roots for current session")
}
mergeResult, commitMsg, err := cherryPick(ctx, doltSession, roots, dbName, commit)
if err != nil {
return "", nil, err
}
newWorkingRoot := mergeResult.Root
err = doltSession.SetRoot(ctx, dbName, newWorkingRoot)
if err != nil {
return "", nil, err
}
err = stageCherryPickedTables(ctx, mergeResult.Stats)
if err != nil {
return "", nil, err
}
// If there were merge conflicts, just return the merge result.
if mergeResult.HasMergeArtifacts() {
return "", mergeResult, nil
}
commitProps := actions.CommitStagedProps{
Date: ctx.QueryTime(),
Name: ctx.Client().User,
Email: fmt.Sprintf("%s@%s", ctx.Client().User, ctx.Client().Address),
Message: commitMsg,
}
if options.CommitMessage != "" {
commitProps.Message = options.CommitMessage
}
if options.Amend {
commitProps.Amend = true
}
// NOTE: roots are old here (after staging the tables) and need to be refreshed
roots, ok = doltSession.GetRoots(ctx, dbName)
if !ok {
return "", nil, fmt.Errorf("failed to get roots for current session")
}
pendingCommit, err := doltSession.NewPendingCommit(ctx, dbName, roots, commitProps)
if err != nil {
return "", nil, err
}
if pendingCommit == nil {
return "", nil, errors.New("nothing to commit")
}
newCommit, err := doltSession.DoltCommit(ctx, dbName, doltSession.GetTransaction(), pendingCommit)
if err != nil {
return "", nil, err
}
h, err := newCommit.HashOf()
if err != nil {
return "", nil, err
}
return h.String(), nil, nil
}
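// NOTE (illustrative, not part of this commit): a minimal sketch of driving
// CherryPick from a session. The applyCommit name and the error text are
// assumptions; a nil error with a non-nil *merge.Result means conflicts or
// constraint violations were recorded and the cherry-pick is left in progress.
func applyCommit(ctx *sql.Context, commitHash string) (string, error) {
	newHash, result, err := CherryPick(ctx, commitHash, CherryPickOptions{})
	if err != nil {
		return "", err
	}
	if result != nil {
		return "", fmt.Errorf("cherry-pick of %s produced merge artifacts; resolve them or abort", commitHash)
	}
	return newHash, nil
}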
// AbortCherryPick aborts a cherry-pick merge, if one is in progress. If unable to abort for any reason
// (e.g. if there is no cherry-pick merge in progress), an error is returned.
func AbortCherryPick(ctx *sql.Context, dbName string) error {
doltSession := dsess.DSessFromSess(ctx.Session)
ws, err := doltSession.WorkingSet(ctx, dbName)
if err != nil {
return fmt.Errorf("fatal: unable to load working set: %v", err)
}
if !ws.MergeActive() {
return fmt.Errorf("error: There is no cherry-pick merge to abort")
}
roots, ok := doltSession.GetRoots(ctx, dbName)
if !ok {
return fmt.Errorf("fatal: unable to load roots for %s", dbName)
}
newWs, err := merge.AbortMerge(ctx, ws, roots)
if err != nil {
return fmt.Errorf("fatal: unable to abort merge: %v", err)
}
return doltSession.SetWorkingSet(ctx, dbName, newWs)
}
// cherryPick checks that the current working set is clean, verifies that the commit to cherry-pick is neither a merge
// commit nor a commit without parents, performs the merge, and returns the new working set root value along with the
// message of the cherry-picked commit, which is used as the commit message for the new commit created by this command.
func cherryPick(ctx *sql.Context, dSess *dsess.DoltSession, roots doltdb.Roots, dbName, cherryStr string) (*merge.Result, string, error) {
// check for clean working set
wsOnlyHasIgnoredTables, err := diff.WorkingSetContainsOnlyIgnoredTables(ctx, roots)
if err != nil {
return nil, "", err
}
if !wsOnlyHasIgnoredTables {
return nil, "", ErrCherryPickUncommittedChanges
}
headRootHash, err := roots.Head.HashOf()
if err != nil {
return nil, "", err
}
workingRootHash, err := roots.Working.HashOf()
if err != nil {
return nil, "", err
}
doltDB, ok := dSess.GetDoltDB(ctx, dbName)
if !ok {
return nil, "", fmt.Errorf("failed to get DoltDB")
}
dbData, ok := dSess.GetDbData(ctx, dbName)
if !ok {
return nil, "", fmt.Errorf("failed to get dbData")
}
cherryCommitSpec, err := doltdb.NewCommitSpec(cherryStr)
if err != nil {
return nil, "", err
}
headRef, err := dbData.Rsr.CWBHeadRef()
if err != nil {
return nil, "", err
}
cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, headRef)
if err != nil {
return nil, "", err
}
if len(cherryCommit.DatasParents()) > 1 {
return nil, "", fmt.Errorf("cherry-picking a merge commit is not supported")
}
if len(cherryCommit.DatasParents()) == 0 {
return nil, "", fmt.Errorf("cherry-picking a commit without parents is not supported")
}
cherryRoot, err := cherryCommit.GetRootValue(ctx)
if err != nil {
return nil, "", err
}
// When cherry-picking, we need to use the parent of the cherry-picked commit as the ancestor. This
// ensures that only the delta from the cherry-pick commit is applied.
parentCommit, err := doltDB.ResolveParent(ctx, cherryCommit, 0)
if err != nil {
return nil, "", err
}
parentRoot, err := parentCommit.GetRootValue(ctx)
if err != nil {
return nil, "", err
}
dbState, ok, err := dSess.LookupDbState(ctx, dbName)
if err != nil {
return nil, "", err
} else if !ok {
return nil, "", sql.ErrDatabaseNotFound.New(dbName)
}
mo := merge.MergeOpts{
IsCherryPick: true,
KeepSchemaConflicts: false,
}
result, err := merge.MergeRoots(ctx, roots.Working, cherryRoot, parentRoot, cherryCommit, parentCommit, dbState.EditOpts(), mo)
if err != nil {
return nil, "", err
}
workingRootHash, err = result.Root.HashOf()
if err != nil {
return nil, "", err
}
if headRootHash.Equal(workingRootHash) {
return nil, "", fmt.Errorf("no changes were made, nothing to commit")
}
cherryCommitMeta, err := cherryCommit.GetCommitMeta(ctx)
if err != nil {
return nil, "", err
}
// If any of the merge stats show a data or schema conflict or a constraint
// violation, record that a merge is in progress.
for _, stats := range result.Stats {
if stats.HasArtifacts() {
ws, err := dSess.WorkingSet(ctx, dbName)
if err != nil {
return nil, "", err
}
newWorkingSet := ws.StartCherryPick(cherryCommit, cherryStr)
err = dSess.SetWorkingSet(ctx, dbName, newWorkingSet)
if err != nil {
return nil, "", err
}
break
}
}
return result, cherryCommitMeta.Description, nil
}
// stageCherryPickedTables stages the tables from |mergeStats| that don't have any merge artifacts, i.e.
// tables that don't have any data or schema conflicts and don't have any constraint violations.
func stageCherryPickedTables(ctx *sql.Context, mergeStats map[string]*merge.MergeStats) (err error) {
tablesToAdd := make([]string, 0, len(mergeStats))
for tableName, mergeStats := range mergeStats {
if mergeStats.HasArtifacts() {
continue
}
// Find any tables being deleted and make sure we stage those tables first
if mergeStats.Operation == merge.TableRemoved {
tablesToAdd = append([]string{tableName}, tablesToAdd...)
} else {
tablesToAdd = append(tablesToAdd, tableName)
}
}
doltSession := dsess.DSessFromSess(ctx.Session)
dbName := ctx.GetCurrentDatabase()
roots, ok := doltSession.GetRoots(ctx, dbName)
if !ok {
return fmt.Errorf("unable to get roots for database '%s' from session", dbName)
}
roots, err = actions.StageTables(ctx, roots, tablesToAdd, true)
if err != nil {
return err
}
return doltSession.SetRoots(ctx, dbName, roots)
}
@@ -490,11 +490,11 @@ func (is doltDevIndexSet) GetIndex(ctx context.Context, sch schema.Schema, name
if addr.IsEmpty() {
return nil, fmt.Errorf("index %s not found in IndexSet", name)
}
idxSch := sch.Indexes().GetByName(name)
if idxSch == nil {
idx := sch.Indexes().GetByName(name)
if idx == nil {
return nil, fmt.Errorf("index schema not found: %s", name)
}
return indexFromAddr(ctx, is.vrw, is.ns, idxSch.Schema(), addr)
return indexFromAddr(ctx, is.vrw, is.ns, idx.Schema(), addr)
}
func (is doltDevIndexSet) PutIndex(ctx context.Context, name string, idx Index) (IndexSet, error) {
+20 -5
@@ -897,7 +897,10 @@ func (t doltDevTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table,
}
func (t doltDevTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema, ConflictIndex, error) {
conflicts := t.msg.Conflicts(nil)
conflicts, err := t.msg.TryConflicts(nil)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
ouraddr := hash.New(conflicts.OurSchemaBytes())
theiraddr := hash.New(conflicts.TheirSchemaBytes())
@@ -997,7 +1000,10 @@ func (t doltDevTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex)
func (t doltDevTable) HasConflicts(ctx context.Context) (bool, error) {
conflicts := t.msg.Conflicts(nil)
conflicts, err := t.msg.TryConflicts(nil)
if err != nil {
return false, err
}
addr := hash.New(conflicts.OurSchemaBytes())
return !addr.IsEmpty(), nil
}
@@ -1023,7 +1029,10 @@ func (t doltDevTable) SetConflicts(ctx context.Context, sch conflict.ConflictSch
}
msg := t.clone()
cmsg := msg.Conflicts(nil)
cmsg, err := msg.TryConflicts(nil)
if err != nil {
return nil, err
}
copy(cmsg.DataBytes(), conflictsAddr[:])
copy(cmsg.OurSchemaBytes(), ouraddr[:])
copy(cmsg.TheirSchemaBytes(), theiraddr[:])
@@ -1034,7 +1043,10 @@ func (t doltDevTable) SetConflicts(ctx context.Context, sch conflict.ConflictSch
func (t doltDevTable) ClearConflicts(ctx context.Context) (Table, error) {
msg := t.clone()
conflicts := msg.Conflicts(nil)
conflicts, err := msg.TryConflicts(nil)
if err != nil {
return nil, err
}
var emptyhash hash.Hash
copy(conflicts.DataBytes(), emptyhash[:])
copy(conflicts.OurSchemaBytes(), emptyhash[:])
@@ -1110,7 +1122,10 @@ func (t doltDevTable) fields() (serialTableFields, error) {
}
ns := t.ns
conflicts := t.msg.Conflicts(nil)
conflicts, err := t.msg.TryConflicts(nil)
if err != nil {
return serialTableFields{}, err
}
am, err := prolly.NewAddressMap(node, ns)
if err != nil {
return serialTableFields{}, err
@@ -100,7 +100,10 @@ func deserializeFlatbufferForeignKeys(msg types.SerialMessage) (*ForeignKeyColle
var fk serial.ForeignKey
for i := 0; i < c.ForeignKeysLength(); i++ {
c.ForeignKeys(&fk, i)
_, err = c.TryForeignKeys(&fk, i)
if err != nil {
return nil, err
}
childCols := make([]uint64, fk.ChildTableColumnsLength())
for j := range childCols {
+4
@@ -106,6 +106,10 @@ func (dEnv *DoltEnv) GetConfig() config.ReadableConfig {
return dEnv.Config
}
func (dEnv *DoltEnv) UrlStr() string {
return dEnv.urlStr
}
func createRepoState(fs filesys.Filesys) (*RepoState, error) {
repoState, rsErr := LoadRepoState(fs)
+47
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -281,3 +282,49 @@ func mergedRootToWorking(
}
return
}
// AbortMerge returns a new WorkingSet instance with the active merge aborted: it clears and resets the merge
// state in |workingSet| and uses |roots| to identify the existing tables and reset them, excluding any ignored
// tables. The caller must then set the new WorkingSet in the session to finalize the aborted merge. If no merge
// is in progress, this function returns an error.
func AbortMerge(ctx *sql.Context, workingSet *doltdb.WorkingSet, roots doltdb.Roots) (*doltdb.WorkingSet, error) {
if !workingSet.MergeActive() {
return nil, fmt.Errorf("there is no merge to abort")
}
tbls, err := doltdb.UnionTableNames(ctx, roots.Working, roots.Staged, roots.Head)
if err != nil {
return nil, err
}
roots, err = actions.MoveTablesFromHeadToWorking(ctx, roots, tbls)
if err != nil {
return nil, err
}
preMergeWorkingRoot := workingSet.MergeState().PreMergeWorkingRoot()
preMergeWorkingTables, err := preMergeWorkingRoot.GetTableNames(ctx)
if err != nil {
return nil, err
}
nonIgnoredTables, err := doltdb.ExcludeIgnoredTables(ctx, roots, preMergeWorkingTables)
if err != nil {
return nil, err
}
someTablesAreIgnored := len(nonIgnoredTables) != len(preMergeWorkingTables)
if someTablesAreIgnored {
newWorking, err := actions.MoveTablesBetweenRoots(ctx, nonIgnoredTables, preMergeWorkingRoot, roots.Working)
if err != nil {
return nil, err
}
workingSet = workingSet.WithWorkingRoot(newWorking)
} else {
workingSet = workingSet.WithWorkingRoot(preMergeWorkingRoot)
}
workingSet = workingSet.WithStagedRoot(workingSet.WorkingRoot())
workingSet = workingSet.ClearMerge()
return workingSet, nil
}
+16 -9
@@ -438,10 +438,10 @@ func mergeColumns(tblName string, format *storetypes.NomsBinFormat, ourCC, their
// otherwise, we have two valid columns and we need to figure out which one to use
if anc != nil {
oursChanged := !anc.Equals(*ours)
diffInfo.LeftSchemaChange = diffInfo.LeftSchemaChange || oursChanged
theirsChanged := !anc.Equals(*theirs)
diffInfo.RightSchemaChange = diffInfo.RightSchemaChange || theirsChanged
if oursChanged && theirsChanged {
diffInfo.LeftSchemaChange = true
diffInfo.RightSchemaChange = true
// If both columns changed in the same way, the modifications converge, so accept the column.
// If not, don't report a conflict, since this case is already handled in checkForColumnConflicts.
if ours.Equals(*theirs) {
@@ -453,12 +453,15 @@ func mergeColumns(tblName string, format *storetypes.NomsBinFormat, ourCC, their
diffInfo.LeftAndRightSchemasDiffer = true
// In this case, only theirsChanged, so we need to check if moving from ours->theirs
// is valid, otherwise it's a conflict
mergeInfo.LeftNeedsRewrite = true
compatible, rewrite := compatChecker.IsTypeChangeCompatible(ours.TypeInfo, theirs.TypeInfo)
if rewrite {
compatibilityInfo := compatChecker.IsTypeChangeCompatible(ours.TypeInfo, theirs.TypeInfo)
if compatibilityInfo.invalidateSecondaryIndexes {
mergeInfo.InvalidateSecondaryIndexes = true
}
if compatible {
if compatibilityInfo.rewriteRows {
mergeInfo.LeftNeedsRewrite = true
diffInfo.RightSchemaChange = true
}
if compatibilityInfo.compatible {
mergedColumns = append(mergedColumns, *theirs)
} else {
conflicts = append(conflicts, ColConflict{
@@ -472,11 +475,15 @@ func mergeColumns(tblName string, format *storetypes.NomsBinFormat, ourCC, their
// In this case, only oursChanged, so we need to check if moving from theirs->ours
// is valid, otherwise it's a conflict
mergeInfo.RightNeedsRewrite = true
compatible, rewrite := compatChecker.IsTypeChangeCompatible(theirs.TypeInfo, ours.TypeInfo)
if rewrite {
compatibilityInfo := compatChecker.IsTypeChangeCompatible(theirs.TypeInfo, ours.TypeInfo)
if compatibilityInfo.invalidateSecondaryIndexes {
mergeInfo.InvalidateSecondaryIndexes = true
}
if compatible {
if compatibilityInfo.rewriteRows {
mergeInfo.RightNeedsRewrite = true
diffInfo.LeftSchemaChange = true
}
if compatibilityInfo.compatible {
mergedColumns = append(mergedColumns, *ours)
} else {
conflicts = append(conflicts, ColConflict{
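The hunks above replace IsTypeChangeCompatible's two boolean results with a single compatibility struct. Its field names are visible in the diff; as an assumption, the result type plausibly has this shape:

	// Plausible shape of the result consumed above; the field names are taken
	// from the diff, the type name itself is a guess.
	type typeCompatibilityInfo struct {
		compatible                 bool // the column type change can be merged at all
		rewriteRows                bool // existing rows must be rewritten into the new encoding
		invalidateSecondaryIndexes bool // secondary indexes must be dropped and rebuilt
	}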
+236 -21
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
@@ -50,13 +51,14 @@ type schemaMergeTest struct {
}
type dataTest struct {
name string
ancestor []sql.Row
left, right []sql.Row
merged []sql.Row
dataConflict bool
skip bool
skipFlip bool
name string
ancestor []sql.Row
left, right []sql.Row
merged []sql.Row
constraintViolations []constraintViolation
dataConflict bool
skip bool
skipFlip bool
}
type table struct {
@@ -78,6 +80,9 @@ func TestSchemaMerge(t *testing.T) {
t.Run("column default tests", func(t *testing.T) {
testSchemaMerge(t, columnDefaultTests)
})
t.Run("collation tests", func(t *testing.T) {
testSchemaMerge(t, collationTests)
})
t.Run("nullability tests", func(t *testing.T) {
testSchemaMerge(t, nullabilityTests)
})
@@ -544,6 +549,94 @@ var columnAddDropTests = []schemaMergeTest{
},
}
type constraintViolation struct {
violationType merge.CvType
key, value sql.Row
}
var collationTests = []schemaMergeTest{
{
name: "left side changes collation",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) collate utf8mb4_0900_bin unique)")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) collate utf8mb4_0900_ai_ci unique)")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) collate utf8mb4_0900_bin unique)")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) collate utf8mb4_0900_ai_ci unique)")),
dataTests: []dataTest{
{
name: "no data change",
ancestor: singleRow(1, "hello"),
left: singleRow(1, "hello"),
right: singleRow(1, "hello"),
merged: singleRow(1, "hello"),
},
{
name: "right side insert",
ancestor: []sql.Row{{1, "hello"}},
left: []sql.Row{{1, "hello"}},
right: []sql.Row{{1, "hello"}, {2, "world"}},
merged: []sql.Row{{1, "hello"}, {2, "world"}},
},
{
name: "right side delete",
ancestor: []sql.Row{{1, "hello"}, {2, "world"}},
left: []sql.Row{{1, "hello"}, {2, "world"}},
right: []sql.Row{{1, "hello"}},
merged: []sql.Row{{1, "hello"}},
},
{
name: "right side insert causes unique violation",
ancestor: []sql.Row{{1, "hello"}},
left: []sql.Row{{1, "hello"}},
right: []sql.Row{{1, "hello"}, {2, "HELLO"}},
constraintViolations: []constraintViolation{
{merge.CvType_UniqueIndex, sql.Row{int32(1)}, sql.Row{"hello"}},
{merge.CvType_UniqueIndex, sql.Row{int32(2)}, sql.Row{"HELLO"}},
},
},
},
},
{
name: "left side changes table collation",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) unique) collate utf8mb4_0900_bin")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) unique) collate utf8mb4_0900_ai_ci")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) unique) collate utf8mb4_0900_bin")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(10) unique) collate utf8mb4_0900_ai_ci")),
dataTests: []dataTest{
{
name: "no data change",
ancestor: singleRow(1, "hello"),
left: singleRow(1, "hello"),
right: singleRow(1, "hello"),
merged: singleRow(1, "hello"),
},
{
name: "right side insert",
ancestor: []sql.Row{{1, "hello"}},
left: []sql.Row{{1, "hello"}},
right: []sql.Row{{1, "hello"}, {2, "world"}},
merged: []sql.Row{{1, "hello"}, {2, "world"}},
},
{
name: "right side delete",
ancestor: []sql.Row{{1, "hello"}, {2, "world"}},
left: []sql.Row{{1, "hello"}, {2, "world"}},
right: []sql.Row{{1, "hello"}},
merged: []sql.Row{{1, "hello"}},
},
{
name: "right side insert causes unique violation",
ancestor: []sql.Row{{1, "hello"}},
left: []sql.Row{{1, "hello"}},
right: []sql.Row{{1, "hello"}, {2, "HELLO"}},
constraintViolations: []constraintViolation{
{merge.CvType_UniqueIndex, sql.Row{int32(1)}, sql.Row{"hello"}},
{merge.CvType_UniqueIndex, sql.Row{int32(2)}, sql.Row{"HELLO"}},
},
},
},
},
}
var columnDefaultTests = []schemaMergeTest{
{
name: "left side add default",
@@ -753,6 +846,99 @@ var typeChangeTests = []schemaMergeTest{
},
},
},
{
name: "modify column type on the left side between compatible string types with unique secondary index",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(20) unique, b int, c varchar(20) unique)")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a text, b int, c text)")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a varchar(20) unique, b int, c varchar(20) unique)")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a text, b int, c text)")),
dataTests: []dataTest{
{
name: "schema change, no data change",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "test", 1, "test"),
right: singleRow(1, "test", 1, "test"),
merged: singleRow(1, "test", 1, "test"),
},
{
name: "insert and schema change on left, no change on right",
ancestor: nil,
left: singleRow(1, "test", 1, "test"),
right: nil,
merged: singleRow(1, "test", 1, "test"),
},
{
name: "insert on right, schema change on left",
ancestor: nil,
left: nil,
right: singleRow(1, "test", 1, "test"),
merged: singleRow(1, "test", 1, "test"),
},
{
name: "data and schema change on left, no change on right",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "hello world", 1, "hello world"),
right: singleRow(1, "test", 1, "test"),
merged: singleRow(1, "hello world", 1, "hello world"),
},
{
name: "data change on right, schema change on left",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "test", 1, "test"),
right: singleRow(1, "hello world", 1, "hello world"),
merged: singleRow(1, "hello world", 1, "hello world"),
},
{
name: "data set and schema change on left, no change on right",
ancestor: singleRow(1, nil, 1, nil),
left: singleRow(1, "hello world", 1, "hello world"),
right: singleRow(1, nil, 1, nil),
merged: singleRow(1, "hello world", 1, "hello world"),
},
{
name: "data set on right, schema change on left",
ancestor: singleRow(1, nil, 1, nil),
left: singleRow(1, nil, 1, nil),
right: singleRow(1, "hello world", 1, "hello world"),
merged: singleRow(1, "hello world", 1, "hello world"),
},
{
name: "convergent inserts",
ancestor: nil,
left: singleRow(1, "test", 1, "test"),
right: singleRow(1, "test", 1, "test"),
merged: singleRow(1, "test", 1, "test"),
},
{
name: "conflicting inserts",
ancestor: nil,
left: singleRow(1, "test", 1, "test"),
right: singleRow(1, "hello world", 1, "hello world"),
dataConflict: true,
},
{
name: "delete and schema change on left",
ancestor: singleRow(1, "test", 1, "test"),
left: nil,
right: singleRow(1, "test", 1, "test"),
merged: nil,
},
{
name: "schema change on left, delete on right",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "test", 1, "test"),
right: nil,
merged: nil,
},
{
name: "schema and value change on left, delete on right",
ancestor: singleRow(1, "test", 1, "test"),
left: singleRow(1, "hello", 1, "hello"),
right: nil,
dataConflict: true,
},
},
},
}
var keyChangeTests = []schemaMergeTest{
@@ -934,7 +1120,7 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
}
t.Run(test.name, func(t *testing.T) {
runTest := func(t *testing.T, test schemaMergeTest, expectDataConflict bool) {
runTest := func(t *testing.T, test schemaMergeTest, expectDataConflict bool, expConstraintViolations []constraintViolation) {
a, l, r, m := setupSchemaMergeTest(t, test)
ctx := context.Background()
@@ -980,33 +1166,62 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
require.NoError(t, err)
require.False(t, hasConflict, "Unexpected data conflict")
if !assert.Equal(t, addr, a) {
expTbl, _, err := m.GetTable(ctx, name)
numConstraintViolations, err := actTbl.NumConstraintViolations(ctx)
require.NoError(t, err)
require.EqualValues(t, numConstraintViolations, len(expConstraintViolations))
if len(expConstraintViolations) > 0 {
artifacts, err := actTbl.GetArtifacts(ctx)
require.NoError(t, err)
t.Logf("expected rows: %s", expTbl.DebugString(ctx))
t.Logf("actual rows: %s", actTbl.DebugString(ctx))
artifactMap := durable.ProllyMapFromArtifactIndex(artifacts)
artifactIter, err := artifactMap.IterAllCVs(ctx)
require.NoError(t, err)
sch, err := actTbl.GetSchema(ctx)
require.NoError(t, err)
kd, vd := sch.GetMapDescriptors()
// value tuples encoded in ConstraintViolationMeta may
// violate the not null constraints assumed by fixed access
kd = kd.WithoutFixedAccess()
vd = vd.WithoutFixedAccess()
for _, expectedViolation := range expConstraintViolations {
violationType, key, value, err := merge.NextConstraintViolation(ctx, artifactIter, kd, vd, artifactMap.NodeStore())
require.NoError(t, err)
require.EqualValues(t, expectedViolation.violationType, violationType)
require.EqualValues(t, expectedViolation.key, key)
require.EqualValues(t, expectedViolation.value, value)
}
} else {
if !assert.Equal(t, addr, a) {
expTbl, _, err := m.GetTable(ctx, name)
require.NoError(t, err)
t.Logf("expected rows: %s", expTbl.DebugString(ctx))
t.Logf("actual rows: %s", actTbl.DebugString(ctx))
}
}
}
}
}
}
t.Run("test schema merge", func(t *testing.T) {
runTest(t, test, false)
runTest(t, test, false, nil)
})
for _, data := range test.dataTests {
// Copy the test so that the values from one data test don't affect subsequent data tests.
dataDest := test
dataDest.ancestor.rows = data.ancestor
dataDest.left.rows = data.left
dataDest.right.rows = data.right
dataDest.merged.rows = data.merged
dataDest.skipNewFmt = dataDest.skipNewFmt || data.skip
dataDest.skipFlipOnNewFormat = dataDest.skipFlipOnNewFormat || data.skipFlip
dataTest := test
dataTest.ancestor.rows = data.ancestor
dataTest.left.rows = data.left
dataTest.right.rows = data.right
dataTest.merged.rows = data.merged
dataTest.skipNewFmt = dataTest.skipNewFmt || data.skip
dataTest.skipFlipOnNewFormat = dataTest.skipFlipOnNewFormat || data.skipFlip
t.Run(data.name, func(t *testing.T) {
if data.skip {
t.Skip()
}
runTest(t, dataDest, data.dataConflict)
runTest(t, dataTest, data.dataConflict, data.constraintViolations)
})
}
})
@@ -38,7 +38,7 @@ type TypeCompatibilityChecker interface {
// For the DOLT storage format, very few cases (outside of the types being exactly identical) are considered
// compatible without requiring a full table rewrite. The older LD_1 storage format has a more forgiving storage
// layout, so more type changes are considered compatible, generally as long as they are in the same type family/kind.
IsTypeChangeCompatible(from, to typeinfo.TypeInfo) (compatible bool, tableRewrite bool)
IsTypeChangeCompatible(from, to typeinfo.TypeInfo) TypeChangeInfo
}
// newTypeCompatabilityCheckerForStorageFormat returns a new TypeCompatibilityChecker
@@ -63,26 +63,28 @@ var _ TypeCompatibilityChecker = ld1TypeCompatibilityChecker{}
// IsTypeChangeCompatible implements TypeCompatibilityChecker.IsTypeChangeCompatible for the
// deprecated LD_1 storage format.
func (l ld1TypeCompatibilityChecker) IsTypeChangeCompatible(from, to typeinfo.TypeInfo) (compatible bool, tableRewrite bool) {
func (l ld1TypeCompatibilityChecker) IsTypeChangeCompatible(from, to typeinfo.TypeInfo) (res TypeChangeInfo) {
// If the types are exactly identical, then they are always compatible
fromSqlType := from.ToSqlType()
toSqlType := to.ToSqlType()
if fromSqlType.Equals(toSqlType) {
return true, false
res.compatible = true
return res
}
// For the older LD_1 storage format, our compatibility rules are looser
if from.NomsKind() != to.NomsKind() {
return false, false
return res
}
if to.ToSqlType().Type() == query.Type_GEOMETRY {
// We need to do this because some spatial type changes require a full table check, but not all.
// TODO: This could be narrowed down to a smaller set of spatial type changes
return false, false
return res
}
return true, false
res.compatible = true
return res
}
// doltTypeCompatibilityChecker implements TypeCompatibilityChecker for the DOLT storage
@@ -108,24 +110,36 @@ var _ TypeCompatibilityChecker = (*doltTypeCompatibilityChecker)(nil)
// IsTypeChangeCompatible implements TypeCompatibilityChecker.IsTypeChangeCompatible for the
// DOLT storage format.
func (d doltTypeCompatibilityChecker) IsTypeChangeCompatible(from, to typeinfo.TypeInfo) (compatible bool, tableRewrite bool) {
func (d doltTypeCompatibilityChecker) IsTypeChangeCompatible(from, to typeinfo.TypeInfo) (res TypeChangeInfo) {
// If the types are exactly identical, then they are always compatible
fromSqlType := from.ToSqlType()
toSqlType := to.ToSqlType()
if fromSqlType.Equals(toSqlType) {
return true, false
res.compatible = true
return res
}
for _, checker := range d.checkers {
if checker.canHandle(fromSqlType, toSqlType) {
compatible, requiresRewrite := checker.isCompatible(fromSqlType, toSqlType)
if compatible {
return compatible, requiresRewrite
subcheckerResult := checker.isCompatible(fromSqlType, toSqlType)
if subcheckerResult.compatible {
return subcheckerResult
}
}
}
return false, false
return res
}
// TypeChangeInfo describes how a column's type changing during a merge impacts the merge.
// |compatible| stores whether the merge is still possible.
// |rewriteRows| stores whether the primary index will need to be rewritten.
// |invalidateSecondaryIndexes| stores whether all secondary indexes will need to be rewritten.
// Typically, adding, removing, or changing the type of columns will trigger a rewrite of all indexes, because it is
// nontrivial to determine which secondary indexes have been invalidated. However, some changes do not affect the
// primary index, such as collation changes to non-pk columns.
type TypeChangeInfo struct {
compatible, rewriteRows, invalidateSecondaryIndexes bool
}
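As a minimal sketch of how merge logic might branch on these flags: describeTypeChange below is a hypothetical helper, not part of this change, and assumes it sits inside the same package, since the struct fields are unexported.
func describeTypeChange(info TypeChangeInfo) string {
	switch {
	case !info.compatible:
		// incompatible change: surface a schema conflict instead of merging
		return "schema conflict"
	case info.rewriteRows:
		// primary index encoding changed, so rows (and their indexes) must be rewritten
		return "rewrite primary index and rebuild secondary indexes"
	case info.invalidateSecondaryIndexes:
		// e.g. a collation change on a non-pk column: rows stay, indexes rebuild
		return "rebuild secondary indexes only"
	default:
		return "metadata-only change"
	}
}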
// typeChangeHandler has the logic to determine if a specific change from one type to another is a compatible
@@ -137,7 +151,7 @@ type typeChangeHandler interface {
// isCompatible returns a TypeChangeInfo indicating whether a type change from |fromSqlType| to |toSqlType| is
// compatible and safe to perform automatically, whether it requires rewriting table rows, and whether it
// invalidates secondary indexes.
isCompatible(fromSqlType, toSqlType sql.Type) (compatible bool, tableRewrite bool)
isCompatible(fromSqlType, toSqlType sql.Type) TypeChangeInfo
}
// stringTypeChangeHandler handles type change compatibility between string types, i.e. VARCHAR, VARBINARY,
@@ -167,22 +181,28 @@ func (s stringTypeChangeHandler) canHandle(fromSqlType, toSqlType sql.Type) bool
}
}
func (s stringTypeChangeHandler) isCompatible(fromSqlType, toSqlType sql.Type) (compatible bool, tableRewrite bool) {
func (s stringTypeChangeHandler) isCompatible(fromSqlType, toSqlType sql.Type) (res TypeChangeInfo) {
fromStringType := fromSqlType.(types.StringType)
toStringType := toSqlType.(types.StringType)
compatible = toStringType.MaxByteLength() >= fromStringType.MaxByteLength() &&
toStringType.Collation() == fromStringType.Collation()
res.compatible = toStringType.CharacterSet() == fromStringType.CharacterSet() &&
toStringType.MaxByteLength() >= fromStringType.MaxByteLength()
tableRewrite = false
if compatible {
collationChanged := toStringType.Collation() != fromStringType.Collation()
// If the collation changed, we will need to rebuild any secondary indexes on this column.
if collationChanged {
res.invalidateSecondaryIndexes = true
}
if res.compatible {
// Because inline string types (e.g. VARCHAR, CHAR) have the same encoding, the main case
// when a table rewrite is required is when moving between an inline string type (e.g. CHAR)
// and an out-of-band string type (e.g. TEXT).
fromTypeOutOfBand := outOfBandType(fromSqlType)
toTypeOutOfBand := outOfBandType(toSqlType)
if fromTypeOutOfBand != toTypeOutOfBand {
tableRewrite = true
res.rewriteRows = true
res.invalidateSecondaryIndexes = true
}
// The exception to this is when converting to a fixed width BINARY(N) field, which requires rewriting the
@@ -190,11 +210,12 @@ func (s stringTypeChangeHandler) isCompatible(fromSqlType, toSqlType sql.Type) (
// or its indexes, need to be right padded up to N bytes. Note that MySQL does NOT do a similar conversion
// when converting to VARBINARY(N).
if toSqlType.Type() == sqltypes.Binary {
tableRewrite = true
res.rewriteRows = true
res.invalidateSecondaryIndexes = true
}
}
return compatible, tableRewrite
return res
}
// outOfBandType returns true if the specified type |t| is stored outside of a table's index file, for example
@@ -220,32 +241,33 @@ func (e enumTypeChangeHandler) canHandle(from, to sql.Type) bool {
}
// isCompatible implements the typeChangeHandler interface.
func (e enumTypeChangeHandler) isCompatible(fromSqlType, toSqlType sql.Type) (compatible bool, tableRewrite bool) {
func (e enumTypeChangeHandler) isCompatible(fromSqlType, toSqlType sql.Type) (res TypeChangeInfo) {
fromEnumType := fromSqlType.(sql.EnumType)
toEnumType := toSqlType.(sql.EnumType)
if fromEnumType.NumberOfElements() > toEnumType.NumberOfElements() {
return false, false
return res
}
// TODO: charset/collation changes may require a table or index rewrite; for now, consider them incompatible
fromCharSet, fromCollation := fromEnumType.CharacterSet(), fromEnumType.Collation()
toCharSet, toCollation := toEnumType.CharacterSet(), toEnumType.Collation()
if fromCharSet != toCharSet || fromCollation != toCollation {
return false, false
return res
}
// if values have only been added at the end, consider it compatible (i.e. no reordering or removal)
toEnumValues := toEnumType.Values()
for i, fromEnumValue := range fromEnumType.Values() {
if toEnumValues[i] != fromEnumValue {
return false, false
return res
}
}
// MySQL uses 1 byte to store enum values that have <= 255 values, and 2 bytes for > 255 values
// The DOLT storage format *always* uses 2 bytes for all enum values, so table data never needs
// to be rewritten in this additive case.
return true, false
res.compatible = true
return res
}
// setTypeChangeHandler handles type change compatibility checking for changes to set types. If a new set value
@@ -261,29 +283,30 @@ func (s setTypeChangeHandler) canHandle(fromType, toType sql.Type) bool {
}
// isCompatible implements the typeChangeHandler interface.
func (s setTypeChangeHandler) isCompatible(fromType, toType sql.Type) (compatible bool, tableRewrite bool) {
func (s setTypeChangeHandler) isCompatible(fromType, toType sql.Type) (res TypeChangeInfo) {
fromSetType := fromType.(sql.SetType)
toSetType := toType.(sql.SetType)
if fromSetType.NumberOfElements() > toSetType.NumberOfElements() {
return false, false
return res
}
// TODO: charset/collation changes may require a table or index rewrite; for now, consider them incompatible
fromCharSet, fromCollation := fromSetType.CharacterSet(), fromSetType.Collation()
toCharSet, toCollation := toSetType.CharacterSet(), toSetType.Collation()
if fromCharSet != toCharSet || fromCollation != toCollation {
return false, false
return res
}
// Ensure only new values have been added to the end of the set
toSetValues := toSetType.Values()
for i, fromSetValue := range fromSetType.Values() {
if toSetValues[i] != fromSetValue {
return false, false
return res
}
}
// The DOLT storage format *always* uses 8 bytes for all set values, so the table data never needs
// to be rewritten in this additive case.
return true, false
res.compatible = true
return res
}
@@ -27,11 +27,12 @@ import (
)
type typeChangeCompatibilityTest struct {
name string
from typeinfo.TypeInfo
to typeinfo.TypeInfo
compatible bool
rewrite bool
name string
from typeinfo.TypeInfo
to typeinfo.TypeInfo
compatible bool
rewrite bool
invalidateSecondaryIndexes bool
}
// Enum test data
@@ -54,6 +55,9 @@ var point = typeinfo.CreatePointTypeFromSqlPointType(gmstypes.PointType{SRID: ui
// String type test data
var varchar10 = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 10, sql.Collation_Default))
var varchar10ci = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 10, sql.Collation_utf8mb4_0900_ai_ci))
var varchar10bin = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 10, sql.Collation_utf8mb4_0900_bin))
var varchar10utf16bin = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 10, sql.Collation_utf16_bin))
var varchar20 = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 20, sql.Collation_Default))
var varchar300 = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 300, sql.Collation_Default))
var varchar10BinaryCollation = typeinfo.CreateVarStringTypeFromSqlType(gmstypes.MustCreateString(sqltypes.VarChar, 10, sql.Collation_binary))
@@ -200,6 +204,24 @@ func TestDoltIsTypeChangeCompatible(t *testing.T) {
compatible: false,
},
// Charset changes
{
name: "incompatible: VARCHAR(10) charset change",
from: varchar10bin,
to: varchar10utf16bin,
compatible: false,
},
// Collation changes
{
name: "compatible: VARCHAR(10) collation change",
from: varchar10ci,
to: varchar10bin,
compatible: true,
rewrite: false,
invalidateSecondaryIndexes: true,
},
// Type width changes
{
name: "type widening: VARCHAR(10) to VARCHAR(20)",
@@ -213,50 +235,55 @@ func TestDoltIsTypeChangeCompatible(t *testing.T) {
to: varchar10,
compatible: false,
}, {
name: "type widening: VARCHAR to TEXT",
from: varchar10,
to: text,
compatible: true,
rewrite: true,
name: "type widening: VARCHAR to TEXT",
from: varchar10,
to: text,
compatible: true,
rewrite: true,
invalidateSecondaryIndexes: true,
}, {
name: "type narrowing: TEXT to VARCHAR(10)",
from: text,
to: varchar10,
compatible: false,
}, {
name: "type widening: TINYTEXT to VARCHAR(300)",
from: tinyText,
to: varchar300,
compatible: true,
rewrite: true,
name: "type widening: TINYTEXT to VARCHAR(300)",
from: tinyText,
to: varchar300,
compatible: true,
rewrite: true,
invalidateSecondaryIndexes: true,
}, {
name: "type widening: varbinary to BLOB",
from: varbinary10,
to: blob,
compatible: true,
rewrite: true,
name: "type widening: varbinary to BLOB",
from: varbinary10,
to: blob,
compatible: true,
rewrite: true,
invalidateSecondaryIndexes: true,
}, {
name: "type narrowing: BLOB to varbinary",
from: blob,
to: varbinary10,
compatible: false,
}, {
name: "type widening: TEXT to MEDIUMTEXT",
from: text,
to: mediumText,
compatible: true,
rewrite: false,
name: "type widening: TEXT to MEDIUMTEXT",
from: text,
to: mediumText,
compatible: true,
rewrite: false,
invalidateSecondaryIndexes: false,
}, {
name: "type narrowing: MEDIUMTEXT to TEXT",
from: mediumText,
to: text,
compatible: false,
}, {
name: "type widening: BLOB to MEDIUMBLOB",
from: blob,
to: mediumBlob,
compatible: true,
rewrite: false,
name: "type widening: BLOB to MEDIUMBLOB",
from: blob,
to: mediumBlob,
compatible: true,
rewrite: false,
invalidateSecondaryIndexes: false,
}, {
name: "type narrowing: MEDIUMBLOB to BLOB",
from: mediumBlob,
@@ -292,9 +319,11 @@ func TestDoltIsTypeChangeCompatible(t *testing.T) {
func runTypeCompatibilityTests(t *testing.T, compatChecker TypeCompatibilityChecker, tests []typeChangeCompatibilityTest) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
compatible, requiresRewrite := compatChecker.IsTypeChangeCompatible(tt.from, tt.to)
assert.Equal(t, tt.compatible, compatible, "expected compatible to be %t, but was %t", tt.compatible, compatible)
assert.Equal(t, tt.rewrite, requiresRewrite, "expected rewrite required to be %t, but was %t", tt.rewrite, requiresRewrite)
compatibilityResults := compatChecker.IsTypeChangeCompatible(tt.from, tt.to)
assert.Equal(t, tt.compatible, compatibilityResults.compatible, "expected compatible to be %t, but was %t", tt.compatible, compatibilityResults.compatible)
assert.Equal(t, tt.rewrite, compatibilityResults.rewriteRows, "expected rewrite required to be %t, but was %t", tt.rewrite, compatibilityResults.rewriteRows)
assert.Equal(t, tt.invalidateSecondaryIndexes, compatibilityResults.invalidateSecondaryIndexes, "expected secondary index rewrite to be %t, but was %t", tt.invalidateSecondaryIndexes, compatibilityResults.invalidateSecondaryIndexes)
})
}
}
@@ -0,0 +1,90 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package merge
import (
"context"
"encoding/json"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
func NextConstraintViolation(ctx context.Context, itr prolly.ArtifactIter, kd, vd val.TupleDesc, ns tree.NodeStore) (violationType uint64, key sql.Row, value sql.Row, err error) {
art, err := itr.Next(ctx)
if err != nil {
return
}
key = make(sql.Row, kd.Count())
for i := 0; i < kd.Count(); i++ {
key[i], err = index.GetField(ctx, kd, i, art.SourceKey, ns)
if err != nil {
return
}
}
var meta prolly.ConstraintViolationMeta
err = json.Unmarshal(art.Metadata, &meta)
if err != nil {
return
}
value = make(sql.Row, vd.Count())
for i := 0; i < vd.Count(); i++ {
value[i], err = index.GetField(ctx, vd, i, meta.Value, ns)
if err != nil {
return
}
}
return MapCVType(art.ArtType), key, value, nil
}
func MapCVType(artifactType prolly.ArtifactType) (outType uint64) {
switch artifactType {
case prolly.ArtifactTypeForeignKeyViol:
outType = uint64(CvType_ForeignKey)
case prolly.ArtifactTypeUniqueKeyViol:
outType = uint64(CvType_UniqueIndex)
case prolly.ArtifactTypeChkConsViol:
outType = uint64(CvType_CheckConstraint)
case prolly.ArtifactTypeNullViol:
outType = uint64(CvType_NotNull)
default:
panic("unhandled cv type")
}
return
}
func UnmapCVType(in CvType) (out prolly.ArtifactType) {
switch in {
case CvType_ForeignKey:
out = prolly.ArtifactTypeForeignKeyViol
case CvType_UniqueIndex:
out = prolly.ArtifactTypeUniqueKeyViol
case CvType_CheckConstraint:
out = prolly.ArtifactTypeChkConsViol
case CvType_NotNull:
out = prolly.ArtifactTypeNullViol
default:
panic("unhandled cv type")
}
return
}
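MapCVType and UnmapCVType are intended to be inverses of one another. A hypothetical round-trip test, not part of this change (it assumes the standard testing import and the testify require package used elsewhere in this codebase):
func TestCVTypeRoundTrip(t *testing.T) {
	for _, at := range []prolly.ArtifactType{
		prolly.ArtifactTypeForeignKeyViol,
		prolly.ArtifactTypeUniqueKeyViol,
		prolly.ArtifactTypeChkConsViol,
		prolly.ArtifactTypeNullViol,
	} {
		// MapCVType returns a uint64, so convert back through CvType
		require.Equal(t, at, UnmapCVType(CvType(MapCVType(at))))
	}
}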
@@ -92,7 +92,11 @@ func deserializeSchemaFromFlatbuffer(ctx context.Context, buf []byte) (schema.Sc
return nil, err
}
err = sch.SetPkOrdinals(deserializeClusteredIndex(s))
dci, err := deserializeClusteredIndex(s)
if err != nil {
return nil, err
}
err = sch.SetPkOrdinals(dci)
if err != nil {
return nil, err
}
@@ -171,18 +175,25 @@ func serializeClusteredIndex(b *fb.Builder, sch schema.Schema) fb.UOffsetT {
return serial.IndexEnd(b)
}
func deserializeClusteredIndex(s *serial.TableSchema) []int {
func deserializeClusteredIndex(s *serial.TableSchema) ([]int, error) {
// check for keyless schema
if keylessSerialSchema(s) {
return nil
kss, err := keylessSerialSchema(s)
if err != nil {
return nil, err
}
if kss {
return nil, nil
}
ci := s.ClusteredIndex(nil)
ci, err := s.TryClusteredIndex(nil)
if err != nil {
return nil, err
}
pkOrdinals := make([]int, ci.KeyColumnsLength())
for i := range pkOrdinals {
pkOrdinals[i] = int(ci.KeyColumns(i))
}
return pkOrdinals
return pkOrdinals, nil
}
func serializeSchemaColumns(b *fb.Builder, sch schema.Schema) fb.UOffsetT {
@@ -281,7 +292,11 @@ func serializeHiddenKeylessColumns(b *fb.Builder) (id, card fb.UOffsetT) {
func deserializeColumns(ctx context.Context, s *serial.TableSchema) ([]schema.Column, error) {
length := s.ColumnsLength()
if keylessSerialSchema(s) {
isKeyless, err := keylessSerialSchema(s)
if err != nil {
return nil, err
}
if isKeyless {
// (6/15/22)
// currently, keyless id and cardinality columns
// do not exist in schema.Schema
@@ -295,7 +310,10 @@ func deserializeColumns(ctx context.Context, s *serial.TableSchema) ([]schema.Co
cols := make([]schema.Column, length)
c := serial.Column{}
for i := range cols {
s.Columns(&c, i)
_, err := s.TryColumns(&c, i)
if err != nil {
return nil, err
}
sqlType, err := typeinfoFromSqlType(string(c.SqlType()))
if err != nil {
return nil, err
@@ -395,9 +413,17 @@ func deserializeSecondaryIndexes(sch schema.Schema, s *serial.TableSchema) error
idx := serial.Index{}
col := serial.Column{}
for i := 0; i < s.SecondaryIndexesLength(); i++ {
s.SecondaryIndexes(&idx, i)
_, err := s.TrySecondaryIndexes(&idx, i)
if err != nil {
return err
}
assertTrue(!idx.PrimaryKey(), "cannot deserialize secondary index with PrimaryKey() == true")
fti, err := deserializeFullTextInfo(&idx)
if err != nil {
return err
}
name := string(idx.Name())
props := schema.IndexProperties{
IsUnique: idx.UniqueKey(),
@@ -405,13 +431,16 @@ func deserializeSecondaryIndexes(sch schema.Schema, s *serial.TableSchema) error
IsFullText: idx.FulltextKey(),
IsUserDefined: !idx.SystemDefined(),
Comment: string(idx.Comment()),
FullTextProperties: deserializeFullTextInfo(&idx),
FullTextProperties: fti,
}
tags := make([]uint64, idx.IndexColumnsLength())
for j := range tags {
pos := idx.IndexColumns(j)
s.Columns(&col, int(pos))
_, err := s.TryColumns(&col, int(pos))
if err != nil {
return err
}
tags[j] = col.Tag()
}
@@ -424,7 +453,7 @@ func deserializeSecondaryIndexes(sch schema.Schema, s *serial.TableSchema) error
}
}
_, err := sch.Indexes().AddIndexByColTags(name, tags, prefixLengths, props)
_, err = sch.Indexes().AddIndexByColTags(name, tags, prefixLengths, props)
if err != nil {
return err
}
@@ -455,7 +484,10 @@ func deserializeChecks(sch schema.Schema, s *serial.TableSchema) error {
coll := sch.Checks()
c := serial.CheckConstraint{}
for i := 0; i < s.ChecksLength(); i++ {
s.Checks(&c, i)
_, err := s.TryChecks(&c, i)
if err != nil {
return err
}
n, e := string(c.Name()), string(c.Expression())
if _, err := coll.AddCheck(n, e, c.Enforced()); err != nil {
return err
@@ -493,10 +525,14 @@ func serializeFullTextInfo(b *fb.Builder, idx schema.Index) fb.UOffsetT {
return serial.FulltextInfoEnd(b)
}
func deserializeFullTextInfo(idx *serial.Index) schema.FullTextProperties {
func deserializeFullTextInfo(idx *serial.Index) (schema.FullTextProperties, error) {
fulltext := serial.FulltextInfo{}
if idx.FulltextInfo(&fulltext) == nil {
return schema.FullTextProperties{}
has, err := idx.TryFulltextInfo(&fulltext)
if err != nil {
return schema.FullTextProperties{}, err
}
if has == nil {
return schema.FullTextProperties{}, nil
}
var keyPositions []uint16
@@ -517,24 +553,27 @@ func deserializeFullTextInfo(idx *serial.Index) schema.FullTextProperties {
KeyType: fulltext.KeyType(),
KeyName: string(fulltext.KeyName()),
KeyPositions: keyPositions,
}
}, nil
}
func keylessSerialSchema(s *serial.TableSchema) bool {
func keylessSerialSchema(s *serial.TableSchema) (bool, error) {
n := s.ColumnsLength()
if n < 2 {
return false
return false, nil
}
// keyless id is the 2nd to last column
// in the columns vector (by convention)
// and the only field in key tuples of
// the clustered index.
id := serial.Column{}
s.Columns(&id, n-2)
_, err := s.TryColumns(&id, n-2)
if err != nil {
return false, err
}
ok := id.Generated() && id.Hidden() &&
string(id.Name()) == keylessIdCol
if !ok {
return false
return false, nil
}
// keyless cardinality is the last column
@@ -542,9 +581,12 @@ func keylessSerialSchema(s *serial.TableSchema) bool {
// and the first field in value tuples of
// the clustered index.
card := serial.Column{}
s.Columns(&card, n-1)
_, err = s.TryColumns(&card, n-1)
if err != nil {
return false, err
}
return card.Generated() && card.Hidden() &&
string(card.Name()) == keylessCardCol
string(card.Name()) == keylessCardCol, nil
}
func sqlTypeString(t typeinfo.TypeInfo) string {
+18 -5
View File
@@ -226,6 +226,7 @@ func (ix *indexImpl) PrimaryKeyTags() []uint64 {
// Schema implements Index.
func (ix *indexImpl) Schema() Schema {
contentHashedFields := make([]uint64, 0)
cols := make([]Column, len(ix.allTags))
for i, tag := range ix.allTags {
col := ix.indexColl.colColl.TagToCol[tag]
@@ -237,15 +238,27 @@ func (ix *indexImpl) Schema() Schema {
TypeInfo: col.TypeInfo,
Constraints: nil,
}
// contentHashedFields is the collection of column tags for columns in a unique index that do
// not have a prefix length specified and should be stored as a content hash. This information
// is needed to later identify that an index is using content-hashed encoding.
prefixLength := uint16(0)
if len(ix.PrefixLengths()) > i {
prefixLength = ix.PrefixLengths()[i]
}
if ix.IsUnique() && prefixLength == 0 {
contentHashedFields = append(contentHashedFields, tag)
}
}
allCols := NewColCollection(cols...)
nonPkCols := NewColCollection()
return &schemaImpl{
pkCols: allCols,
nonPKCols: nonPkCols,
allCols: allCols,
indexCollection: NewIndexCollection(nil, nil),
checkCollection: NewCheckCollection(),
pkCols: allCols,
nonPKCols: nonPkCols,
allCols: allCols,
indexCollection: NewIndexCollection(nil, nil),
checkCollection: NewCheckCollection(),
contentHashedFields: contentHashedFields,
}
}
+15 -3
View File
@@ -44,6 +44,7 @@ type schemaImpl struct {
checkCollection CheckCollection
pkOrdinals []int
collation Collation
contentHashedFields []uint64
}
var _ Schema = (*schemaImpl)(nil)
@@ -422,6 +423,11 @@ func (si *schemaImpl) getKeyColumnsDescriptor(convertAddressColumns bool) val.Tu
return val.KeylessTupleDesc
}
contentHashedFields := make(map[uint64]struct{})
for _, tag := range si.contentHashedFields {
contentHashedFields[tag] = struct{}{}
}
var tt []val.Type
useCollations := false // We only use collations if a string exists
var collations []sql.CollationID
@@ -429,17 +435,23 @@ func (si *schemaImpl) getKeyColumnsDescriptor(convertAddressColumns bool) val.Tu
sqlType := col.TypeInfo.ToSqlType()
queryType := sqlType.Type()
var t val.Type
if convertAddressColumns && queryType == query.Type_BLOB {
contentHashedField := false
if _, ok := contentHashedFields[tag]; ok {
contentHashedField = true
}
if convertAddressColumns && !contentHashedField && queryType == query.Type_BLOB {
t = val.Type{
Enc: val.Encoding(EncodingFromSqlType(query.Type_VARBINARY)),
Nullable: columnMissingNotNullConstraint(col),
}
} else if convertAddressColumns && queryType == query.Type_TEXT {
} else if convertAddressColumns && !contentHashedField && queryType == query.Type_TEXT {
t = val.Type{
Enc: val.Encoding(EncodingFromSqlType(query.Type_VARCHAR)),
Nullable: columnMissingNotNullConstraint(col),
}
} else if convertAddressColumns && queryType == query.Type_GEOMETRY {
} else if convertAddressColumns && !contentHashedField && queryType == query.Type_GEOMETRY {
t = val.Type{
Enc: val.Encoding(serial.EncodingCell),
Nullable: columnMissingNotNullConstraint(col),
@@ -216,7 +216,7 @@ func (p *branchControlReplication) Run() {
for _, r := range p.replicas {
r := r
wg.Add(1)
func() {
go func() {
defer wg.Done()
r.Run()
}()
@@ -233,7 +233,7 @@ func (p *replicatingMySQLDbPersister) Run() {
for _, r := range p.replicas {
r := r
wg.Add(1)
func() {
go func() {
defer wg.Done()
r.Run()
}()
+1 -1
View File
@@ -1130,7 +1130,7 @@ func (db Database) GetViewDefinition(ctx *sql.Context, viewName string) (sql.Vie
if err != nil {
return sql.ViewDefinition{}, false, err
}
return sql.ViewDefinition{Name: viewName, TextDefinition: blameViewTextDef, CreateViewStatement: fmt.Sprintf("CREATE VIEW %s AS %s", viewName, blameViewTextDef)}, true, nil
return sql.ViewDefinition{Name: viewName, TextDefinition: blameViewTextDef, CreateViewStatement: fmt.Sprintf("CREATE VIEW `%s` AS %s", viewName, blameViewTextDef)}, true, nil
}
key, err := doltdb.NewDataCacheKey(root)
@@ -71,9 +71,14 @@ func doDoltCheckout(ctx *sql.Context, args []string) (statusCode int, successMes
return 1, "", err
}
branchOrTrack := apr.Contains(cli.CheckoutCreateBranch) || apr.Contains(cli.TrackFlag)
newBranch, _, err := parseBranchArgs(apr)
if err != nil {
return 1, "", err
}
branchOrTrack := newBranch != "" || apr.Contains(cli.TrackFlag)
if apr.Contains(cli.TrackFlag) && apr.NArg() > 0 {
return 1, "", errors.New("Improper usage.")
return 1, "", errors.New("Improper usage. Too many arguments provided.")
}
if (branchOrTrack && apr.NArg() > 1) || (!branchOrTrack && apr.NArg() == 0) {
return 1, "", errors.New("Improper usage.")
@@ -90,7 +95,7 @@ func doDoltCheckout(ctx *sql.Context, args []string) (statusCode int, successMes
if err != nil {
return 1, "", err
}
if apr.Contains(cli.CheckoutCreateBranch) && readOnlyDatabase {
if newBranch != "" && readOnlyDatabase {
return 1, "", fmt.Errorf("unable to create new branch in a read-only database")
}
@@ -199,6 +204,30 @@ func doDoltCheckout(ctx *sql.Context, args []string) (statusCode int, successMes
return 0, successMessage, nil
}
// parseBranchArgs returns the name of the new branch and whether it should be created forcibly. It asserts
// that a branch name provided via -b or -B is non-empty, and returns an empty string when neither flag is provided.
func parseBranchArgs(apr *argparser.ArgParseResults) (newBranch string, createBranchForcibly bool, err error) {
if apr.Contains(cli.CheckoutCreateBranch) && apr.Contains(cli.CreateResetBranch) {
return "", false, errors.New("Improper usage. Cannot use both -b and -B.")
}
if newBranch, ok := apr.GetValue(cli.CheckoutCreateBranch); ok {
if len(newBranch) == 0 {
return "", false, ErrEmptyBranchName
}
return newBranch, false, nil
}
if newBranch, ok := apr.GetValue(cli.CreateResetBranch); ok {
if len(newBranch) == 0 {
return "", false, ErrEmptyBranchName
}
return newBranch, true, nil
}
return "", false, nil
}
// isReadOnlyDatabase returns true if the named database is a read-only database. An error is returned
// if any issues are encountered while looking up the named database.
func isReadOnlyDatabase(ctx *sql.Context, dbName string) (bool, error) {
@@ -346,14 +375,20 @@ func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *
newBranchName = remoteBranchName
}
if newBranch, ok := apr.GetValue(cli.CheckoutCreateBranch); ok {
if len(newBranch) == 0 {
return "", "", ErrEmptyBranchName
}
newBranchName = newBranch
// A little wonky behavior here. parseBranchArgs is actually called twice because in this procedure we pass around
// the parse results, but we also needed to parse the -b and -B flags in the main procedure. It ended up being
// a little cleaner to just call it again here than to pass the results around.
var createBranchForcibly bool
var optionBBranch string
optionBBranch, createBranchForcibly, err = parseBranchArgs(apr)
if err != nil {
return "", "", err
}
if optionBBranch != "" {
newBranchName = optionBBranch
}
err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, false, rsc)
err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, createBranchForcibly, rsc)
if err != nil {
return "", "", err
}
@@ -23,14 +23,10 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/cherry_pick"
)
var ErrEmptyCherryPick = errors.New("cannot cherry-pick empty string")
var ErrCherryPickUncommittedChanges = errors.New("cannot cherry-pick with uncommitted changes")
var cherryPickSchema = []*sql.Column{
{
@@ -83,29 +79,8 @@ func doDoltCherryPick(ctx *sql.Context, args []string) (string, int, int, int, e
return "", 0, 0, 0, err
}
dSess := dsess.DSessFromSess(ctx.Session)
if apr.Contains(cli.AbortParam) {
ws, err := dSess.WorkingSet(ctx, dbName)
if err != nil {
return "", 0, 0, 0, fmt.Errorf("fatal: unable to load working set: %v", err)
}
if !ws.MergeActive() {
return "", 0, 0, 0, fmt.Errorf("error: There is no cherry-pick merge to abort")
}
roots, ok := dSess.GetRoots(ctx, dbName)
if !ok {
return "", 0, 0, 0, fmt.Errorf("fatal: unable to load roots for %s", dbName)
}
newWs, err := abortMerge(ctx, ws, roots)
if err != nil {
return "", 0, 0, 0, fmt.Errorf("fatal: unable to abort merge: %v", err)
}
return "", 0, 0, 0, dSess.SetWorkingSet(ctx, dbName, newWs)
return "", 0, 0, 0, cherry_pick.AbortCherryPick(ctx, dbName)
}
// we only support cherry-picking a single commit for now.
@@ -120,182 +95,18 @@ func doDoltCherryPick(ctx *sql.Context, args []string) (string, int, int, int, e
return "", 0, 0, 0, ErrEmptyCherryPick
}
roots, ok := dSess.GetRoots(ctx, dbName)
if !ok {
return "", 0, 0, 0, sql.ErrDatabaseNotFound.New(dbName)
}
mergeResult, commitMsg, err := cherryPick(ctx, dSess, roots, dbName, cherryStr)
commit, mergeResult, err := cherry_pick.CherryPick(ctx, cherryStr, cherry_pick.CherryPickOptions{})
if err != nil {
return "", 0, 0, 0, err
}
newWorkingRoot := mergeResult.Root
err = dSess.SetRoot(ctx, dbName, newWorkingRoot)
if err != nil {
return "", 0, 0, 0, err
if mergeResult != nil {
return "",
mergeResult.CountOfTablesWithDataConflicts(),
mergeResult.CountOfTablesWithSchemaConflicts(),
mergeResult.CountOfTablesWithConstraintViolations(),
nil
}
err = stageCherryPickedTables(ctx, mergeResult.Stats)
if err != nil {
return "", 0, 0, 0, err
}
if mergeResult.HasMergeArtifacts() {
return "", mergeResult.CountOfTablesWithDataConflicts(),
mergeResult.CountOfTablesWithSchemaConflicts(), mergeResult.CountOfTablesWithConstraintViolations(), nil
} else {
commitHash, _, err := doDoltCommit(ctx, []string{"-m", commitMsg})
return commitHash, 0, 0, 0, err
}
}
// stageCherryPickedTables stages the tables from |mergeStats| that don't have any merge artifacts, i.e.
// tables that don't have any data or schema conflicts and don't have any constraint violations.
func stageCherryPickedTables(ctx *sql.Context, mergeStats map[string]*merge.MergeStats) error {
tablesToAdd := make([]string, 0, len(mergeStats))
for tableName, mergeStats := range mergeStats {
if mergeStats.HasArtifacts() {
continue
}
// Find any tables being deleted and make sure we stage those tables first
if mergeStats.Operation == merge.TableRemoved {
tablesToAdd = append([]string{tableName}, tablesToAdd...)
} else {
tablesToAdd = append(tablesToAdd, tableName)
}
}
for _, tableName := range tablesToAdd {
res, err := doDoltAdd(ctx, []string{tableName})
if err != nil {
return err
}
if res != 0 {
return fmt.Errorf("dolt add failed")
}
}
return nil
}
// cherryPick checks that the current working set is clean, verifies the cherry-pick commit is not a merge commit
// or a commit without a parent, performs the merge, and returns the new working set root value along with
// the commit message of the cherry-picked commit, which becomes the commit message of the new commit.
func cherryPick(ctx *sql.Context, dSess *dsess.DoltSession, roots doltdb.Roots, dbName, cherryStr string) (*merge.Result, string, error) {
// check for clean working set
wsOnlyHasIgnoredTables, err := diff.WorkingSetContainsOnlyIgnoredTables(ctx, roots)
if err != nil {
return nil, "", err
}
if !wsOnlyHasIgnoredTables {
return nil, "", ErrCherryPickUncommittedChanges
}
headRootHash, err := roots.Head.HashOf()
if err != nil {
return nil, "", err
}
workingRootHash, err := roots.Working.HashOf()
if err != nil {
return nil, "", err
}
doltDB, ok := dSess.GetDoltDB(ctx, dbName)
if !ok {
return nil, "", fmt.Errorf("failed to get DoltDB")
}
dbData, ok := dSess.GetDbData(ctx, dbName)
if !ok {
return nil, "", fmt.Errorf("failed to get dbData")
}
cherryCommitSpec, err := doltdb.NewCommitSpec(cherryStr)
if err != nil {
return nil, "", err
}
headRef, err := dbData.Rsr.CWBHeadRef()
if err != nil {
return nil, "", err
}
cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, headRef)
if err != nil {
return nil, "", err
}
if len(cherryCommit.DatasParents()) > 1 {
return nil, "", fmt.Errorf("cherry-picking a merge commit is not supported")
}
if len(cherryCommit.DatasParents()) == 0 {
return nil, "", fmt.Errorf("cherry-picking a commit without parents is not supported")
}
cherryRoot, err := cherryCommit.GetRootValue(ctx)
if err != nil {
return nil, "", err
}
// When cherry-picking, we need to use the parent of the cherry-picked commit as the ancestor. This
// ensures that only the delta from the cherry-pick commit is applied.
parentCommit, err := doltDB.ResolveParent(ctx, cherryCommit, 0)
if err != nil {
return nil, "", err
}
parentRoot, err := parentCommit.GetRootValue(ctx)
if err != nil {
return nil, "", err
}
dbState, ok, err := dSess.LookupDbState(ctx, dbName)
if err != nil {
return nil, "", err
} else if !ok {
return nil, "", sql.ErrDatabaseNotFound.New(dbName)
}
mo := merge.MergeOpts{
IsCherryPick: true,
KeepSchemaConflicts: false,
}
result, err := merge.MergeRoots(ctx, roots.Working, cherryRoot, parentRoot, cherryCommit, parentCommit, dbState.EditOpts(), mo)
if err != nil {
return nil, "", err
}
workingRootHash, err = result.Root.HashOf()
if err != nil {
return nil, "", err
}
if headRootHash.Equal(workingRootHash) {
return nil, "", fmt.Errorf("no changes were made, nothing to commit")
}
cherryCommitMeta, err := cherryCommit.GetCommitMeta(ctx)
if err != nil {
return nil, "", err
}
// If any of the merge stats show a data or schema conflict or a constraint
// violation, record that a merge is in progress.
for _, stats := range result.Stats {
if stats.HasArtifacts() {
ws, err := dSess.WorkingSet(ctx, dbName)
if err != nil {
return nil, "", err
}
newWorkingSet := ws.StartCherryPick(cherryCommit, cherryStr)
err = dSess.SetWorkingSet(ctx, dbName, newWorkingSet)
if err != nil {
return nil, "", err
}
break
}
}
return result, cherryCommitMeta.Description, nil
return commit, 0, 0, 0, nil
}
@@ -117,7 +117,7 @@ func doDoltMerge(ctx *sql.Context, args []string) (string, int, int, error) {
return "", noConflictsOrViolations, threeWayMerge, fmt.Errorf("fatal: There is no merge to abort")
}
ws, err = abortMerge(ctx, ws, roots)
ws, err = merge.AbortMerge(ctx, ws, roots)
if err != nil {
return "", noConflictsOrViolations, threeWayMerge, err
}
@@ -278,43 +278,6 @@ func performMerge(
return ws, commit, noConflictsOrViolations, threeWayMerge, nil
}
func abortMerge(ctx *sql.Context, workingSet *doltdb.WorkingSet, roots doltdb.Roots) (*doltdb.WorkingSet, error) {
tbls, err := doltdb.UnionTableNames(ctx, roots.Working, roots.Staged, roots.Head)
if err != nil {
return nil, err
}
roots, err = actions.MoveTablesFromHeadToWorking(ctx, roots, tbls)
if err != nil {
return nil, err
}
preMergeWorkingRoot := workingSet.MergeState().PreMergeWorkingRoot()
preMergeWorkingTables, err := preMergeWorkingRoot.GetTableNames(ctx)
if err != nil {
return nil, err
}
nonIgnoredTables, err := doltdb.ExcludeIgnoredTables(ctx, roots, preMergeWorkingTables)
if err != nil {
return nil, err
}
someTablesAreIgnored := len(nonIgnoredTables) != len(preMergeWorkingTables)
if someTablesAreIgnored {
newWorking, err := actions.MoveTablesBetweenRoots(ctx, nonIgnoredTables, preMergeWorkingRoot, roots.Working)
if err != nil {
return nil, err
}
workingSet = workingSet.WithWorkingRoot(newWorking)
} else {
workingSet = workingSet.WithWorkingRoot(preMergeWorkingRoot)
}
workingSet = workingSet.WithStagedRoot(workingSet.WorkingRoot())
workingSet = workingSet.ClearMerge()
return workingSet, nil
}
func executeMerge(
ctx *sql.Context,
sess *dsess.DoltSession,
@@ -41,7 +41,11 @@ const (
)
var ErrRetryTransaction = errors.New("this transaction conflicts with a committed transaction from another client")
var ErrUnresolvedConflictsCommit = errors.New("Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1")
var ErrUnresolvedConflictsCommit = errors.New("Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts and dolt_schema_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1")
var ErrUnresolvedConflictsAutoCommit = errors.New("Merge conflict detected, @autocommit transaction rolled back. @autocommit must be disabled so that merge conflicts can be resolved using the dolt_conflicts and dolt_schema_conflicts tables before manually committing the transaction. Alternatively, to commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1")
var ErrUnresolvedConstraintViolationsCommit = errors.New("Committing this transaction resulted in a working set with constraint violations, transaction rolled back. " +
"This constraint violation may be the result of a previous merge or the result of transaction sequencing. " +
"Constraint violations from a merge can be resolved using the dolt_constraint_violations table before committing the transaction. " +
@@ -610,7 +614,17 @@ func (tx *DoltTransaction) validateWorkingSetForCommit(ctx *sql.Context, working
return rollbackErr
}
return ErrUnresolvedConflictsCommit
// Return a different error message depending on whether @autocommit is enabled, to help
// users understand what steps to take
autocommit, err := isSessionAutocommit(ctx)
if err != nil {
return err
}
if autocommit {
return ErrUnresolvedConflictsAutoCommit
} else {
return ErrUnresolvedConflictsCommit
}
}
}
@@ -798,3 +812,12 @@ func rootsEqual(left, right *doltdb.RootValue) bool {
func workingAndStagedEqual(left, right *doltdb.WorkingSet) bool {
return rootsEqual(left.WorkingRoot(), right.WorkingRoot()) && rootsEqual(left.StagedRoot(), right.StagedRoot())
}
// isSessionAutocommit returns true if @autocommit is enabled.
func isSessionAutocommit(ctx *sql.Context) (bool, error) {
autoCommitSessionVar, err := ctx.GetSessionVariable(ctx, sql.AutoCommitSessionVar)
if err != nil {
return false, err
}
return sql.ConvertToBool(ctx, autoCommitSessionVar)
}
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
)
var errUnblameableTable = errors.New("unable to generate blame view for table without primary key")
@@ -41,7 +42,7 @@ const (
coalesce(to_commit_date, from_commit_date) DESC
) row_num
FROM
dolt_diff_%s -- tableName
` + "`dolt_diff_%s`" + ` -- tableName
)
SELECT
%s -- pksSelectExpression
@@ -107,10 +108,13 @@ func createDoltBlameViewExpression(tableName string, pks []schema.Column) (strin
pksOrderByExpression += ", "
}
allToPks += "to_" + pk.Name
pksPartitionByExpression += "coalesce(to_" + pk.Name + ", from_" + pk.Name + ")"
pksOrderByExpression += "sd.to_" + pk.Name + " ASC "
pksSelectExpression += "sd.to_" + pk.Name + " AS " + pk.Name + ", "
toPk := sqlfmt.QuoteIdentifier("to_" + pk.Name)
fromPk := sqlfmt.QuoteIdentifier("from_" + pk.Name)
allToPks += toPk
pksPartitionByExpression += fmt.Sprintf("coalesce(%s, %s)", toPk, fromPk)
pksOrderByExpression += fmt.Sprintf("sd.%s ASC ", toPk)
pksSelectExpression += fmt.Sprintf("sd.%s AS %s, ", toPk, sqlfmt.QuoteIdentifier(pk.Name))
}
return fmt.Sprintf(viewExpressionTemplate, allToPks, pksPartitionByExpression, tableName,
@@ -553,7 +553,12 @@ func calculateColDelta(ctx *sql.Context, ddb *doltdb.DoltDB, delta *diff.TableDe
toIdx := diffTableCols.TagToIdx[toColTag]
fromIdx := diffTableCols.TagToIdx[fromColTag]
if r[toIdx] != r[fromIdx] {
toCol := delta.ToSch.GetAllCols().GetByIndex(toIdx)
cmp, err := toCol.TypeInfo.ToSqlType().Compare(r[toIdx], r[fromIdx])
if err != nil {
return nil, nil, err
}
if cmp != 0 {
colNamesSet[col] = struct{}{}
}
}
@@ -162,7 +162,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
r := make(sql.Row, itr.sch.GetAllCols().Size()+3)
r[0] = art.SourceRootish.String()
r[1] = mapCVType(art.ArtType)
r[1] = merge.MapCVType(art.ArtType)
var meta prolly.ConstraintViolationMeta
err = json.Unmarshal(art.Metadata, &meta)
@@ -258,7 +258,7 @@ func (d *prollyCVDeleter) Delete(ctx *sql.Context, r sql.Row) error {
d.kb.PutCommitAddr(d.kd.Count()-2, h)
// Finally the artifact type
artType := unmapCVType(merge.CvType(r[1].(uint64)))
artType := merge.UnmapCVType(merge.CvType(r[1].(uint64)))
d.kb.PutUint8(d.kd.Count()-1, uint8(artType))
key := d.kb.Build(d.pool)
@@ -307,38 +307,6 @@ func (d *prollyCVDeleter) Close(ctx *sql.Context) error {
return d.cvt.rs.SetRoot(ctx, updatedRoot)
}
func mapCVType(artifactType prolly.ArtifactType) (outType uint64) {
switch artifactType {
case prolly.ArtifactTypeForeignKeyViol:
outType = uint64(merge.CvType_ForeignKey)
case prolly.ArtifactTypeUniqueKeyViol:
outType = uint64(merge.CvType_UniqueIndex)
case prolly.ArtifactTypeChkConsViol:
outType = uint64(merge.CvType_CheckConstraint)
case prolly.ArtifactTypeNullViol:
outType = uint64(merge.CvType_NotNull)
default:
panic("unhandled cv type")
}
return
}
func unmapCVType(in merge.CvType) (out prolly.ArtifactType) {
switch in {
case merge.CvType_ForeignKey:
out = prolly.ArtifactTypeForeignKeyViol
case merge.CvType_UniqueIndex:
out = prolly.ArtifactTypeUniqueKeyViol
case merge.CvType_CheckConstraint:
out = prolly.ArtifactTypeChkConsViol
case merge.CvType_NotNull:
out = prolly.ArtifactTypeNullViol
default:
panic("unhandled cv type")
}
return
}
func (itr prollyCVIter) Close(ctx *sql.Context) error {
return nil
}
@@ -558,7 +558,7 @@ var BranchPlanTests = []struct {
},
Queries: []indexQuery{
{
Query: "select * from t1 t1a join t1 t1b on t1a.b = t1b.b order by 1",
Query: "select /*+ LOOKUP_JOIN(t1a,t1b) */ * from t1 t1a join t1 t1b on t1a.b = t1b.b order by 1",
Index: true,
},
{
@@ -569,11 +569,11 @@ var BranchPlanTests = []struct {
Query: "use mydb/main",
},
{
Query: "select * from t1 t1a join t1 t1b on t1a.b = t1b.b order by 1",
Query: "select /*+ LOOKUP_JOIN(t1a,t1b) */ * from t1 t1a join t1 t1b on t1a.b = t1b.b order by 1",
Index: true,
},
{
Query: "select * from `mydb/b1`.t1 t1a join `mydb/b1`.t1 t1b on t1a.b = t1b.b order by 1",
Query: "select /*+ LOOKUP_JOIN(t1a,t1b) */ * from `mydb/b1`.t1 t1a join `mydb/b1`.t1 t1b on t1a.b = t1b.b order by 1",
Index: true,
},
},
@@ -89,7 +89,7 @@ func TestSingleQuery(t *testing.T) {
}
for _, q := range setupQueries {
enginetest.RunQuery(t, engine, harness, q)
enginetest.RunQueryWithContext(t, engine, harness, nil, q)
}
// engine.EngineAnalyzer().Debug = true
@@ -331,7 +331,7 @@ func TestSingleQueryPrepared(t *testing.T) {
}
for _, q := range setupQueries {
enginetest.RunQuery(t, engine, harness, q)
enginetest.RunQueryWithContext(t, engine, harness, nil, q)
}
//engine.Analyzer.Debug = true
@@ -2340,7 +2340,7 @@ func TestSystemTableIndexes(t *testing.T) {
ctx := enginetest.NewContext(harness)
for _, q := range stt.setup {
enginetest.RunQuery(t, e, harness, q)
enginetest.RunQueryWithContext(t, e, harness, ctx, q)
}
for i, c := range []string{"inner", "lookup", "hash", "merge"} {
@@ -2375,7 +2375,7 @@ func TestSystemTableIndexesPrepared(t *testing.T) {
ctx := enginetest.NewContext(harness)
for _, q := range stt.setup {
enginetest.RunQuery(t, e, harness, q)
enginetest.RunQueryWithContext(t, e, harness, ctx, q)
}
for _, tt := range stt.queries {
@@ -1004,8 +1004,7 @@ var DoltScripts = []queries.ScriptTest{
"CREATE TABLE t(pk varchar(20), val int)",
"ALTER TABLE t ADD PRIMARY KEY (pk, val)",
"INSERT INTO t VALUES ('zzz',4),('mult',1),('sub',2),('add',5)",
"CALL dolt_add('.');",
"CALL dolt_commit('-am', 'add rows');",
"CALL dolt_commit('-Am', 'add rows');",
"INSERT INTO t VALUES ('dolt',0),('alt',12),('del',8),('ctl',3)",
"CALL dolt_commit('-am', 'add more rows');",
},
@@ -1025,6 +1024,25 @@ var DoltScripts = []queries.ScriptTest{
},
},
},
{
Name: "blame: table and pk require identifier quoting",
SetUpScript: []string{
"create table `t-1` (`p-k` int primary key, col1 varchar(100));",
"insert into `t-1` values (1, 'one');",
"CALL dolt_commit('-Am', 'adding table t-1');",
"insert into `t-1` values (2, 'two');",
"CALL dolt_commit('-Am', 'adding another row to t-1');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT `p-k`, message FROM `dolt_blame_t-1`;",
Expected: []sql.Row{
{1, "adding table t-1"},
{2, "adding another row to t-1"},
},
},
},
},
{
Name: "Nautobot FOREIGN KEY panic repro",
SetUpScript: []string{
@@ -2387,6 +2405,101 @@ var DoltCheckoutScripts = []queries.ScriptTest{
},
},
},
{
Name: "dolt_checkout with new branch forcefully",
SetUpScript: []string{
"create table t (s varchar(5) primary key);",
"insert into t values ('foo');",
"call dolt_commit('-Am', 'commit main~2');", // will be main~2
"insert into t values ('bar');",
"call dolt_commit('-Am', 'commit main~1');", // will be main~1
"insert into t values ('baz');",
"call dolt_commit('-Am', 'commit main');", // will be main~1
"call dolt_branch('testbr', 'main~1');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_checkout('-B', 'testbr', 'main~2');",
SkipResultsCheck: true,
},
{
Query: "select active_branch();",
Expected: []sql.Row{{"testbr"}},
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"foo"}},
},
{
Query: "call dolt_checkout('main');",
SkipResultsCheck: true,
},
{
Query: "select active_branch();",
Expected: []sql.Row{{"main"}},
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"bar"}, {"baz"}, {"foo"}},
},
{
Query: "call dolt_checkout('-B', 'testbr', 'main~1');",
SkipResultsCheck: true,
},
{
Query: "select active_branch();",
Expected: []sql.Row{{"testbr"}},
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"bar"}, {"foo"}},
},
},
},
{
Name: "dolt_checkout with new branch forcefully with dirty working set",
SetUpScript: []string{
"create table t (s varchar(5) primary key);",
"insert into t values ('foo');",
"call dolt_commit('-Am', 'commit main~2');", // will be main~2
"insert into t values ('bar');",
"call dolt_commit('-Am', 'commit main~1');", // will be main~1
"insert into t values ('baz');",
"call dolt_commit('-Am', 'commit main');", // will be main~1
"call dolt_checkout('-b', 'testbr', 'main~1');",
"insert into t values ('qux');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select active_branch();",
Expected: []sql.Row{{"testbr"}},
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"bar"}, {"foo"}, {"qux"}}, // Dirty working set
},
{
Query: "call dolt_checkout('main');",
SkipResultsCheck: true,
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"bar"}, {"baz"}, {"foo"}},
},
{
Query: "call dolt_checkout('-B', 'testbr', 'main~1');",
SkipResultsCheck: true,
},
{
Query: "select active_branch();",
Expected: []sql.Row{{"testbr"}},
},
{
Query: "select * from t order by s;",
Expected: []sql.Row{{"bar"}, {"foo"}}, // Dirty working set was forcefully overwritten
},
},
},
{
Name: "dolt_checkout mixed with USE statements",
SetUpScript: []string{
@@ -2768,6 +2881,15 @@ var DoltCheckoutReadOnlyScripts = []queries.ScriptTest{
},
},
},
{
Name: "dolt checkout -B returns an error for read-only databases",
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_checkout('-B', 'newBranch');",
ExpectedErrStr: "unable to create new branch in a read-only database",
},
},
},
}
var DoltInfoSchemaScripts = []queries.ScriptTest{
@@ -4503,9 +4625,7 @@ var DoltReflogTestScripts = []queries.ScriptTest{
// Calling dolt_gc() invalidates the session, so we have to ask this assertion to create a new session
NewSession: true,
Query: "select ref, commit_hash, commit_message from dolt_reflog('main')",
Expected: []sql.Row{
{"refs/heads/main", doltCommit, "Initialize data repository"},
},
Expected: []sql.Row{},
},
},
},
@@ -4538,9 +4658,7 @@ var DoltReflogTestScripts = []queries.ScriptTest{
// Calling dolt_gc() invalidates the session, so we have to force this test to create a new session
NewSession: true,
Query: "select ref, commit_hash, commit_message from dolt_reflog('main')",
Expected: []sql.Row{
{"refs/heads/main", doltCommit, "inserting row 2"},
},
Expected: []sql.Row{},
},
},
},
@@ -4623,6 +4623,29 @@ var ColumnDiffSystemTableScriptTests = []queries.ScriptTest{
},
},
},
{
Name: "json column change",
SetUpScript: []string{
"create table t (pk int primary key, j json);",
`insert into t values (1, '{"test": 123}');`,
"call dolt_add('.')",
"call dolt_commit('-m', 'commit1');",
`update t set j = '{"nottest": 321}'`,
"call dolt_add('.')",
"call dolt_commit('-m', 'commit2');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select column_name, diff_type from dolt_column_diff;",
Expected: []sql.Row{
{"j", "modified"},
{"pk", "added"},
{"j", "added"},
},
},
},
},
}
var CommitDiffSystemTableScriptTests = []queries.ScriptTest{
@@ -556,7 +556,7 @@ var MergeScripts = []queries.ScriptTest{
{
// errors because creating a new branch implicitly commits the current transaction
Query: "CALL DOLT_CHECKOUT('-b', 'other-branch')",
ExpectedErrStr: "Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1",
ExpectedErrStr: dsess.ErrUnresolvedConflictsCommit.Error(),
},
},
},
@@ -944,7 +944,7 @@ var MergeScripts = []queries.ScriptTest{
},
{
Query: "CALL DOLT_MERGE('feature-branch')",
ExpectedErrStr: dsess.ErrUnresolvedConflictsCommit.Error(),
ExpectedErrStr: dsess.ErrUnresolvedConflictsAutoCommit.Error(),
},
{
Query: "SELECT count(*) from dolt_conflicts_test", // transaction has been rolled back, 0 results
@@ -3486,7 +3486,7 @@ var SchemaConflictScripts = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('other')",
ExpectedErrStr: "Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1",
ExpectedErrStr: dsess.ErrUnresolvedConflictsAutoCommit.Error(),
},
{
Query: "select * from dolt_schema_conflicts",
@@ -635,36 +635,6 @@ var SchemaChangeTestsCollations = []MergeScriptTest{
},
},
},
{
// TODO: Changing a column's collation may require rewriting the table and any indexes on that column.
// For now, we just detect the schema incompatibility and return schema conflict metadata, but we could
// go further here and automatically convert the data to the new collation.
Name: "changing the collation of a column",
AncSetUpScript: []string{
"set @@autocommit=0;",
"create table t (pk int primary key, col1 varchar(32) character set utf8mb4 collate utf8mb4_bin, index col1_idx (col1));",
"insert into t values (1, 'ab'), (2, 'Ab');",
},
RightSetUpScript: []string{
"alter table t modify col1 varchar(32) character set utf8mb4 collate utf8mb4_general_ci;",
},
LeftSetUpScript: []string{
"insert into t values (3, 'c');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{"", 0, 1}},
},
{
Query: "select table_name, our_schema, their_schema, base_schema from dolt_schema_conflicts;",
Expected: []sql.Row{{"t",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_bin,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_general_ci,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;",
"CREATE TABLE `t` (\n `pk` int NOT NULL,\n `col1` varchar(32) COLLATE utf8mb4_bin,\n PRIMARY KEY (`pk`),\n KEY `col1_idx` (`col1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
}
var SchemaChangeTestsConstraints = []MergeScriptTest{
@@ -1228,7 +1228,7 @@ var DoltConflictHandlingTests = []queries.TransactionTest{
{
Query: "/* client b */ COMMIT;",
// Retrying did not help. But at least the error makes sense.
ExpectedErrStr: "Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1",
ExpectedErrStr: dsess.ErrUnresolvedConflictsCommit.Error(),
},
},
},
@@ -1419,7 +1419,7 @@ var DoltStoredProcedureTransactionTests = []queries.TransactionTest{
},
{
Query: "/* client a */ CALL DOLT_MERGE('feature-branch')",
ExpectedErrStr: dsess.ErrUnresolvedConflictsCommit.Error(),
ExpectedErrStr: dsess.ErrUnresolvedConflictsAutoCommit.Error(),
},
{ // client rolled back on merge with conflicts
Query: "/* client a */ SELECT count(*) from dolt_conflicts_test",
@@ -842,11 +842,49 @@ func (di *doltIndex) HandledFilters(filters []sql.Expression) []sql.Expression {
return handled
}
// HasContentHashedField returns true if any of the fields in this index are "content-hashed", meaning that the index
// stores a hash of the content, instead of the content itself. This is currently limited to unique indexes, which can
// use this property to store hashes of TEXT or BLOB fields and still efficiently detect uniqueness.
func (di *doltIndex) HasContentHashedField() bool {
// content-hashed fields can currently only be used in unique indexes
if !di.IsUnique() {
return false
}
contentHashedField := false
indexPkCols := di.indexSch.GetPKCols()
indexPkCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
i := indexPkCols.TagToIdx[tag]
prefixLength := uint16(0)
if len(di.prefixLengths) > i {
prefixLength = di.prefixLengths[i]
}
if sqltypes.IsTextBlob(col.TypeInfo.ToSqlType()) && prefixLength == 0 {
contentHashedField = true
return true, nil
}
return false, nil
})
return contentHashedField
}
func (di *doltIndex) Order() sql.IndexOrder {
if di.HasContentHashedField() {
return sql.IndexOrderNone
}
return di.order
}
func (di *doltIndex) Reversible() bool {
if di.HasContentHashedField() {
return false
}
return di.doltBinFormat
}
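As a hedged illustration of why Order() and Reversible() opt out above: a unique index over a TEXT or BLOB column stores hash(content) rather than the content itself, and hash order bears no relation to value order. A minimal, self-contained Go sketch (not Dolt's storage code):

package main

import (
	"crypto/sha256"
	"fmt"
)

// The index key for a content-hashed field is hash(value), so scanning the
// index in key order visits values in effectively random order; an ORDER BY
// on the column itself cannot be satisfied by such a scan.
func main() {
	vals := []string{"apple", "banana", "cherry"} // sorted by content
	for _, v := range vals {
		h := sha256.Sum256([]byte(v))
		fmt.Printf("%-6s -> index key %x\n", v, h[:4])
	}
}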
+10 -5
View File
@@ -820,17 +820,17 @@ func (t *WritableDoltTable) truncate(
sch schema.Schema,
sess *dsess.DoltSession,
) (*doltdb.Table, error) {
empty, err := durable.NewEmptyIndex(ctx, table.ValueReadWriter(), table.NodeStore(), sch)
if err != nil {
return nil, err
}
idxSet, err := table.GetIndexSet(ctx)
if err != nil {
return nil, err
}
for _, idx := range sch.Indexes().AllIndexes() {
empty, err := durable.NewEmptyIndex(ctx, table.ValueReadWriter(), table.NodeStore(), idx.Schema())
if err != nil {
return nil, err
}
idxSet, err = idxSet.PutIndex(ctx, idx.Name(), empty)
if err != nil {
return nil, err
@@ -850,6 +850,11 @@ func (t *WritableDoltTable) truncate(
}
}
empty, err := durable.NewEmptyIndex(ctx, table.ValueReadWriter(), table.NodeStore(), sch)
if err != nil {
return nil, err
}
// truncate table resets auto-increment value
newEmptyTable, err := doltdb.NewTable(ctx, table.ValueReadWriter(), table.NodeStore(), sch, empty, idxSet, nil)
if err != nil {
@@ -22,6 +22,8 @@ import (
"github.com/bcicen/jstream"
"github.com/dolthub/go-mysql-server/sql"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -52,12 +54,17 @@ func OpenJSONReader(vrw types.ValueReadWriter, path string, fs filesys.ReadableF
return NewJSONReader(vrw, r, sch)
}
// The bytes of the supplied reader are treated as UTF-8 by default. If a UTF8,
// UTF16LE or UTF16BE BOM appears as the first bytes read, it is stripped and
// the remaining contents of the reader are decoded as that encoding.
func NewJSONReader(vrw types.ValueReadWriter, r io.ReadCloser, sch schema.Schema) (*JSONReader, error) {
if sch == nil {
return nil, errors.New("schema must be provided to JsonReader")
}
decoder := jstream.NewDecoder(r, 2) // extract JSON values at a depth level of 1
textReader := transform.NewReader(r, unicode.BOMOverride(unicode.UTF8.NewDecoder()))
decoder := jstream.NewDecoder(textReader, 2) // extract JSON values at a depth level of 1
return &JSONReader{vrw: vrw, closer: r, sch: sch, jsonStream: decoder}, nil
}
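A small sketch of the BOM handling above, using only the golang.org/x/text APIs that NewJSONReader itself relies on; the input literal is illustrative:

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// "\uFEFF" is a BOM. BOMOverride strips it and switches to the detected
	// encoding; when no BOM is present it falls back to the UTF-8 decoder.
	in := "\uFEFF{\"rows\": []}"
	r := transform.NewReader(strings.NewReader(in), unicode.BOMOverride(unicode.UTF8.NewDecoder()))
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "{\"rows\": []}" -- BOM removed before JSON decoding
}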
@@ -15,6 +15,7 @@
package json
import (
"bytes"
"context"
"io"
"os"
@@ -24,6 +25,8 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -33,25 +36,7 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
func TestReader(t *testing.T) {
testJSON := `{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn"
},
{
"id": 1,
"first name": "brian",
"last name": "hendriks"
}
]
}`
fs := filesys.EmptyInMemFS("/")
require.NoError(t, fs.WriteFile("file.json", []byte(testJSON), os.ModePerm))
func testGoodJSON(t *testing.T, getReader func(types.ValueReadWriter, schema.Schema) (*JSONReader, error)) {
colColl := schema.NewColCollection(
schema.Column{
Name: "id",
@@ -83,7 +68,7 @@ func TestReader(t *testing.T) {
require.NoError(t, err)
vrw := types.NewMemoryValueStore()
reader, err := OpenJSONReader(vrw, "file.json", fs, sch)
reader, err := getReader(vrw, sch)
require.NoError(t, err)
verifySchema, err := reader.VerifySchema(sch)
@@ -109,6 +94,75 @@ func TestReader(t *testing.T) {
assert.Equal(t, enginetest.WidenRows(sqlSch.Schema, expectedRows), rows)
}
func TestReader(t *testing.T) {
testJSON := `{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn"
},
{
"id": 1,
"first name": "brian",
"last name": "hendriks"
}
]
}`
fs := filesys.EmptyInMemFS("/")
require.NoError(t, fs.WriteFile("file.json", []byte(testJSON), os.ModePerm))
testGoodJSON(t, func(vrw types.ValueReadWriter, sch schema.Schema) (*JSONReader, error) {
return OpenJSONReader(vrw, "file.json", fs, sch)
})
}
func TestReaderBOMHandling(t *testing.T) {
testJSON := `{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn"
},
{
"id": 1,
"first name": "brian",
"last name": "hendriks"
}
]
}`
t.Run("UTF-8", func(t *testing.T) {
bs := bytes.NewBuffer([]byte(testJSON))
reader := transform.NewReader(bs, unicode.UTF8.NewEncoder())
testGoodJSON(t, func(vrw types.ValueReadWriter, sch schema.Schema) (*JSONReader, error) {
return NewJSONReader(vrw, io.NopCloser(reader), sch)
})
})
t.Run("UTF-8 BOM", func(t *testing.T) {
bs := bytes.NewBuffer([]byte(testJSON))
reader := transform.NewReader(bs, unicode.UTF8BOM.NewEncoder())
testGoodJSON(t, func(vrw types.ValueReadWriter, sch schema.Schema) (*JSONReader, error) {
return NewJSONReader(vrw, io.NopCloser(reader), sch)
})
})
t.Run("UTF-16 LE BOM", func(t *testing.T) {
bs := bytes.NewBuffer([]byte(testJSON))
reader := transform.NewReader(bs, unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder())
testGoodJSON(t, func(vrw types.ValueReadWriter, sch schema.Schema) (*JSONReader, error) {
return NewJSONReader(vrw, io.NopCloser(reader), sch)
})
})
t.Run("UTF-16 BE BOM", func(t *testing.T) {
bs := bytes.NewBuffer([]byte(testJSON))
reader := transform.NewReader(bs, unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewEncoder())
testGoodJSON(t, func(vrw types.ValueReadWriter, sch schema.Schema) (*JSONReader, error) {
return NewJSONReader(vrw, io.NopCloser(reader), sch)
})
})
}
func TestReaderBadJson(t *testing.T) {
testJSON := ` {
"rows": [
@@ -27,6 +27,8 @@ import (
"unicode/utf8"
"github.com/dolthub/go-mysql-server/sql"
textunicode "golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -73,6 +75,14 @@ func OpenCSVReader(nbf *types.NomsBinFormat, path string, fs filesys.ReadableFS,
}
// NewCSVReader creates a CSVReader from a given ReadCloser. The CSVFileInfo should describe the csv file being read.
//
// The interpretation of the bytes of the supplied reader is a little murky. If
// there is a UTF8, UTF16LE or UTF16BE BOM as the first bytes read, then the
// BOM is stripped and the remaining contents of the reader are treated as that
// encoding. If we are not in any of those marked encodings, then some of the
// bytes go uninterpreted until we get to the SQL layer. It is currently the
// case that newlines must be encoded as a '0xa' byte and the delimiter must
// match |info.Delim|.
func NewCSVReader(nbf *types.NomsBinFormat, r io.ReadCloser, info *CSVFileInfo) (*CSVReader, error) {
if len(info.Delim) < 1 {
return nil, fmt.Errorf("delimiter '%s' has invalid length", info.Delim)
@@ -81,7 +91,9 @@ func NewCSVReader(nbf *types.NomsBinFormat, r io.ReadCloser, info *CSVFileInfo)
return nil, fmt.Errorf("invalid delimiter: %s", string(info.Delim))
}
br := bufio.NewReaderSize(r, ReadBufSize)
textReader := transform.NewReader(r, textunicode.BOMOverride(transform.Nop))
br := bufio.NewReaderSize(textReader, ReadBufSize)
colStrs, err := getColHeaders(br, info)
if err != nil {
@@ -102,18 +114,6 @@ func NewCSVReader(nbf *types.NomsBinFormat, r io.ReadCloser, info *CSVFileInfo)
}, nil
}
// trimBOM checks if the given string has a Byte Order Mark and removes it if present.
// The BOM is present if the first 3 bytes are \xEF\xBB\xBF, which indicates that the file is in UTF-8 encoding.
func trimBOM(s string) string {
if len(s) < 3 {
return s
}
if s[0] == '\xEF' && s[1] == '\xBB' && s[2] == '\xBF' {
return s[3:]
}
return s
}
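The transform-based replacement generalizes trimBOM from UTF-8 only to UTF-16 as well. A hedged sketch of the BOMOverride(transform.Nop) behavior described in the comment above — BOM-marked input is transcoded to UTF-8, while BOM-less bytes pass through uninterpreted:

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// Encode a CSV header as UTF-16LE with a BOM, as an imported file might be.
	enc := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder()
	utf16le, _, err := transform.Bytes(enc, []byte("pk,c1\n1,2\n"))
	if err != nil {
		panic(err)
	}

	// BOMOverride(transform.Nop): if a BOM is present, strip it and decode
	// from that encoding; otherwise leave the bytes untouched.
	r := transform.NewReader(strings.NewReader(string(utf16le)), unicode.BOMOverride(transform.Nop))
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out) // "pk,c1\n1,2\n" as plain UTF-8
}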
func getColHeaders(br *bufio.Reader, info *CSVFileInfo) ([]string, error) {
colStrs := info.Columns
if info.HasHeaderLine {
@@ -124,7 +124,6 @@ func getColHeaders(br *bufio.Reader, info *CSVFileInfo) ([]string, error) {
} else if strings.TrimSpace(line) == "" {
return nil, errors.New("Header line is empty")
}
line = trimBOM(line)
colStrsFromFile, err := csvSplitLine(line, info.Delim, info.EscapeQuotes)
if err != nil {
@@ -20,6 +20,11 @@ import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
@@ -67,6 +72,13 @@ func mustRow(r row.Row, err error) row.Row {
return r
}
func mustEncodeBytes(t *testing.T, bs []byte, enc encoding.Encoding) []byte {
ret, n, err := transform.Bytes(enc.NewEncoder(), bs)
require.NoError(t, err)
require.Equal(t, n, len(bs))
return ret
}
func TestReader(t *testing.T) {
colNames := []string{"name", "age", "title"}
_, sch := untyped.NewUntypedSchema(colNames...)
@@ -82,33 +94,42 @@ func TestReader(t *testing.T) {
mustRow(untyped.NewRowFromStrings(types.Format_Default, sch, []string{"Jack Jackson", "27"})),
}
utf8bomBytes := mustEncodeBytes(t, []byte(PersonDB1), unicode.UTF8BOM)
require.Equal(t, utf8bomBytes[0:3], []byte{0xEF, 0xBB, 0xBF})
utf16leBytes := mustEncodeBytes(t, []byte(PersonDB1), unicode.UTF16(unicode.LittleEndian, unicode.UseBOM))
utf16beBytes := mustEncodeBytes(t, []byte(PersonDB1), unicode.UTF16(unicode.BigEndian, unicode.UseBOM))
tests := []struct {
inputStr string
input []byte
expectedRows []row.Row
info *CSVFileInfo
}{
{PersonDB1, goodExpectedRows, NewCSVInfo()},
{PersonDB2, goodExpectedRows, NewCSVInfo()},
{PersonDB3, goodExpectedRows, NewCSVInfo()},
{[]byte(PersonDB1), goodExpectedRows, NewCSVInfo()},
{[]byte(PersonDB2), goodExpectedRows, NewCSVInfo()},
{[]byte(PersonDB3), goodExpectedRows, NewCSVInfo()},
{PersonDBWithBadRow, badExpectedRows, NewCSVInfo()},
{PersonDBWithBadRow2, badExpectedRows, NewCSVInfo()},
{PersonDBWithBadRow3, badExpectedRows, NewCSVInfo()},
{utf8bomBytes, goodExpectedRows, NewCSVInfo()},
{utf16leBytes, goodExpectedRows, NewCSVInfo()},
{utf16beBytes, goodExpectedRows, NewCSVInfo()},
{[]byte(PersonDBWithBadRow), badExpectedRows, NewCSVInfo()},
{[]byte(PersonDBWithBadRow2), badExpectedRows, NewCSVInfo()},
{[]byte(PersonDBWithBadRow3), badExpectedRows, NewCSVInfo()},
{
PersonDBWithoutHeaders,
[]byte(PersonDBWithoutHeaders),
goodExpectedRows,
NewCSVInfo().SetHasHeaderLine(false).SetColumns(colNames),
},
{
PersonDBDifferentHeaders,
[]byte(PersonDBDifferentHeaders),
goodExpectedRows,
NewCSVInfo().SetHasHeaderLine(true).SetColumns(colNames),
},
}
for _, test := range tests {
rows, numBad, err := readTestRows(t, test.inputStr, test.info)
rows, numBad, err := readTestRows(t, test.input, test.info)
if err != nil {
t.Fatal("Unexpected Error:", err)
@@ -136,11 +157,11 @@ func TestReader(t *testing.T) {
}
}
func readTestRows(t *testing.T, inputStr string, info *CSVFileInfo) ([]row.Row, int, error) {
func readTestRows(t *testing.T, input []byte, info *CSVFileInfo) ([]row.Row, int, error) {
const root = "/"
const path = "/file.csv"
fs := filesys.NewInMemFS(nil, map[string][]byte{path: []byte(inputStr)}, root)
fs := filesys.NewInMemFS(nil, map[string][]byte{path: input}, root)
csvR, err := OpenCSVReader(types.Format_Default, path, fs, info)
defer csvR.Close(context.Background())
+1 -1
View File
@@ -199,7 +199,7 @@ func (c *Controller) Stop() {
return
} else if c.state != controllerState_stopping {
// We should only do this transition once. We signal to |Start|
// by cloing the |stopCh|.
// by closing the |stopCh|.
close(c.stopCh)
c.state = controllerState_stopping
c.mu.Unlock()
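The comment fixed above describes a standard Go idiom: signaling shutdown by closing a channel, which wakes every waiter at once. A minimal sketch of the pattern (not the Controller's actual code):

package main

import "fmt"

func main() {
	stopCh := make(chan struct{})
	done := make(chan struct{})

	go func() {
		<-stopCh // a receive on a closed channel returns immediately
		fmt.Println("worker observed stop")
		close(done)
	}()

	// Closing (rather than sending) broadcasts to all current and future
	// receivers, so any number of goroutines can observe the signal.
	close(stopCh)
	<-done
}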
@@ -52,7 +52,14 @@ RUN git clone https://github.com/Percona-Lab/sysbench-tpcc.git
WORKDIR /mysql
RUN curl -L -O https://dev.mysql.com/get/mysql-apt-config_0.8.22-1_all.deb
RUN dpkg -i mysql-apt-config_0.8.22-1_all.deb
RUN apt-get update && apt-get install -y mysql-server
# On 2023-12-14, the GPG key this repository uses to publish these packages expired. As a workaround, we mark the repository as allowed-insecure and install the packages anyway for now.
# See https://bugs.mysql.com/bug.php?id=113427
# Hopefully we can remove this soon.
RUN sed -i.bak \
-e 's|^deb |deb [allow-insecure=true allow-weak=true allow-downgrade-to-insecure=true] |' \
-e 's|^deb-src |deb-src [allow-insecure=true allow-weak=true allow-downgrade-to-insecure=true] |' \
/etc/apt/sources.list.d/mysql.list
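For illustration only — the exact contents of mysql.list are not shown here — the sed above rewrites each repository entry along these lines:

# before
deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0
# after
deb [allow-insecure=true allow-weak=true allow-downgrade-to-insecure=true] http://repo.mysql.com/apt/debian/ bullseye mysql-8.0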
RUN apt-get update && apt-get install -y --allow-unauthenticated mysql-server
RUN mysql --version
# Install dolt
+11 -4
View File
@@ -343,8 +343,12 @@ type serialWorkingSetHead struct {
addr hash.Hash
}
func newSerialWorkingSetHead(bs []byte, addr hash.Hash) serialWorkingSetHead {
return serialWorkingSetHead{serial.GetRootAsWorkingSet(bs, serial.MessagePrefixSz), addr}
func newSerialWorkingSetHead(bs []byte, addr hash.Hash) (serialWorkingSetHead, error) {
fb, err := serial.TryGetRootAsWorkingSet(bs, serial.MessagePrefixSz)
if err != nil {
return serialWorkingSetHead{}, err
}
return serialWorkingSetHead{fb, addr}, nil
}
func (h serialWorkingSetHead) TypeName() string {
@@ -376,7 +380,10 @@ func (h serialWorkingSetHead) HeadWorkingSet() (*WorkingSetHead, error) {
ret.StagedAddr = new(hash.Hash)
*ret.StagedAddr = hash.New(h.msg.StagedRootAddrBytes())
}
mergeState := h.msg.MergeState(nil)
mergeState, err := h.msg.TryMergeState(nil)
if err != nil {
return nil, err
}
if mergeState != nil {
ret.MergeState = &MergeState{
preMergeWorkingAddr: new(hash.Hash),
@@ -503,7 +510,7 @@ func newHead(ctx context.Context, head types.Value, addr hash.Hash) (dsHead, err
return newSerialTagHead(data, addr)
}
if fid == serial.WorkingSetFileID {
return newSerialWorkingSetHead(data, addr), nil
return newSerialWorkingSetHead(data, addr)
}
if fid == serial.CommitFileID {
return newSerialCommitHead(sm, addr), nil
+2 -4
View File
@@ -403,11 +403,9 @@ func (j *ChunkJournal) UpdateGCGen(ctx context.Context, lastLock addr, next mani
}
}
// Truncate the in-memory root and root timestamp metadata to the most recent
// entry, and double check that it matches the root stored in the manifest.
// Truncate the in-memory root and root timestamp metadata
if !reflogDisabled {
j.reflogRingBuffer.TruncateToLastRecord()
// TODO: sanity check that j.reflogRingBuffer.Peek matches latest.root ?
j.reflogRingBuffer.Truncate()
}
return latest, nil
+3 -10
View File
@@ -118,18 +118,11 @@ func (rb *reflogRingBuffer) Iterate(f func(item reflogRootHashEntry) error) erro
return nil
}
// TruncateToLastRecord resets this ring buffer so that it only exposes the single, most recently written record.
// If this ring buffer has not already had any records pushed in it, then this is a no-op and the ring buffer
// remains with a zero item count.
func (rb *reflogRingBuffer) TruncateToLastRecord() {
// Truncate resets this ring buffer so that it is empty.
func (rb *reflogRingBuffer) Truncate() {
rb.mu.Lock()
defer rb.mu.Unlock()
if rb.itemCount == 0 {
return
}
rb.itemCount = 1
rb.itemCount = 0
}
// getIterationIndexes returns the start (inclusive) and end (exclusive) positions for iterating over the
+16 -16
View File
@@ -72,32 +72,32 @@ func TestIteration(t *testing.T) {
assertExpectedIterationOrder(t, buffer, []string{"tttt", "uuuu", "vvvv", "wwww", "xxxx"})
}
// TestTruncateToLastRecord asserts that the TruncateToLastRecord works correctly regardless of how much data
// TestTruncate asserts that Truncate works correctly regardless of how much data
// is currently stored in the buffer.
func TestTruncateToLastRecord(t *testing.T) {
func TestTruncate(t *testing.T) {
buffer := newReflogRingBuffer(5)
// When the buffer is empty, TruncateToLastRecord is a no-op
buffer.TruncateToLastRecord()
// When the buffer is empty, Truncate is a no-op
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
buffer.TruncateToLastRecord()
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
// When the buffer contains only a single item, TruncateToLastRecord is a no-op
// When the buffer contains a single item, Truncate empties the buffer
insertTestRecord(buffer, "aaaa")
buffer.TruncateToLastRecord()
assertExpectedIterationOrder(t, buffer, []string{"aaaa"})
buffer.TruncateToLastRecord()
assertExpectedIterationOrder(t, buffer, []string{"aaaa"})
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
// When the buffer is not full, TruncateToLastRecord reduces the buffer to the most recent logical record
// When the buffer is not full, Truncate empties the buffer
insertTestRecord(buffer, "bbbb")
insertTestRecord(buffer, "cccc")
insertTestRecord(buffer, "dddd")
buffer.TruncateToLastRecord()
assertExpectedIterationOrder(t, buffer, []string{"dddd"})
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
// When the buffer is full, TruncateToLastRecord reduces the buffer to the most recent logical record
// When the buffer is full, Truncate empties the buffer
insertTestRecord(buffer, "aaaa")
insertTestRecord(buffer, "bbbb")
insertTestRecord(buffer, "cccc")
@@ -111,8 +111,8 @@ func TestTruncateToLastRecord(t *testing.T) {
insertTestRecord(buffer, "kkkk")
insertTestRecord(buffer, "llll")
insertTestRecord(buffer, "mmmm")
buffer.TruncateToLastRecord()
assertExpectedIterationOrder(t, buffer, []string{"mmmm"})
buffer.Truncate()
assertExpectedIterationOrder(t, buffer, []string{})
}
// TestIterationConflict asserts that when iterating through a reflog ring buffer and new items are written to the
+8 -2
View File
@@ -100,12 +100,18 @@ func getCommitClosureSubtrees(msg serial.Message) ([]uint64, error) {
return nil, err
}
counts := make([]uint64, cnt)
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
m, err := serial.TryGetRootAsCommitClosure(msg, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
return decodeVarints(m.SubtreeCountsBytes(), counts), nil
}
func walkCommitClosureAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
m, err := serial.TryGetRootAsCommitClosure(msg, serial.MessagePrefixSz)
if err != nil {
return err
}
arr := m.AddressArrayBytes()
for i := 0; i < len(arr)/hash.ByteLen; i++ {
addr := hash.New(arr[i*addrSize : (i+1)*addrSize])
+8 -6
View File
@@ -42,12 +42,14 @@ type ThreeWayDiffer[K ~[]byte, O Ordering[K]] struct {
type resolveCb func(context.Context, val.Tuple, val.Tuple, val.Tuple) (val.Tuple, bool, error)
// ThreeWayDiffInfo stores contextual data that can influence the diff.
// If |LeftSchemaChange| is true, then the left side has a different schema from the base, and every row
// in both Left and Base should be considered a modification, even if they have the same bytes.
// If |RightSchemaChange| is true, then the right side has a different schema from the base, and every row
// in both Right and Base should be considered a modification, even if they have the same bytes.
// If |LeftAndRightSchemasDiffer| is true, then the left and right sides of the diff have a different schema,
// so there cannot be any convergent edits, even if two rows in Left and Right have the same bytes.
// If |LeftSchemaChange| is true, then the left side's bytes have a different interpretation from the base,
// so every row in both Left and Base should be considered a modification, even if they have the same bytes.
// If |RightSchemaChange| is true, then the right side's bytes have a different interpretation from the base,
// so every row in both Right and Base should be considered a modification, even if they have the same bytes.
// Note that these values aren't set for schema changes that have no effect on the meaning of the bytes,
// such as collation.
// If |LeftAndRightSchemasDiffer| is true, then the left and right sides of the diff have a different interpretation
// of their bytes, so there cannot be any convergent edits, even if two rows in Left and Right have the same bytes.
type ThreeWayDiffInfo struct {
LeftSchemaChange bool
RightSchemaChange bool
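A hedged sketch of how a caller might derive these flags; Schema and bytesCompatible below are stand-ins for Dolt's real schema types and comparison logic, not its API:

package main

import "fmt"

// Schema is a stand-in for Dolt's schema type in this sketch.
type Schema struct{ Columns string }

// ThreeWayDiffInfo mirrors the struct documented above.
type ThreeWayDiffInfo struct {
	LeftSchemaChange          bool
	RightSchemaChange         bool
	LeftAndRightSchemasDiffer bool
}

// bytesCompatible is an assumed predicate: do two schemas give row bytes the
// same interpretation? (A collation-only change would still return true.)
func bytesCompatible(a, b Schema) bool { return a.Columns == b.Columns }

func main() {
	base := Schema{"pk int, c1 varchar"}
	left := Schema{"pk int, c1 varchar"}          // byte layout unchanged
	right := Schema{"pk int, c1 varchar, c2 int"} // column added on the right

	info := ThreeWayDiffInfo{
		LeftSchemaChange:          !bytesCompatible(base, left),
		RightSchemaChange:         !bytesCompatible(base, right),
		LeftAndRightSchemasDiffer: !bytesCompatible(left, right),
	}
	fmt.Printf("%+v\n", info) // {LeftSchemaChange:false RightSchemaChange:true LeftAndRightSchemasDiffer:true}
}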
+17 -10
View File
@@ -65,22 +65,23 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
id := serial.GetFileID(sm)
switch id {
// NOTE: splunk uses a separate path for some printing
// NOTE: We ignore the errors from field number checks here...
case serial.StoreRootFileID:
msg := serial.GetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
ret := &strings.Builder{}
mapbytes := msg.AddressMapBytes()
printWithIndendationLevel(level, ret, "StoreRoot{%s}",
SerialMessage(mapbytes).humanReadableStringAtIndentationLevel(level+1))
return ret.String()
case serial.StashListFileID:
msg := serial.GetRootAsStashList([]byte(sm), serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsStashList([]byte(sm), serial.MessagePrefixSz)
ret := &strings.Builder{}
mapbytes := msg.AddressMapBytes()
printWithIndendationLevel(level, ret, "StashList{%s}",
SerialMessage(mapbytes).humanReadableStringAtIndentationLevel(level+1))
return ret.String()
case serial.StashFileID:
msg := serial.GetRootAsStash(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsStash(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
printWithIndendationLevel(level, ret, "\tBranchName: %s\n", msg.BranchName())
@@ -90,7 +91,7 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
printWithIndendationLevel(level, ret, "}")
return ret.String()
case serial.TagFileID:
msg := serial.GetRootAsTag(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsTag(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
printWithIndendationLevel(level, ret, "\tName: %s\n", msg.Name())
@@ -101,7 +102,7 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
printWithIndendationLevel(level, ret, "}")
return ret.String()
case serial.WorkingSetFileID:
msg := serial.GetRootAsWorkingSet(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsWorkingSet(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
printWithIndendationLevel(level, ret, "\tName: %s\n", msg.Name())
@@ -113,7 +114,7 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
printWithIndendationLevel(level, ret, "}")
return ret.String()
case serial.CommitFileID:
msg := serial.GetRootAsCommit(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsCommit(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
printWithIndendationLevel(level, ret, "\tName: %s\n", msg.Name())
@@ -150,7 +151,7 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
printWithIndendationLevel(level, ret, "}")
return ret.String()
case serial.RootValueFileID:
msg := serial.GetRootAsRootValue(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsRootValue(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
printWithIndendationLevel(level, ret, "\tFeatureVersion: %d\n", msg.FeatureVersion())
@@ -160,7 +161,7 @@ func (sm SerialMessage) humanReadableStringAtIndentationLevel(level int) string
printWithIndendationLevel(level, ret, "}")
return ret.String()
case serial.TableFileID:
msg := serial.GetRootAsTable(sm, serial.MessagePrefixSz)
msg, _ := serial.TryGetRootAsTable(sm, serial.MessagePrefixSz)
ret := &strings.Builder{}
printWithIndendationLevel(level, ret, "{\n")
@@ -274,7 +275,10 @@ func (sm SerialMessage) WalkAddrs(nbf *NomsBinFormat, cb func(addr hash.Hash) er
return err
}
}
mergeState := msg.MergeState(nil)
mergeState, err := msg.TryMergeState(nil)
if err != nil {
return err
}
if mergeState != nil {
if err = cb(hash.New(mergeState.PreWorkingRootAddrBytes())); err != nil {
return err
@@ -310,7 +314,10 @@ func (sm SerialMessage) WalkAddrs(nbf *NomsBinFormat, cb func(addr hash.Hash) er
return err
}
confs := msg.Conflicts(nil)
confs, err := msg.TryConflicts(nil)
if err != nil {
return err
}
addr := hash.New(confs.DataBytes())
if !addr.IsEmpty() {
if err = cb(addr); err != nil {
+40
View File
@@ -314,6 +314,46 @@ SQL
[[ ! "$output" =~ "4" ]] || false
}
@test "checkout: -B flag will forcefully reset an existing branch" {
dolt sql -q 'create table test (id int primary key);'
dolt sql -q 'insert into test (id) values (89012);'
dolt commit -Am 'first change.'
dolt sql -q 'insert into test (id) values (76543);'
dolt commit -Am 'second change.'
dolt checkout -b testbr main~1
run dolt sql -q "select * from test;"
[[ "$output" =~ "89012" ]] || false
[[ ! "$output" =~ "76543" ]] || false
# make a change to the branch which we'll lose
dolt sql -q 'insert into test (id) values (19283);'
dolt commit -Am 'change to testbr.'
dolt checkout main
dolt checkout -B testbr main
run dolt sql -q "select * from test;"
[[ "$output" =~ "89012" ]] || false
[[ "$output" =~ "76543" ]] || false
[[ ! "$output" =~ "19283" ]] || false
}
@test "checkout: -B will create a branch that does not exist" {
dolt sql -q 'create table test (id int primary key);'
dolt sql -q 'insert into test (id) values (89012);'
dolt commit -Am 'first change.'
dolt sql -q 'insert into test (id) values (76543);'
dolt commit -Am 'second change.'
dolt checkout -B testbr main~1
run dolt sql -q "select * from test;"
[[ "$output" =~ "89012" ]] || false
[[ ! "$output" =~ "76543" ]] || false
}
@test "checkout: attempting to checkout a detached head shows a suggestion instead" {
dolt sql -q "create table test (id int primary key);"
dolt add .
Binary file not shown.
id title start date end date first name last name
0 ceo tim sehn
1 founder aaron son
2 founder brian hendriks
Binary file not shown.
id title start date end date first name last name
0 ceo tim sehn
1 founder aaron son
2 founder brian hendriks
@@ -0,0 +1,4 @@
id, title, start date, end date, first name, last name
0, "ceo", "", "", "tim", "sehn"
1, "founder", "", "", "aaron", "son"
2, "founder", "", "", "brian", "hendriks"
id title start date end date first name last name
0 ceo tim sehn
1 founder aaron son
2 founder brian hendriks
@@ -0,0 +1,28 @@
{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn",
"title": "ceo",
"start date": "",
"end date": ""
},
{
"id": 1,
"first name": "aaron",
"last name": "son",
"title": "founder",
"start date": "",
"end date": ""
},
{
"id": 2,
"first name": "brian",
"last name": "hendricks",
"title": "founder",
"start date": "",
"end date": ""
}
]
}
@@ -118,3 +118,18 @@ CSV
[[ "$output" =~ "color: green" ]] || false
[[ "$output" =~ "Errors during import can be ignored using '--continue'" ]] || false
}
@test "import-append-tables: different schema warning lists differing columns" {
dolt sql -q "CREATE TABLE t (pk int primary key, col1 int);"
run dolt table import -a t <<CSV
pk, col2
1, 1
CSV
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "If unintentional, check for any typos in the import file's header" ]] || false
[[ "$output" =~ "Missing columns in t:" ]] || false
[[ "$output" =~ " col1" ]] || false
[[ "$output" =~ "Extra columns in import file:" ]] || false
[[ "$output" =~ " col2" ]] || false
}
@@ -60,23 +60,6 @@ teardown() {
teardown_common
}
@test "import-create-tables: correctly ignores byte order mark (BOM)" {
printf '\xEF\xBB\xBF' > bom.csv
cat <<DELIM >> bom.csv
c1,c2
1,2
DELIM
run dolt table import -c bom bom.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt sql -q "select c1 from bom"
[ "$status" -eq 0 ]
[[ "$output" =~ "1" ]] || false
}
@test "import-create-tables: create a table with json import" {
run dolt table import -c -s `batshelper employees-sch.sql` employees `batshelper employees-tbl.json`
[ "$status" -eq 0 ]
@@ -90,6 +73,46 @@ DELIM
[ "${#lines[@]}" -eq 7 ]
}
@test "import-create-tables: create a table with json import, utf8 with bom" {
run dolt table import -c -s `batshelper employees-sch.sql` employees `batshelper employees-tbl.utf8bom.json`
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "employees" ]] || false
run dolt sql -q "select * from employees"
[ "$status" -eq 0 ]
[[ "$output" =~ "tim" ]] || false
[ "${#lines[@]}" -eq 7 ]
}
@test "import-create-tables: create a table with json import, utf16le with bom" {
run dolt table import -c -s `batshelper employees-sch.sql` employees `batshelper employees-tbl.utf16lebom.json`
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "employees" ]] || false
run dolt sql -q "select * from employees"
[ "$status" -eq 0 ]
[[ "$output" =~ "tim" ]] || false
[ "${#lines[@]}" -eq 7 ]
}
@test "import-create-tables: create a table with json import, utf16be with bom" {
run dolt table import -c -s `batshelper employees-sch.sql` employees `batshelper employees-tbl.utf16bebom.json`
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt ls
[ "$status" -eq 0 ]
[[ "$output" =~ "employees" ]] || false
run dolt sql -q "select * from employees"
[ "$status" -eq 0 ]
[[ "$output" =~ "tim" ]] || false
[ "${#lines[@]}" -eq 7 ]
}
@test "import-create-tables: create a table with json import. no schema." {
run dolt table import -c employees `batshelper employees-tbl.json`
[ "$status" -ne 0 ]
@@ -130,7 +153,7 @@ DELIM
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false
# Sanity Check
! [[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
! [[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
run dolt sql -q "select * from test"
[ "$status" -eq 0 ]
@@ -696,7 +719,7 @@ DELIM
run dolt table import -s schema.sql -c subset data.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
# schema argument subsets the data and adds empty column
run dolt sql -r csv -q "select * from subset ORDER BY pk"
@@ -348,7 +348,7 @@ DELIM
run dolt table import -r test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
@@ -375,8 +375,11 @@ DELIM
dolt sql -q "insert into subset values (1000, 100, 1000, 10000)"
run dolt table import -r subset data.csv
! [[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "If unintentional, check for any typos in the import file's header" ]] || false
[[ "$output" =~ "Extra columns in import file:" ]] || false
[[ "$output" =~ " c4" ]] || false
# schema argument subsets the data and adds empty column
run dolt sql -r csv -q "select * from subset ORDER BY pk"
@@ -385,6 +388,34 @@ DELIM
[ "${lines[1]}" = "0,1,2,3" ]
}
@test "import-replace-tables: different schema warning lists differing columns" {
cat <<SQL > schema.sql
CREATE TABLE t (
pk INT NOT NULL,
c1 INT,
c2 INT,
c3 INT,
PRIMARY KEY (pk)
);
SQL
cat <<DELIM > data.csv
pk,c4,c1,c3
0,4,1,3
DELIM
dolt sql < schema.sql
dolt sql -q "insert into t values (1000, 100, 1000, 10000)"
run dolt table import -r t data.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "If unintentional, check for any typos in the import file's header" ]] || false
[[ "$output" =~ "Missing columns in t:" ]] || false
[[ "$output" =~ " c2" ]] || false
[[ "$output" =~ "Extra columns in import file:" ]] || false
[[ "$output" =~ " c4" ]] || false
}
@test "import-replace-tables: Replace that breaks fk constraints correctly errors" {
dolt sql <<SQL
CREATE TABLE colors (
+121 -14
View File
@@ -119,7 +119,7 @@ teardown() {
[[ "$output" =~ "Rows Processed: 2, Additions: 2, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
# Sanity check
! [[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
! [[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
# Validate that a successful import with no bad rows does not print the following
! [[ "$output" =~ "The following rows were skipped:" ]] || false
@@ -270,6 +270,84 @@ SQL
[[ "${lines[6]}" =~ "end date" ]] || false
}
@test "import-update-tables: update table with a csv with columns in different order, utf8 with bom" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` varchar(20) NOT NULL COMMENT 'tag:0',
\`first name\` LONGTEXT COMMENT 'tag:1',
\`last name\` LONGTEXT COMMENT 'tag:2',
\`title\` LONGTEXT COMMENT 'tag:3',
\`start date\` LONGTEXT COMMENT 'tag:4',
\`end date\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (id)
);
SQL
run dolt table import -u employees `batshelper employees-tbl-schema-unordered.utf8bom.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt schema export employees
[[ "$status" -eq 0 ]] || false
[[ "${lines[1]}" =~ "id" ]] || false
[[ "${lines[2]}" =~ "first name" ]] || false
[[ "${lines[3]}" =~ "last name" ]] || false
[[ "${lines[4]}" =~ "title" ]] || false
[[ "${lines[5]}" =~ "start date" ]] || false
[[ "${lines[6]}" =~ "end date" ]] || false
}
@test "import-update-tables: update table with a csv with columns in different order, utf16le with bom" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` varchar(20) NOT NULL COMMENT 'tag:0',
\`first name\` LONGTEXT COMMENT 'tag:1',
\`last name\` LONGTEXT COMMENT 'tag:2',
\`title\` LONGTEXT COMMENT 'tag:3',
\`start date\` LONGTEXT COMMENT 'tag:4',
\`end date\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (id)
);
SQL
run dolt table import -u employees `batshelper employees-tbl-schema-unordered.utf16lebom.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt schema export employees
[[ "$status" -eq 0 ]] || false
[[ "${lines[1]}" =~ "id" ]] || false
[[ "${lines[2]}" =~ "first name" ]] || false
[[ "${lines[3]}" =~ "last name" ]] || false
[[ "${lines[4]}" =~ "title" ]] || false
[[ "${lines[5]}" =~ "start date" ]] || false
[[ "${lines[6]}" =~ "end date" ]] || false
}
@test "import-update-tables: update table with a csv with columns in different order, utf16be with bom" {
dolt sql <<SQL
CREATE TABLE employees (
\`id\` varchar(20) NOT NULL COMMENT 'tag:0',
\`first name\` LONGTEXT COMMENT 'tag:1',
\`last name\` LONGTEXT COMMENT 'tag:2',
\`title\` LONGTEXT COMMENT 'tag:3',
\`start date\` LONGTEXT COMMENT 'tag:4',
\`end date\` LONGTEXT COMMENT 'tag:5',
PRIMARY KEY (id)
);
SQL
run dolt table import -u employees `batshelper employees-tbl-schema-unordered.utf16bebom.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 3, Additions: 3, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
run dolt schema export employees
[[ "$status" -eq 0 ]] || false
[[ "${lines[1]}" =~ "id" ]] || false
[[ "${lines[2]}" =~ "first name" ]] || false
[[ "${lines[3]}" =~ "last name" ]] || false
[[ "${lines[4]}" =~ "title" ]] || false
[[ "${lines[5]}" =~ "start date" ]] || false
[[ "${lines[6]}" =~ "end date" ]] || false
}
@test "import-update-tables: updating table by inputting string longer than char column throws an error" {
cat <<DELIM > 1pk1col-rpt-chars.csv
pk,c
@@ -567,7 +645,7 @@ DELIM
run dolt table import -u test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
@@ -578,7 +656,7 @@ DELIM
[[ "$output" =~ "1" ]] || false
}
@test "import-update-tables: csv files has less columns that schema -u" {
@test "import-update-tables: csv file has less columns than schema -u" {
cat <<DELIM > 1pk5col-ints-updt.csv
pk,c1,c2,c5,c3
0,1,2,6,3
@@ -588,7 +666,7 @@ DELIM
run dolt table import -u test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
@@ -611,7 +689,7 @@ DELIM
[ "$status" -eq 0 ]
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
! [[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
! [[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
run dolt sql -r csv -q "select * from test"
[ "${lines[1]}" = "0,1,2,3,4,6" ]
@@ -630,9 +708,12 @@ DELIM
run dolt table import -u test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "If unintentional, check for any typos in the import file's header" ]] || false
[[ "$output" =~ "Extra columns in import file:" ]] || false
[[ "$output" =~ " c7" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
! [[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
run dolt sql -r csv -q "select * from test"
[ "${lines[1]}" = "0,1,2,3,4,6" ]
@@ -641,6 +722,32 @@ DELIM
[[ "$output" =~ "1" ]] || false
}
@test "import-update-tables: different schema warning lists differing columns" {
cat <<DELIM > 1pk5col-ints-updt.csv
pk,c4,c5,c1,c3,c7
0,4,6,1,3,100
DELIM
dolt sql < 1pk5col-ints-sch.sql
run dolt table import -u test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "${lines[0]}" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "${lines[1]}" =~ "If unintentional, check for any typos in the import file's header" ]] || false
[[ "${lines[2]}" =~ "Missing columns in test:" ]] || false
[[ "${lines[3]}" =~ " c2" ]] || false
[[ "${lines[4]}" =~ "Extra columns in import file:" ]] || false
[[ "${lines[5]}" =~ " c7" ]] || false
[[ "${lines[6]}" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "${lines[7]}" =~ "Import completed successfully." ]] || false
run dolt sql -r csv -q "select * from test"
[ "${lines[1]}" = "0,1,,3,4,6" ]
run dolt sql -q "select count(*) from test"
[[ "$output" =~ "1" ]] || false
}
@test "import-update-tables: just update one column in a big table" {
cat <<DELIM > 1pk5col-ints-updt.csv
pk,c2
@@ -653,7 +760,7 @@ DELIM
run dolt table import -u test 1pk5col-ints-updt.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 0, Modifications: 1, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
@@ -693,7 +800,7 @@ DELIM
run dolt table import -u keyless data.csv
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: There are fewer columns in the import file's schema than the table's schema" ]] || false
[[ "$output" =~ "Warning: The import file's schema does not match the table's schema" ]] || false
[[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
[[ "$output" =~ "Import completed successfully." ]] || false
@@ -1224,12 +1331,12 @@ DELIM
# Add a continue statement
run dolt table import -u --continue test bad-updates.csv
[ "$status" -eq 0 ]
[[ "${lines[2]}" =~ "The following rows were skipped:" ]] || false
[[ "${lines[3]}" =~ '[5,7,5]' ]] || false
[[ "${lines[4]}" =~ '[6,5,5]' ]] || false
[[ "${lines[5]}" =~ "Rows Processed: 0, Additions: 0, Modifications: 0, Had No Effect: 0" ]] || false
[[ "${lines[6]}" =~ "Lines skipped: 2" ]] || false
[[ "${lines[7]}" =~ "Import completed successfully." ]] || false
[[ "${lines[4]}" =~ "The following rows were skipped:" ]] || false
[[ "${lines[5]}" =~ '[5,7,5]' ]] || false
[[ "${lines[6]}" =~ '[6,5,5]' ]] || false
[[ "${lines[7]}" =~ "Rows Processed: 0, Additions: 0, Modifications: 0, Had No Effect: 0" ]] || false
[[ "${lines[8]}" =~ "Lines skipped: 2" ]] || false
[[ "${lines[9]}" =~ "Import completed successfully." ]] || false
}
@test "import-update-tables: test error when import bad csv with nulls" {
+25 -1
View File
@@ -59,7 +59,7 @@ teardown() {
[[ "$output" =~ "gc - Cleans up unreferenced data from the repository." ]] || false
[[ "$output" =~ "filter-branch - Edits the commit history using the provided query." ]] || false
[[ "$output" =~ "merge-base - Find the common ancestor of two commits." ]] || false
[[ "$output" =~ "version - Displays the current Dolt cli version." ]] || false
[[ "$output" =~ "version - Displays the version for the Dolt binary." ]] || false
[[ "$output" =~ "dump - Export all tables in the working set into a file." ]] || false
}
@@ -140,6 +140,30 @@ teardown() {
[ "$status" -eq 0 ]
}
@test "no-repo: dolt version prints out of date warning" {
echo "2.0.0" > $DOLT_ROOT_PATH/.dolt/version_check.txt
run dolt version
[ "$status" -eq 0 ]
[[ "$output" =~ "Warning: you are on an old version of Dolt" ]] || false
}
@test "no-repo: dolt version ahead of saved version does not print warning" {
echo "1.27.0" > $DOLT_ROOT_PATH/.dolt/version_check.txt
run dolt version
[ "$status" -eq 0 ]
[[ ! "$output" =~ "Warning: you are on an old version of Dolt" ]] || false
}
@test "no-repo: dolt version with bad version_check.txt does not print error" {
echo "bad version" > $DOLT_ROOT_PATH/.dolt/version_check.txt
run dolt version
[ "$status" -eq 0 ]
[[ ! "$output" =~ "failed to parse version number" ]] || false
}
# Tests for dolt commands outside of a dolt repository
NOT_VALID_REPO_ERROR="The current directory is not a valid dolt repository."
@test "no-repo: dolt status outside of a dolt repository" {
+3
View File
@@ -1,6 +1,8 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
# NOTE: These are currently disabled because the high variance in GitHub CI makes them unreliable.
# This BATS test attempts to detect performance regressions when using standard workflows on large datasets.
# Please note that this is a rough approach that is not designed to detect all performance issues, merely an extra
# safeguard against bugs that cause large (order-of-magnitude+) regressions.
@@ -31,6 +33,7 @@ create_repo() {
}
setup() {
skip
cp -r $BATS_TEST_DIRNAME/performance-repo/ $BATS_TMPDIR/dolt-repo-$$
cd $BATS_TMPDIR/dolt-repo-$$
}
+2 -12
View File
@@ -190,12 +190,7 @@ SQL
run dolt reflog
[ "$status" -eq 0 ]
if [ "$SQL_ENGINE" = "remote-engine" ]; then
[ "${#lines[@]}" -eq 1 ]
[[ "$output" =~ "Initialize data repository" ]] || false
else
[ "${#lines[@]}" -eq 0 ]
fi
[ "${#lines[@]}" -eq 0 ]
}
@test "reflog: garbage collection with newgen data" {
@@ -224,12 +219,7 @@ SQL
run dolt reflog main
[ "$status" -eq 0 ]
if [ "$SQL_ENGINE" = "remote-engine" ]; then
[ "${#lines[@]}" -eq 1 ]
[[ "$output" =~ "inserting row 2" ]] || false
else
[ "${#lines[@]}" -eq 0 ]
fi
[ "${#lines[@]}" -eq 0 ]
}
@test "reflog: too many arguments given" {
+2 -2
View File
@@ -340,7 +340,7 @@ call dolt_checkout('main');
call dolt_merge('feature-branch');
SQL
log_status_eq 1
[[ $output =~ "Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1" ]] || false
[[ $output =~ "Merge conflict detected, @autocommit transaction rolled back. @autocommit must be disabled so that merge conflicts can be resolved using the dolt_conflicts and dolt_schema_conflicts tables before manually committing the transaction. Alternatively, to commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1" ]] || false
run dolt status
log_status_eq 0
@@ -391,7 +391,7 @@ call dolt_checkout('main');
call dolt_merge('feature-branch');
SQL
log_status_eq 1
[[ $output =~ "Merge conflict detected, transaction rolled back. Merge conflicts must be resolved using the dolt_conflicts tables before committing a transaction. To commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1" ]] || false
[[ $output =~ "Merge conflict detected, @autocommit transaction rolled back. @autocommit must be disabled so that merge conflicts can be resolved using the dolt_conflicts and dolt_schema_conflicts tables before manually committing the transaction. Alternatively, to commit transactions with merge conflicts, set @@dolt_allow_commit_conflicts = 1" ]] || false
# back on the command line, our session state is clean
run dolt status
@@ -345,3 +345,108 @@ tests:
result:
columns: ["Level", "Code", "Message"]
rows: []
- name: users and grants and branch control replicate to multiple standbys
multi_repos:
- name: server1
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby1
remote_url_template: http://localhost:3852/{database}
- name: standby2
remote_url_template: http://localhost:3853/{database}
bootstrap_role: primary
bootstrap_epoch: 1
remotesapi:
port: 3851
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby1
remote_url_template: http://localhost:3851/{database}
- name: standby2
remote_url_template: http://localhost:3853/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 3852
server:
args: ["--config", "server.yaml"]
port: 3310
- name: server3
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3311
cluster:
standby_remotes:
- name: standby1
remote_url_template: http://localhost:3851/{database}
- name: standby2
remote_url_template: http://localhost:3852/{database}
bootstrap_role: standby
bootstrap_epoch: 1
remotesapi:
port: 3853
server:
args: ["--config", "server.yaml"]
port: 3311
connections:
- on: server1
queries:
- exec: 'SET @@PERSIST.dolt_cluster_ack_writes_timeout_secs = 10'
- exec: 'create database repo1'
- exec: "use repo1"
- exec: 'create table vals (i int primary key)'
- exec: 'insert into vals values (0),(1),(2),(3),(4)'
- exec: 'create user "aaron"@"%" IDENTIFIED BY "aaronspassword"'
- exec: 'grant ALL ON *.* to "aaron"@"%"'
- exec: 'create user "brian"@"%" IDENTIFIED BY "brianpassword"'
- exec: 'grant ALL ON *.* to "brian"@"%"'
- exec: 'delete from dolt_branch_control'
- exec: 'insert into dolt_branch_control values ("repo1", "main", "aaron", "%", "admin")'
- on: server2
user: 'aaron'
password: 'aaronspassword'
queries:
- exec: "use repo1"
- query: 'select count(*) from vals'
result:
columns: ["count(*)"]
rows: [["5"]]
- query: 'select count(*) from dolt_branch_control'
result:
columns: ["count(*)"]
rows: [["1"]]
- on: server3
user: 'aaron'
password: 'aaronspassword'
queries:
- exec: "use repo1"
- query: 'select count(*) from vals'
result:
columns: ["count(*)"]
rows: [["5"]]
- query: 'select count(*) from dolt_branch_control'
result:
columns: ["count(*)"]
rows: [["1"]]
@@ -197,7 +197,7 @@ export const diffTests = [
to_name: "myview",
to_fragment: "CREATE VIEW `myview` AS SELECT * FROM test",
to_extra: { CreatedAt: 0 },
to_sql_mode: 'STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY',
to_sql_mode: 'NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES',
to_commit: "WORKING",
to_commit_date: "2023-03-09T07:56:29.035Z",
from_type: null,
@@ -34,7 +34,7 @@ export const viewsTests = [
name: "myview",
fragment: "CREATE VIEW `myview` AS SELECT * FROM test",
extra: { CreatedAt: 0 },
sql_mode: 'STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY',
sql_mode: 'NO_ENGINE_SUBSTITUTION,ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES',
},
],
},