Merge branch 'main' into james/conflicts

This commit is contained in:
James Cor
2022-09-26 10:03:11 -07:00
55 changed files with 2307 additions and 369 deletions

View File

@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/events"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/earl"
@@ -203,15 +202,8 @@ func createRemote(ctx context.Context, remoteName, remoteUrl string, params map[
r := env.NewRemote(remoteName, remoteUrl, params)
ddb, err := r.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
bdr := errhand.BuildDError("error: failed to get remote db").AddCause(err)
if err == remotestorage.ErrInvalidDoltSpecPath {
urlObj, _ := earl.Parse(remoteUrl)
bdr.AddDetails("'%s' should be in the format 'organization/repo'", urlObj.Path)
}
return env.NoRemote, nil, bdr.Build()
}

View File

@@ -193,7 +193,7 @@ func performCommit(ctx context.Context, commandStr string, args []string, dEnv *
mergeParentCommits = parentsHeadForAmend
}
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData(), actions.CommitStagedProps{
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData().Ddb, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag) || apr.Contains(cli.AmendFlag),

View File

@@ -391,8 +391,7 @@ func diffUserTable(
}
if dArgs.diffParts&Summary != 0 {
numCols := fromSch.GetAllCols().Size()
return printDiffSummary(ctx, td, numCols)
return printDiffSummary(ctx, td, fromSch.GetAllCols().Size(), toSch.GetAllCols().Size())
}
if dArgs.diffParts&SchemaOnlyDiff != 0 {

View File

@@ -65,7 +65,7 @@ func newDiffWriter(diffOutput diffOutput) (diffWriter, error) {
}
}
func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errhand.VerboseError {
func printDiffSummary(ctx context.Context, td diff.TableDelta, oldColLen, newColLen int) errhand.VerboseError {
// todo: use errgroup.Group
ae := atomicerr.New()
ch := make(chan diff.DiffSummaryProgress)
@@ -89,11 +89,13 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
acc.Removes += p.Removes
acc.Changes += p.Changes
acc.CellChanges += p.CellChanges
acc.NewSize += p.NewSize
acc.OldSize += p.OldSize
acc.NewRowSize += p.NewRowSize
acc.OldRowSize += p.OldRowSize
acc.NewCellSize += p.NewCellSize
acc.OldCellSize += p.OldCellSize
if count%10000 == 0 {
eP.Printf("prev size: %d, new size: %d, adds: %d, deletes: %d, modifications: %d\n", acc.OldSize, acc.NewSize, acc.Adds, acc.Removes, acc.Changes)
eP.Printf("prev size: %d, new size: %d, adds: %d, deletes: %d, modifications: %d\n", acc.OldRowSize, acc.NewRowSize, acc.Adds, acc.Removes, acc.Changes)
eP.Display()
}
@@ -108,10 +110,10 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
keyless, err := td.IsKeyless(ctx)
if err != nil {
return nil
return errhand.BuildDError("").AddCause(err).Build()
}
if (acc.Adds + acc.Removes + acc.Changes) == 0 {
if (acc.Adds+acc.Removes+acc.Changes) == 0 && (acc.OldCellSize-acc.NewCellSize) == 0 {
cli.Println("No data changes. See schema changes by using -s or --schema.")
return nil
}
@@ -119,24 +121,27 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
if keyless {
printKeylessSummary(acc)
} else {
printSummary(acc, colLen)
printSummary(acc, oldColLen, newColLen)
}
return nil
}
func printSummary(acc diff.DiffSummaryProgress, colLen int) {
rowsUnmodified := uint64(acc.OldSize - acc.Changes - acc.Removes)
func printSummary(acc diff.DiffSummaryProgress, oldColLen, newColLen int) {
numCellInserts, numCellDeletes := sqle.GetCellsAddedAndDeleted(acc, newColLen)
rowsUnmodified := uint64(acc.OldRowSize - acc.Changes - acc.Removes)
unmodified := pluralize("Row Unmodified", "Rows Unmodified", rowsUnmodified)
insertions := pluralize("Row Added", "Rows Added", acc.Adds)
deletions := pluralize("Row Deleted", "Rows Deleted", acc.Removes)
changes := pluralize("Row Modified", "Rows Modified", acc.Changes)
cellInsertions := pluralize("Cell Added", "Cells Added", numCellInserts)
cellDeletions := pluralize("Cell Deleted", "Cells Deleted", numCellDeletes)
cellChanges := pluralize("Cell Modified", "Cells Modified", acc.CellChanges)
oldValues := pluralize("Entry", "Entries", acc.OldSize)
newValues := pluralize("Entry", "Entries", acc.NewSize)
oldValues := pluralize("Row Entry", "Row Entries", acc.OldRowSize)
newValues := pluralize("Row Entry", "Row Entries", acc.NewRowSize)
percentCellsChanged := float64(100*acc.CellChanges) / (float64(acc.OldSize) * float64(colLen))
percentCellsChanged := float64(100*acc.CellChanges) / (float64(acc.OldRowSize) * float64(oldColLen))
safePercent := func(num, dom uint64) float64 {
// returns +Inf for x/0 where x > 0
@@ -146,10 +151,12 @@ func printSummary(acc diff.DiffSummaryProgress, colLen int) {
return float64(100*num) / (float64(dom))
}
cli.Printf("%s (%.2f%%)\n", unmodified, safePercent(rowsUnmodified, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", insertions, safePercent(acc.Adds, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", deletions, safePercent(acc.Removes, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", changes, safePercent(acc.Changes, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", unmodified, safePercent(rowsUnmodified, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", insertions, safePercent(acc.Adds, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", deletions, safePercent(acc.Removes, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", changes, safePercent(acc.Changes, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", cellInsertions, safePercent(numCellInserts, acc.OldCellSize))
cli.Printf("%s (%.2f%%)\n", cellDeletions, safePercent(numCellDeletes, acc.OldCellSize))
cli.Printf("%s (%.2f%%)\n", cellChanges, percentCellsChanged)
cli.Printf("(%s vs %s)\n\n", oldValues, newValues)
}

View File

@@ -33,6 +33,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
"github.com/dolthub/dolt/go/libraries/utils/config"
@@ -48,17 +49,18 @@ type SqlEngine struct {
}
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
ClusterController *cluster.Controller
}
// NewSqlEngine returns a SqlEngine
@@ -97,6 +99,11 @@ func NewSqlEngine(
}
pro = pro.WithRemoteDialer(mrEnv.RemoteDialProvider())
if config.ClusterController != nil {
config.ClusterController.ManageSystemVariables(sql.SystemVariables)
config.ClusterController.RegisterStoredProcedures(pro)
}
// Load in privileges from file, if it exists
persister := mysql_file_handler.NewPersister(config.PrivFilePath, config.DoltCfgDirPath)
data, err := persister.LoadData()

View File

@@ -111,9 +111,7 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), dEnv)
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
err = actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err)
}
err = actions.HandleInitRemoteStorageClientErr(opts.Remote.Name, opts.Remote.Url, err)
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}

View File

@@ -35,6 +35,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/remotesrv"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)
@@ -123,6 +124,11 @@ func Serve(
}
}
clusterController, err := cluster.NewController(serverConfig.ClusterConfig(), mrEnv.Config())
if err != nil {
return err, nil
}
serverConf, sErr, cErr := getConfigFromServerConfig(serverConfig)
if cErr != nil {
return nil, cErr
@@ -132,15 +138,16 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
ClusterController: clusterController,
}
sqlEngine, err := engine.NewSqlEngine(
ctx,

View File

@@ -16,12 +16,15 @@ package sqlserver
import (
"crypto/tls"
"errors"
"fmt"
"net"
"path/filepath"
"strings"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
)
// LogLevel defines the available levels of logging for the server.
@@ -146,6 +149,8 @@ type ServerConfig interface {
// as a dolt remote for things like `clone`, `fetch` and read
// replication.
RemotesapiPort() *int
// ClusterConfig is the configuration for clustering in this sql-server.
ClusterConfig() cluster.Config
}
type commandLineServerConfig struct {
@@ -273,6 +278,10 @@ func (cfg *commandLineServerConfig) RemotesapiPort() *int {
return cfg.remotesapiPort
}
// ClusterConfig implements ServerConfig. It unconditionally returns nil:
// a server configured purely from command-line flags carries no cluster
// replication configuration.
func (cfg *commandLineServerConfig) ClusterConfig() cluster.Config {
return nil
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
// JSON string.
func (cfg *commandLineServerConfig) PrivilegeFilePath() string {
@@ -453,6 +462,34 @@ func ValidateConfig(config ServerConfig) error {
if config.RequireSecureTransport() && config.TLSCert() == "" && config.TLSKey() == "" {
return fmt.Errorf("require_secure_transport can only be `true` when a tls_key and tls_cert are provided.")
}
return ValidateClusterConfig(config.ClusterConfig())
}
// ValidateClusterConfig checks the semantic validity of a cluster.Config.
// A nil config is valid (clustering disabled). Otherwise it requires:
//   - at least one standby remote, each with a non-empty name and a
//     remote_url_template containing the "{database}" placeholder,
//   - a bootstrap_role of "primary", "standby", or empty,
//   - a non-negative bootstrap_epoch,
//   - a remotesapi port in the valid TCP range 0-65535.
//
// Returns a descriptive error naming the offending field, or nil.
func ValidateClusterConfig(config cluster.Config) error {
	if config == nil {
		return nil
	}
	remotes := config.StandbyRemotes()
	if len(remotes) == 0 {
		return errors.New("cluster config: must supply standby_remotes when supplying cluster configuration.")
	}
	for i := range remotes {
		if remotes[i].Name() == "" {
			return fmt.Errorf("cluster: standby_remotes[%d]: name: Cannot be empty", i)
		}
		// Each database maps onto its own remote URL via this template, so
		// the {database} placeholder is mandatory.
		if !strings.Contains(remotes[i].RemoteURLTemplate(), "{database}") {
			return fmt.Errorf("cluster: standby_remotes[%d]: remote_url_template: is \"%s\" but must include the {database} template parameter", i, remotes[i].RemoteURLTemplate())
		}
	}
	// Fixed: error messages previously misspelled "bootstrap" as "boostrap".
	if config.BootstrapRole() != "" && config.BootstrapRole() != "primary" && config.BootstrapRole() != "standby" {
		return fmt.Errorf("cluster: bootstrap_role: is \"%s\" but must be \"primary\" or \"standby\"", config.BootstrapRole())
	}
	if config.BootstrapEpoch() < 0 {
		return fmt.Errorf("cluster: bootstrap_epoch: is %d but must be >= 0", config.BootstrapEpoch())
	}
	if config.RemotesAPIConfig().Port() < 0 || config.RemotesAPIConfig().Port() > 65535 {
		return fmt.Errorf("cluster: remotesapi: port: is not in range 0-65535: %d", config.RemotesAPIConfig().Port())
	}
	return nil
}

View File

@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
)
func strPtr(s string) *string {
@@ -114,7 +115,11 @@ type MetricsYAMLConfig struct {
}
type RemotesapiYAMLConfig struct {
Port *int `yaml:"port"`
Port_field *int `yaml:"port"`
}
// Port returns the configured remotesapi port.
// NOTE(review): this dereferences Port_field unconditionally and will panic
// when the port was absent from the YAML; callers presumably gate on
// RemotesapiPort() returning non-nil first — confirm before calling directly.
func (r RemotesapiYAMLConfig) Port() int {
return *r.Port_field
}
type UserSessionVars struct {
@@ -134,6 +139,7 @@ type YAMLConfig struct {
CfgDirStr *string `yaml:"cfg_dir"`
MetricsConfig MetricsYAMLConfig `yaml:"metrics"`
RemotesapiConfig RemotesapiYAMLConfig `yaml:"remotesapi"`
ClusterCfg *ClusterYAMLConfig `yaml:"cluster"`
PrivilegeFile *string `yaml:"privilege_file"`
Vars []UserSessionVars `yaml:"user_session_vars"`
Jwks []engine.JwksConfig `yaml:"jwks"`
@@ -340,7 +346,7 @@ func (cfg YAMLConfig) MetricsPort() int {
}
func (cfg YAMLConfig) RemotesapiPort() *int {
return cfg.RemotesapiConfig.Port
return cfg.RemotesapiConfig.Port_field
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
@@ -444,3 +450,58 @@ func (cfg YAMLConfig) Socket() string {
}
return *cfg.ListenerConfig.Socket
}
// ClusterConfig implements ServerConfig, returning the cluster section of the
// YAML config, or nil when no cluster stanza was present.
func (cfg YAMLConfig) ClusterConfig() cluster.Config {
// Return an untyped nil explicitly: returning the nil *ClusterYAMLConfig
// pointer through the interface would yield a non-nil cluster.Config value
// (the typed-nil trap), defeating `config == nil` checks such as the one in
// ValidateClusterConfig.
if cfg.ClusterCfg == nil {
return nil
}
return cfg.ClusterCfg
}
// ClusterYAMLConfig is the YAML-backed implementation of cluster.Config,
// unmarshalled from the `cluster:` section of a sql-server config file.
// Fields carry a `_field` suffix so the exported getter methods that satisfy
// the cluster.Config interface can use the unsuffixed names.
type ClusterYAMLConfig struct {
StandbyRemotes_field []standbyRemoteYAMLConfig `yaml:"standby_remotes"`
BootstrapRole_field string `yaml:"bootstrap_role"`
BootstrapEpoch_field int `yaml:"bootstrap_epoch"`
Remotesapi clusterRemotesAPIYAMLConfig `yaml:"remotesapi"`
}
// standbyRemoteYAMLConfig is the YAML-backed implementation of
// cluster.StandbyRemoteConfig: one standby remote's name and URL template.
type standbyRemoteYAMLConfig struct {
Name_field string `yaml:"name"`
RemoteURLTemplate_field string `yaml:"remote_url_template"`
}
// Name returns the configured name of this standby remote.
func (c standbyRemoteYAMLConfig) Name() string {
return c.Name_field
}
// RemoteURLTemplate returns the remote URL template for this standby remote;
// ValidateClusterConfig requires it to contain a {database} placeholder.
func (c standbyRemoteYAMLConfig) RemoteURLTemplate() string {
return c.RemoteURLTemplate_field
}
// StandbyRemotes returns the configured standby remotes as
// cluster.StandbyRemoteConfig values, in declaration order.
func (c *ClusterYAMLConfig) StandbyRemotes() []cluster.StandbyRemoteConfig {
	remotes := make([]cluster.StandbyRemoteConfig, 0, len(c.StandbyRemotes_field))
	for _, remote := range c.StandbyRemotes_field {
		remotes = append(remotes, remote)
	}
	return remotes
}
// BootstrapRole returns the configured bootstrap_role value; validated by
// ValidateClusterConfig to be "primary", "standby", or empty.
func (c *ClusterYAMLConfig) BootstrapRole() string {
return c.BootstrapRole_field
}
// BootstrapEpoch returns the configured bootstrap_epoch value; validated by
// ValidateClusterConfig to be >= 0.
func (c *ClusterYAMLConfig) BootstrapEpoch() int {
return c.BootstrapEpoch_field
}
// RemotesAPIConfig returns the nested remotesapi configuration section.
func (c *ClusterYAMLConfig) RemotesAPIConfig() cluster.RemotesAPIConfig {
return c.Remotesapi
}
// clusterRemotesAPIYAMLConfig is the YAML-backed implementation of
// cluster.RemotesAPIConfig.
type clusterRemotesAPIYAMLConfig struct {
// P holds the remotesapi listen port. NOTE(review): the name breaks this
// file's `_field` suffix convention (cf. Name_field); consider renaming to
// Port_field if no other code references it.
P int `yaml:"port"`
}
// Port returns the configured cluster remotesapi port.
func (c clusterRemotesAPIYAMLConfig) Port() int {
return c.P
}

View File

@@ -161,6 +161,136 @@ remotesapi:
require.Equal(t, 8000, *config.RemotesapiPort())
}
// TestUnmarshallCluster verifies that a `cluster:` YAML stanza unmarshals into
// a ClusterConfig whose accessors (remotesapi port, standby remotes, bootstrap
// role and epoch) reflect the parsed values.
func TestUnmarshallCluster(t *testing.T) {
testStr := `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://doltdb-1.doltdb:50051/{database}
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: 50051
`
config, err := NewYamlConfig([]byte(testStr))
require.NoError(t, err)
require.NotNil(t, config.ClusterConfig())
require.NotNil(t, config.ClusterConfig().RemotesAPIConfig())
require.Equal(t, 50051, config.ClusterConfig().RemotesAPIConfig().Port())
require.Len(t, config.ClusterConfig().StandbyRemotes(), 1)
require.Equal(t, "primary", config.ClusterConfig().BootstrapRole())
require.Equal(t, 0, config.ClusterConfig().BootstrapEpoch())
require.Equal(t, "standby", config.ClusterConfig().StandbyRemotes()[0].Name())
require.Equal(t, "http://doltdb-1.doltdb:50051/{database}", config.ClusterConfig().StandbyRemotes()[0].RemoteURLTemplate())
}
// TestValidateClusterConfig table-tests ValidateClusterConfig: each case is a
// YAML config string and whether validation of its cluster section should
// fail. Covers the valid case plus each individual constraint (role, epoch,
// port range, URL template placeholder, missing standby remotes).
func TestValidateClusterConfig(t *testing.T) {
cases := []struct {
Name string
Config string
Error bool
}{
{
Name: "no cluster: config",
Config: "",
Error: false,
},
{
Name: "all fields valid",
Config: `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: 50051
`,
Error: false,
},
{
Name: "bad bootstrap_role",
Config: `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: backup
bootstrap_epoch: 0
remotesapi:
port: 50051
`,
Error: true,
},
{
Name: "negative bootstrap_epoch",
Config: `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: primary
bootstrap_epoch: -1
remotesapi:
port: 50051
`,
Error: true,
},
{
Name: "negative remotesapi port",
Config: `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: -5
`,
Error: true,
},
{
Name: "bad remote_url_template",
Config: `
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: 50051
`,
Error: true,
},
{
Name: "no standby remotes",
Config: `
cluster:
standby_remotes:
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: 50051
`,
Error: true,
},
}
// Each case must unmarshal cleanly; only validation is expected to fail.
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
cfg, err := NewYamlConfig([]byte(c.Config))
require.NoError(t, err)
if c.Error {
require.Error(t, ValidateClusterConfig(cfg.ClusterConfig()))
} else {
require.NoError(t, ValidateClusterConfig(cfg.ClusterConfig()))
}
})
}
}
// Tests that a common YAML error (incorrect indentation) throws an error
func TestUnmarshallError(t *testing.T) {
testStr := `

View File

@@ -17,7 +17,7 @@ require (
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99
github.com/dolthub/vitess v0.0.0-20220921212835-1df8e46181dc
github.com/dustin/go-humanize v1.0.0
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -56,7 +56,7 @@ require (
)
require (
github.com/dolthub/go-mysql-server v0.12.1-0.20220920214908-aa94dc1d23d7
github.com/dolthub/go-mysql-server v0.12.1-0.20220922215311-864bfa363c74
github.com/google/flatbuffers v2.0.6+incompatible
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0

View File

@@ -175,8 +175,8 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220920214908-aa94dc1d23d7 h1:n6xuCTPWmyyHhkHNLl9fLVvw3sD+2cJSle4DjpbvanQ=
github.com/dolthub/go-mysql-server v0.12.1-0.20220920214908-aa94dc1d23d7/go.mod h1:JJtUZL+JLCimxbLiu4SqjgFefbxjvfpY7Z0i7Kcnm20=
github.com/dolthub/go-mysql-server v0.12.1-0.20220922215311-864bfa363c74 h1:y1Rl8/UgTcxgVDcUp1DpQpfzHDvy8wdwoJZS4RNDpag=
github.com/dolthub/go-mysql-server v0.12.1-0.20220922215311-864bfa363c74/go.mod h1:gQ14YRyXHtgPaDb2QgUof7319Mowrv6/KYijqrwFnEg=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
@@ -185,8 +185,8 @@ github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxP
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99 h1:XF5dT33M4olcTUzA0XynHdT3eISI0ecfVnEhZc+CFtE=
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dolthub/vitess v0.0.0-20220921212835-1df8e46181dc h1:/nf27HGOl186T2XuxJibsyxQdadpuLP/aQ8TBxLrMp8=
github.com/dolthub/vitess v0.0.0-20220921212835-1df8e46181dc/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=

View File

@@ -103,12 +103,8 @@ func (fact DoltRemoteFactory) newChunkStore(ctx context.Context, nbf *types.Noms
csClient := remotesapi.NewChunkStoreServiceClient(conn)
cs, err := remotestorage.NewDoltChunkStoreFromPath(ctx, nbf, urlObj.Path, urlObj.Host, csClient)
if err == remotestorage.ErrInvalidDoltSpecPath {
return nil, fmt.Errorf("invalid dolt url '%s'", urlObj.String())
} else if err != nil {
// TODO: Make this error more expressive
return nil, err
if err != nil {
return nil, fmt.Errorf("could not access dolt url '%s': %w", urlObj.String(), err)
}
if _, ok := params[NoCachingParameter]; ok {

View File

@@ -33,11 +33,11 @@ import (
)
type DiffSummaryProgress struct {
Adds, Removes, Changes, CellChanges, NewSize, OldSize uint64
Adds, Removes, Changes, CellChanges, NewRowSize, OldRowSize, NewCellSize, OldCellSize uint64
}
type prollyReporter func(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffSummaryProgress) error
type nomsReporter func(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error
type nomsReporter func(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error
// Summary reports a summary of diff changes between two values
// todo: make package private once dolthub is migrated
@@ -50,7 +50,7 @@ func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.
if err != nil {
return err
}
ch <- DiffSummaryProgress{OldSize: fc, NewSize: tc}
ch <- DiffSummaryProgress{OldRowSize: fc, NewRowSize: tc}
fk, tk := schema.IsKeyless(fromSch), schema.IsKeyless(toSch)
var keyless bool
@@ -64,7 +64,7 @@ func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.
return diffProllyTrees(ctx, ch, keyless, from, to, fromSch, toSch)
}
return diffNomsMaps(ctx, ch, keyless, from, to)
return diffNomsMaps(ctx, ch, keyless, from, to, fromSch, toSch)
}
// SummaryForTableDelta pushes diff summary progress messages for the table delta given to the channel given
@@ -91,7 +91,7 @@ func SummaryForTableDelta(ctx context.Context, ch chan DiffSummaryProgress, td T
if types.IsFormat_DOLT(td.Format()) {
return diffProllyTrees(ctx, ch, keyless, fromRows, toRows, fromSch, toSch)
} else {
return diffNomsMaps(ctx, ch, keyless, fromRows, toRows)
return diffNomsMaps(ctx, ch, keyless, fromRows, toRows, fromSch, toSch)
}
}
@@ -114,14 +114,18 @@ func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless b
if err != nil {
return err
}
cfc := uint64(len(fromSch.GetAllCols().GetColumns())) * fc
tc, err := to.Count()
if err != nil {
return err
}
ctc := uint64(len(toSch.GetAllCols().GetColumns())) * tc
rpr = reportPkChanges
ch <- DiffSummaryProgress{
OldSize: fc,
NewSize: tc,
OldRowSize: fc,
NewRowSize: tc,
OldCellSize: cfc,
NewCellSize: ctc,
}
}
@@ -134,7 +138,7 @@ func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless b
return nil
}
func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool, fromRows durable.Index, toRows durable.Index) error {
func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool, fromRows durable.Index, toRows durable.Index, fromSch, toSch schema.Schema) error {
var rpr nomsReporter
if keyless {
rpr = reportNomsKeylessChanges
@@ -143,21 +147,25 @@ func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool
if err != nil {
return err
}
cfc := uint64(len(fromSch.GetAllCols().GetColumns())) * fc
tc, err := toRows.Count()
if err != nil {
return err
}
ctc := uint64(len(toSch.GetAllCols().GetColumns())) * tc
rpr = reportNomsPkChanges
ch <- DiffSummaryProgress{
OldSize: fc,
NewSize: tc,
OldRowSize: fc,
NewRowSize: tc,
OldCellSize: cfc,
NewCellSize: ctc,
}
}
return summaryWithReporter(ctx, ch, durable.NomsMapFromIndex(fromRows), durable.NomsMapFromIndex(toRows), rpr)
return summaryWithReporter(ctx, ch, durable.NomsMapFromIndex(fromRows), durable.NomsMapFromIndex(toRows), rpr, fromSch, toSch)
}
func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Map, rpr nomsReporter) (err error) {
func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Map, rpr nomsReporter, fromSch, toSch schema.Schema) (err error) {
ad := NewAsyncDiffer(1024)
ad.Start(ctx, from, to)
defer func() {
@@ -175,7 +183,7 @@ func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from,
}
for _, df := range diffs {
err = rpr(ctx, df, ch)
err = rpr(ctx, df, fromSch, toSch, ch)
if err != nil {
return err
}
@@ -270,7 +278,7 @@ func prollyCountCellDiff(mapping val.OrdinalMapping, fromD, toD val.TupleDesc, f
return changed
}
func reportNomsPkChanges(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error {
func reportNomsPkChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error {
var summary DiffSummaryProgress
switch change.ChangeType {
case types.DiffChangeAdded:
@@ -280,7 +288,7 @@ func reportNomsPkChanges(ctx context.Context, change *diff.Difference, ch chan<-
case types.DiffChangeModified:
oldTuple := change.OldValue.(types.Tuple)
newTuple := change.NewValue.(types.Tuple)
cellChanges, err := row.CountCellDiffs(oldTuple, newTuple)
cellChanges, err := row.CountCellDiffs(oldTuple, newTuple, fromSch, toSch)
if err != nil {
return err
}
@@ -296,7 +304,7 @@ func reportNomsPkChanges(ctx context.Context, change *diff.Difference, ch chan<-
}
}
func reportNomsKeylessChanges(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error {
func reportNomsKeylessChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error {
var oldCard uint64
if change.OldValue != nil {
v, err := change.OldValue.(types.Tuple).Get(row.KeylessCardinalityValIdx)

View File

@@ -370,14 +370,20 @@ func (td TableDelta) IsKeyless(ctx context.Context) (bool, error) {
return false, err
}
// nil table is neither keyless nor keyed
from, to := schema.IsKeyless(f), schema.IsKeyless(t)
if from && to {
return true, nil
} else if !from && !to {
return false, nil
if td.FromTable == nil {
return to, nil
} else if td.ToTable == nil {
return from, nil
} else {
return false, fmt.Errorf("mismatched keyless and keyed schemas for table %s", td.CurName())
if from && to {
return true, nil
} else if !from && !to {
return false, nil
} else {
return false, fmt.Errorf("mismatched keyless and keyed schemas for table %s", td.CurName())
}
}
}

View File

@@ -18,7 +18,6 @@ import (
"context"
"errors"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
@@ -215,14 +214,13 @@ type PendingCommit struct {
// |roots| are the current roots to include in the PendingCommit. roots.Staged is used as the new root to package in the
// commit, once written.
// |headRef| is the ref of the HEAD the commit will update
// |parentCommits| are any additional merge parents for this commit. The current HEAD commit is always considered a
// parent.
// |mergeParentCommits| are any merge parents for this commit
// |cm| is the metadata for the commit
// The current branch head will be automatically filled in as the first parent at commit time.
func (ddb *DoltDB) NewPendingCommit(
ctx context.Context,
roots Roots,
headRef ref.DoltRef,
parentCommits []*Commit,
mergeParentCommits []*Commit,
cm *datas.CommitMeta,
) (*PendingCommit, error) {
newstaged, val, err := ddb.writeRootValue(ctx, roots.Staged)
@@ -231,21 +229,9 @@ func (ddb *DoltDB) NewPendingCommit(
}
roots.Staged = newstaged
ds, err := ddb.db.GetDataset(ctx, headRef.String())
if err != nil {
return nil, err
}
nomsHeadAddr, hasHead := ds.MaybeHeadAddr()
var parents []hash.Hash
if hasHead {
parents = append(parents, nomsHeadAddr)
}
for _, pc := range parentCommits {
if pc.dCommit.Addr() != nomsHeadAddr {
parents = append(parents, pc.dCommit.Addr())
}
for _, pc := range mergeParentCommits {
parents = append(parents, pc.dCommit.Addr())
}
commitOpts := datas.CommitOptions{Parents: parents, Meta: cm}

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/datas/pull"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
@@ -51,19 +50,18 @@ func NewPushOnWriteHook(destDB *DoltDB, tmpDir string) *PushOnWriteHook {
// Execute implements CommitHook, replicates head updates to the destDb field
func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error {
// TODO: this code and pushDataset are largely duplicated from doltDb.PullChunks.
// Clean it up, and preferably make more db stores capable of using the puller interface
if datas.CanUsePuller(db) && datas.CanUsePuller(ph.destDB) {
return pushDatasetWithPuller(ctx, ph.destDB, db, ph.tmpDir, ds)
}
return ph.pushDataset(ctx, ds, db)
return pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir)
}
func (ph *PushOnWriteHook) pushDataset(ctx context.Context, ds datas.Dataset, db datas.Database) error {
func pushDataset(ctx context.Context, destDB, srcDB datas.Database, ds datas.Dataset, tmpDir string) error {
addr, ok := ds.MaybeHeadAddr()
if !ok {
_, err := ph.destDB.Delete(ctx, ds)
_, err := destDB.Delete(ctx, ds)
return err
}
err := pullHash(ctx, destDB, srcDB, addr, tmpDir, nil, nil)
if err != nil {
return err
}
@@ -72,21 +70,12 @@ func (ph *PushOnWriteHook) pushDataset(ctx context.Context, ds datas.Dataset, db
return err
}
srcCS := datas.ChunkStoreFromDatabase(db)
destCS := datas.ChunkStoreFromDatabase(ph.destDB)
waf := types.WalkAddrsForNBF(ph.fmt)
err = pull.Pull(ctx, srcCS, destCS, waf, addr, nil)
ds, err = destDB.GetDataset(ctx, rf.String())
if err != nil {
return err
}
ds, err = ph.destDB.GetDataset(ctx, rf.String())
if err != nil {
return err
}
_, err = ph.destDB.SetHead(ctx, ds, addr)
_, err = destDB.SetHead(ctx, ds, addr)
return err
}
@@ -107,46 +96,6 @@ func (ph *PushOnWriteHook) SetLogger(ctx context.Context, wr io.Writer) error {
return nil
}
// replicate pushes a dataset from srcDB to destDB and force sets the destDB ref to the new dataset value
func pushDatasetWithPuller(ctx context.Context, destDB, srcDB datas.Database, tempTableDir string, ds datas.Dataset) error {
addr, ok := ds.MaybeHeadAddr()
if !ok {
_, err := destDB.Delete(ctx, ds)
return err
}
rf, err := ref.Parse(ds.ID())
if err != nil {
return err
}
srcCS := datas.ChunkStoreFromDatabase(srcDB)
destCS := datas.ChunkStoreFromDatabase(destDB)
waf, err := types.WalkAddrsForChunkStore(srcCS)
if err != nil {
return err
}
puller, err := pull.NewPuller(ctx, tempTableDir, defaultChunksPerTF, srcCS, destCS, waf, addr, nil)
if err != nil && err != pull.ErrDBUpToDate {
return err
}
if err != pull.ErrDBUpToDate {
err = puller.Pull(ctx)
if err != nil {
return err
}
}
ds, err = destDB.GetDataset(ctx, rf.String())
if err != nil {
return err
}
_, err = destDB.SetHead(ctx, ds, addr)
return err
}
type PushArg struct {
ds datas.Dataset
db datas.Database
@@ -292,21 +241,15 @@ func RunAsyncReplicationThreads(bThreads *sql.BackgroundThreads, ch chan PushArg
return newHeadsCopy
}
isNewHeads := func(newHeads map[string]PushArg) bool {
defer mu.Unlock()
mu.Lock()
return len(newHeads) != 0
}
flush := func(newHeads map[string]PushArg, latestHeads map[string]hash.Hash) {
newHeadsCopy := getHeadsCopy()
if !isNewHeads(newHeadsCopy) {
if len(newHeadsCopy) == 0 {
return
}
for id, newCm := range newHeadsCopy {
if latest, ok := latestHeads[id]; !ok || latest != newCm.hash {
// use background context to drain after sql context is canceled
err := pushDatasetWithPuller(context.Background(), destDB.db, newCm.db, tmpDir, newCm.ds)
err := pushDataset(context.Background(), destDB.db, newCm.db, newCm.ds, tmpDir)
if err != nil {
logger.Write([]byte("replication failed: " + err.Error()))
}

View File

@@ -1238,11 +1238,15 @@ func (ddb *DoltDB) pruneUnreferencedDatasets(ctx context.Context) error {
// given, pulling all chunks reachable from the given targetHash. Pull progress
// is communicated over the provided channel.
func (ddb *DoltDB) PullChunks(ctx context.Context, tempDir string, srcDB *DoltDB, targetHash hash.Hash, progChan chan pull.PullProgress, statsCh chan pull.Stats) error {
srcCS := datas.ChunkStoreFromDatabase(srcDB.db)
destCS := datas.ChunkStoreFromDatabase(ddb.db)
return pullHash(ctx, ddb.db, srcDB.db, targetHash, tempDir, progChan, statsCh)
}
func pullHash(ctx context.Context, destDB, srcDB datas.Database, targetHash hash.Hash, tempDir string, progChan chan pull.PullProgress, statsCh chan pull.Stats) error {
srcCS := datas.ChunkStoreFromDatabase(srcDB)
destCS := datas.ChunkStoreFromDatabase(destDB)
waf := types.WalkAddrsForNBF(srcDB.Format())
if datas.CanUsePuller(srcDB.db) && datas.CanUsePuller(ddb.db) {
if datas.CanUsePuller(srcDB) && datas.CanUsePuller(destDB) {
puller, err := pull.NewPuller(ctx, tempDir, defaultChunksPerTF, srcCS, destCS, waf, targetHash, statsCh)
if err == pull.ErrDBUpToDate {
return nil

View File

@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
@@ -245,7 +244,7 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit
if err != nil {
panic("couldn't get roots: " + err.Error())
}
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData(), actions.CommitStagedProps{
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData().Ddb, actions.CommitStagedProps{
Message: "auto commit",
Date: t,
AllowEmpty: true,
@@ -322,10 +321,7 @@ func (mr *MultiRepoTestSetup) PushToRemote(dbName, remoteName, branchName string
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), mr.MrEnv.GetEnv(dbName))
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
mr.Errhand(actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err))
}
mr.Errhand(fmt.Sprintf("Failed to get remote database: %s", err.Error()))
mr.Errhand(actions.HandleInitRemoteStorageClientErr(opts.Remote.Name, opts.Remote.Url, err))
}
tmpDir, err := dEnv.TempTableFilesDir()

View File

@@ -130,19 +130,15 @@ func CommitStaged(ctx context.Context, roots doltdb.Roots, mergeActive bool, mer
return c, nil
}
// GetCommitStaged adds a new commit to HEAD with the given props, returning it as a PendingCommit that can be
// committed with doltdb.CommitWithWorkingSet
// GetCommitStaged returns a new pending commit with the roots and commit properties given.
func GetCommitStaged(
ctx context.Context,
roots doltdb.Roots,
mergeActive bool,
mergeParents []*doltdb.Commit,
dbData env.DbData,
db *doltdb.DoltDB,
props CommitStagedProps,
) (*doltdb.PendingCommit, error) {
ddb := dbData.Ddb
rsr := dbData.Rsr
if props.Message == "" {
return nil, datas.ErrEmptyCommitMessage
}
@@ -194,5 +190,5 @@ func GetCommitStaged(
return nil, err
}
return ddb.NewPendingCommit(ctx, roots, rsr.CWBHeadRef(), mergeParents, meta)
return db.NewPendingCommit(ctx, roots, mergeParents, meta)
}

View File

@@ -471,13 +471,7 @@ func SyncRoots(ctx context.Context, srcDb, destDb *doltdb.DoltDB, tempTableDir s
return nil
}
func HandleInvalidDoltSpecPathErr(name, url string, err error) error {
urlObj, _ := earl.Parse(url)
path := urlObj.Path
if path[0] == '/' {
path = path[1:]
}
var detail = fmt.Sprintf("the remote: %s %s '%s' should be in the format 'organization/repo'", name, url, path)
func HandleInitRemoteStorageClientErr(name, url string, err error) error {
var detail = fmt.Sprintf("the remote: %s '%s' could not be accessed", name, url)
return fmt.Errorf("%w; %s; %s", ErrFailedToGetRemoteDb, detail, err.Error())
}

View File

@@ -21,7 +21,7 @@ import (
)
type DBCache interface {
Get(org, repo, nbfVerStr string) (RemoteSrvStore, error)
Get(path, nbfVerStr string) (RemoteSrvStore, error)
}
type RemoteSrvStore interface {

View File

@@ -29,7 +29,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
remotesapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/remotesapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
@@ -60,17 +59,33 @@ func NewHttpFSBackedChunkStore(lgr *logrus.Entry, httpHost string, csCache DBCac
}
}
// repoRequest abstracts the remotesapi request messages that identify a
// repository either by the legacy RepoId (org + repo name) or by the newer
// free-form RepoPath string.
type repoRequest interface {
	GetRepoId() *remotesapi.RepoId
	GetRepoPath() string
}
// getRepoPath resolves the repository path for a request, preferring the
// explicit RepoPath field and falling back to the legacy "org/repo" form
// built from RepoId. Returns "" when the request identifies no repository.
func getRepoPath(req repoRequest) string {
	if p := req.GetRepoPath(); p != "" {
		return p
	}
	if id := req.GetRepoId(); id != nil {
		return id.Org + "/" + id.RepoName
	}
	return ""
}
func (rs *RemoteChunkStore) HasChunks(ctx context.Context, req *remotesapi.HasChunksRequest) (*remotesapi.HasChunksResponse, error) {
logger := getReqLogger(rs.lgr, "HasChunks")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found repo %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found repo %s", repoPath)
hashes, hashToIndex := remotestorage.ParseByteSlices(req.Hashes)
@@ -117,13 +132,14 @@ func (rs *RemoteChunkStore) GetDownloadLocations(ctx context.Context, req *remot
logger := getReqLogger(rs.lgr, "GetDownloadLocations")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found repo %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found repo %s", repoPath)
hashes, _ := remotestorage.ParseByteSlices(req.ChunkHashes)
@@ -168,7 +184,7 @@ func (rs *RemoteChunkStore) StreamDownloadLocations(stream remotesapi.ChunkStore
md, _ := metadata.FromIncomingContext(stream.Context())
var repoID *remotesapi.RepoId
var repoPath string
var cs RemoteSrvStore
var prefix string
for {
@@ -180,13 +196,14 @@ func (rs *RemoteChunkStore) StreamDownloadLocations(stream remotesapi.ChunkStore
return err
}
if !proto.Equal(req.RepoId, repoID) {
repoID = req.RepoId
cs = rs.getStore(logger, repoID)
nextPath := getRepoPath(req)
if nextPath != repoPath {
repoPath = nextPath
cs = rs.getStore(logger, repoPath)
if cs == nil {
return status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found repo %s/%s", repoID.Org, repoID.RepoName)
logger.Printf("found repo %s", repoPath)
prefix, err = rs.getRelativeStorePath(cs)
if err != nil {
@@ -228,12 +245,12 @@ func (rs *RemoteChunkStore) StreamDownloadLocations(stream remotesapi.ChunkStore
func (rs *RemoteChunkStore) getHost(md metadata.MD) string {
host := rs.HttpHost
if strings.HasPrefix(rs.HttpHost, ":") && rs.HttpHost != ":80" {
if strings.HasPrefix(rs.HttpHost, ":") {
hosts := md.Get(":authority")
if len(hosts) > 0 {
host = strings.Split(hosts[0], ":")[0] + rs.HttpHost
}
} else if rs.HttpHost == "" || rs.HttpHost == ":80" {
} else if rs.HttpHost == "" {
hosts := md.Get(":authority")
if len(hosts) > 0 {
host = hosts[0]
@@ -274,16 +291,15 @@ func (rs *RemoteChunkStore) GetUploadLocations(ctx context.Context, req *remotes
logger := getReqLogger(rs.lgr, "GetUploadLocations")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found repo %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found repo %s", repoPath)
org := req.RepoId.Org
repoName := req.RepoId.RepoName
tfds := parseTableFileDetails(req)
md, _ := metadata.FromIncomingContext(ctx)
@@ -291,7 +307,7 @@ func (rs *RemoteChunkStore) GetUploadLocations(ctx context.Context, req *remotes
var locs []*remotesapi.UploadLoc
for _, tfd := range tfds {
h := hash.New(tfd.Id)
url, err := rs.getUploadUrl(logger, md, org, repoName, tfd)
url, err := rs.getUploadUrl(logger, md, repoPath, tfd)
if err != nil {
return nil, status.Error(codes.Internal, "Failed to get upload Url.")
@@ -306,7 +322,7 @@ func (rs *RemoteChunkStore) GetUploadLocations(ctx context.Context, req *remotes
return &remotesapi.GetUploadLocsResponse{Locs: locs}, nil
}
func (rs *RemoteChunkStore) getUploadUrl(logger *logrus.Entry, md metadata.MD, org, repoName string, tfd *remotesapi.TableFileDetails) (string, error) {
func (rs *RemoteChunkStore) getUploadUrl(logger *logrus.Entry, md metadata.MD, repoPath string, tfd *remotesapi.TableFileDetails) (string, error) {
fileID := hash.New(tfd.Id).String()
params := url.Values{}
params.Add("num_chunks", strconv.Itoa(int(tfd.NumChunks)))
@@ -315,7 +331,7 @@ func (rs *RemoteChunkStore) getUploadUrl(logger *logrus.Entry, md metadata.MD, o
return (&url.URL{
Scheme: "http",
Host: rs.getHost(md),
Path: fmt.Sprintf("%s/%s/%s", org, repoName, fileID),
Path: fmt.Sprintf("%s/%s", repoPath, fileID),
RawQuery: params.Encode(),
}).String(), nil
}
@@ -324,18 +340,19 @@ func (rs *RemoteChunkStore) Rebase(ctx context.Context, req *remotesapi.RebaseRe
logger := getReqLogger(rs.lgr, "Rebase")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found %s", repoPath)
err := cs.Rebase(ctx)
if err != nil {
logger.Printf("error occurred during processing of Rebace rpc of %s/%s details: %v", req.RepoId.Org, req.RepoId.RepoName, err)
logger.Printf("error occurred during processing of Rebace rpc of %s details: %v", repoPath, err)
return nil, status.Errorf(codes.Internal, "failed to rebase: %v", err)
}
@@ -346,7 +363,8 @@ func (rs *RemoteChunkStore) Root(ctx context.Context, req *remotesapi.RootReques
logger := getReqLogger(rs.lgr, "Root")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
@@ -355,7 +373,7 @@ func (rs *RemoteChunkStore) Root(ctx context.Context, req *remotesapi.RootReques
h, err := cs.Root(ctx)
if err != nil {
logger.Printf("error occurred during processing of Root rpc of %s/%s details: %v", req.RepoId.Org, req.RepoId.RepoName, err)
logger.Printf("error occurred during processing of Root rpc of %s details: %v", repoPath, err)
return nil, status.Error(codes.Internal, "Failed to get root")
}
@@ -366,13 +384,14 @@ func (rs *RemoteChunkStore) Commit(ctx context.Context, req *remotesapi.CommitRe
logger := getReqLogger(rs.lgr, "Commit")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found %s", repoPath)
//should validate
updates := make(map[string]int)
@@ -394,11 +413,11 @@ func (rs *RemoteChunkStore) Commit(ctx context.Context, req *remotesapi.CommitRe
ok, err = cs.Commit(ctx, currHash, lastHash)
if err != nil {
logger.Printf("error occurred during processing of Commit of %s/%s last %s curr: %s details: %v", req.RepoId.Org, req.RepoId.RepoName, lastHash.String(), currHash.String(), err)
logger.Printf("error occurred during processing of Commit of %s last %s curr: %s details: %v", repoPath, lastHash.String(), currHash.String(), err)
return nil, status.Errorf(codes.Internal, "failed to commit: %v", err)
}
logger.Printf("committed %s/%s moved from %s -> %s", req.RepoId.Org, req.RepoId.RepoName, currHash.String(), lastHash.String())
logger.Printf("committed %s moved from %s -> %s", repoPath, lastHash.String(), currHash.String())
return &remotesapi.CommitResponse{Success: ok}, nil
}
@@ -406,7 +425,8 @@ func (rs *RemoteChunkStore) GetRepoMetadata(ctx context.Context, req *remotesapi
logger := getReqLogger(rs.lgr, "GetRepoMetadata")
defer func() { logger.Println("finished") }()
cs := rs.getOrCreateStore(logger, req.RepoId, req.ClientRepoFormat.NbfVersion)
repoPath := getRepoPath(req)
cs := rs.getOrCreateStore(logger, repoPath, req.ClientRepoFormat.NbfVersion)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
@@ -432,13 +452,14 @@ func (rs *RemoteChunkStore) ListTableFiles(ctx context.Context, req *remotesapi.
logger := getReqLogger(rs.lgr, "ListTableFiles")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found repo %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found repo %s", repoPath)
root, tables, appendixTables, err := cs.Sources(ctx)
@@ -500,13 +521,14 @@ func (rs *RemoteChunkStore) AddTableFiles(ctx context.Context, req *remotesapi.A
logger := getReqLogger(rs.lgr, "AddTableFiles")
defer func() { logger.Println("finished") }()
cs := rs.getStore(logger, req.RepoId)
repoPath := getRepoPath(req)
cs := rs.getStore(logger, repoPath)
if cs == nil {
return nil, status.Error(codes.Internal, "Could not get chunkstore")
}
logger.Printf("found %s/%s", req.RepoId.Org, req.RepoId.RepoName)
logger.Printf("found %s", repoPath)
// should validate
updates := make(map[string]int)
@@ -524,18 +546,15 @@ func (rs *RemoteChunkStore) AddTableFiles(ctx context.Context, req *remotesapi.A
return &remotesapi.AddTableFilesResponse{Success: true}, nil
}
func (rs *RemoteChunkStore) getStore(logger *logrus.Entry, repoId *remotesapi.RepoId) RemoteSrvStore {
return rs.getOrCreateStore(logger, repoId, types.Format_Default.VersionString())
func (rs *RemoteChunkStore) getStore(logger *logrus.Entry, repoPath string) RemoteSrvStore {
return rs.getOrCreateStore(logger, repoPath, types.Format_Default.VersionString())
}
func (rs *RemoteChunkStore) getOrCreateStore(logger *logrus.Entry, repoId *remotesapi.RepoId, nbfVerStr string) RemoteSrvStore {
org := repoId.Org
repoName := repoId.RepoName
cs, err := rs.csCache.Get(org, repoName, nbfVerStr)
func (rs *RemoteChunkStore) getOrCreateStore(logger *logrus.Entry, repoPath, nbfVerStr string) RemoteSrvStore {
cs, err := rs.csCache.Get(repoPath, nbfVerStr)
if err != nil {
logger.Printf("Failed to retrieve chunkstore for %s/%s\n", org, repoName)
logger.Printf("Failed to retrieve chunkstore for %s\n", repoPath)
}
return cs

View File

@@ -100,16 +100,16 @@ func (fh filehandler) ServeHTTP(respWr http.ResponseWriter, req *http.Request) {
return
}
tokens := strings.Split(path, "/")
if len(tokens) != 3 {
i := strings.LastIndex(path, "/")
// a table file name is currently 32 characters, plus the '/' is 33.
if i < 0 || len(path[i:]) != 33 {
logger.Printf("response to: %v method: %v http response code: %v", req.RequestURI, req.Method, http.StatusNotFound)
respWr.WriteHeader(http.StatusNotFound)
return
}
org := tokens[0]
repo := tokens[1]
file := tokens[2]
filepath := path[:i]
file := path[i+1:]
q := req.URL.Query()
ncs := q.Get("num_chunks")
@@ -149,7 +149,7 @@ func (fh filehandler) ServeHTTP(respWr http.ResponseWriter, req *http.Request) {
return
}
statusCode = writeTableFile(req.Context(), logger, fh.dbCache, org, repo, file, num_chunks, content_hash, uint64(content_length), req.Body)
statusCode = writeTableFile(req.Context(), logger, fh.dbCache, filepath, file, num_chunks, content_hash, uint64(content_length), req.Body)
}
if statusCode != -1 {
@@ -248,7 +248,7 @@ func (u *uploadreader) Close() error {
return nil
}
func writeTableFile(ctx context.Context, logger *logrus.Entry, dbCache DBCache, org, repo, fileId string, numChunks int, contentHash []byte, contentLength uint64, body io.ReadCloser) int {
func writeTableFile(ctx context.Context, logger *logrus.Entry, dbCache DBCache, path, fileId string, numChunks int, contentHash []byte, contentLength uint64, body io.ReadCloser) int {
_, ok := hash.MaybeParse(fileId)
if !ok {
logger.Println(fileId, "is not a valid hash")
@@ -257,9 +257,9 @@ func writeTableFile(ctx context.Context, logger *logrus.Entry, dbCache DBCache,
logger.Println(fileId, "is valid")
cs, err := dbCache.Get(org, repo, types.Format_Default.VersionString())
cs, err := dbCache.Get(path, types.Format_Default.VersionString())
if err != nil {
logger.Println("failed to get", org+"/"+repo, "repository:", err.Error())
logger.Println("failed to get", path, "repository:", err.Error())
return http.StatusInternalServerError
}

View File

@@ -58,7 +58,6 @@ func init() {
}
var ErrUploadFailed = errors.New("upload failed")
var ErrInvalidDoltSpecPath = errors.New("invalid dolt spec path")
var globalHttpFetcher HTTPFetcher = &http.Client{}
@@ -104,8 +103,7 @@ type ConcurrencyParams struct {
}
type DoltChunkStore struct {
org string
repoName string
repoId *remotesapi.RepoId
repoPath string
repoToken *atomic.Value // string
host string
@@ -120,34 +118,33 @@ type DoltChunkStore struct {
}
func NewDoltChunkStoreFromPath(ctx context.Context, nbf *types.NomsBinFormat, path, host string, csClient remotesapi.ChunkStoreServiceClient) (*DoltChunkStore, error) {
tokens := strings.Split(strings.Trim(path, "/"), "/")
if len(tokens) != 2 {
return nil, ErrInvalidDoltSpecPath
}
var repoId *remotesapi.RepoId
// todo:
// this may just be a dolthub thing. Need to revisit how we do this.
org := tokens[0]
repoName := tokens[1]
metadata, err := csClient.GetRepoMetadata(ctx, &remotesapi.GetRepoMetadataRequest{
RepoId: &remotesapi.RepoId{
path = strings.Trim(path, "/")
tokens := strings.Split(path, "/")
if len(tokens) == 2 {
org := tokens[0]
repoName := tokens[1]
repoId = &remotesapi.RepoId{
Org: org,
RepoName: repoName,
},
}
}
metadata, err := csClient.GetRepoMetadata(ctx, &remotesapi.GetRepoMetadataRequest{
RepoId: repoId,
RepoPath: path,
ClientRepoFormat: &remotesapi.ClientRepoFormat{
NbfVersion: nbf.VersionString(),
NbsVersion: nbs.StorageVersion,
},
})
if err != nil {
return nil, err
}
cs := &DoltChunkStore{
org: org,
repoName: repoName,
repoId: repoId,
repoPath: path,
repoToken: new(atomic.Value),
host: host,
@@ -163,8 +160,7 @@ func NewDoltChunkStoreFromPath(ctx context.Context, nbf *types.NomsBinFormat, pa
func (dcs *DoltChunkStore) WithHTTPFetcher(fetcher HTTPFetcher) *DoltChunkStore {
return &DoltChunkStore{
org: dcs.org,
repoName: dcs.repoName,
repoId: dcs.repoId,
repoPath: dcs.repoPath,
repoToken: new(atomic.Value),
host: dcs.host,
@@ -180,8 +176,7 @@ func (dcs *DoltChunkStore) WithHTTPFetcher(fetcher HTTPFetcher) *DoltChunkStore
func (dcs *DoltChunkStore) WithNoopChunkCache() *DoltChunkStore {
return &DoltChunkStore{
org: dcs.org,
repoName: dcs.repoName,
repoId: dcs.repoId,
repoPath: dcs.repoPath,
repoToken: new(atomic.Value),
host: dcs.host,
@@ -198,8 +193,7 @@ func (dcs *DoltChunkStore) WithNoopChunkCache() *DoltChunkStore {
func (dcs *DoltChunkStore) WithChunkCache(cache ChunkCache) *DoltChunkStore {
return &DoltChunkStore{
org: dcs.org,
repoName: dcs.repoName,
repoId: dcs.repoId,
repoPath: dcs.repoPath,
repoToken: new(atomic.Value),
host: dcs.host,
@@ -216,8 +210,7 @@ func (dcs *DoltChunkStore) WithChunkCache(cache ChunkCache) *DoltChunkStore {
func (dcs *DoltChunkStore) WithDownloadConcurrency(concurrency ConcurrencyParams) *DoltChunkStore {
return &DoltChunkStore{
org: dcs.org,
repoName: dcs.repoName,
repoId: dcs.repoId,
repoPath: dcs.repoPath,
repoToken: new(atomic.Value),
host: dcs.host,
@@ -248,10 +241,7 @@ func (dcs *DoltChunkStore) getRepoId() (*remotesapi.RepoId, string) {
if curToken != nil {
token = curToken.(string)
}
return &remotesapi.RepoId{
Org: dcs.org,
RepoName: dcs.repoName,
}, token
return dcs.repoId, token
}
type cacheStats struct {
@@ -538,6 +528,12 @@ func (l *dlLocations) Add(resp *remotesapi.DownloadLoc) {
}
}
// RepoRequest is implemented by remotesapi request messages whose repository
// identification fields (id, token, path) can be populated before sending.
type RepoRequest interface {
	SetRepoId(*remotesapi.RepoId)
	SetRepoToken(string)
	SetRepoPath(string)
}
func (dcs *DoltChunkStore) getDLLocs(ctx context.Context, hashes []hash.Hash) (dlLocations, error) {
ctx, span := tracer.Start(ctx, "remotestorage.getDLLocs", trace.WithAttributes(attribute.Int("num_hashes", len(hashes))))
defer span.End()
@@ -573,7 +569,7 @@ func (dcs *DoltChunkStore) getDLLocs(ctx context.Context, hashes []hash.Hash) (d
batchItr(len(hashesBytes), getLocsBatchSize, func(st, end int) (stop bool) {
batch := hashesBytes[st:end]
id, token := dcs.getRepoId()
req := &remotesapi.GetDownloadLocsRequest{RepoId: id, ChunkHashes: batch, RepoToken: token, RepoPath: dcs.repoPath}
req := &remotesapi.GetDownloadLocsRequest{RepoId: id, RepoPath: dcs.repoPath, RepoToken: token, ChunkHashes: batch}
reqs = append(reqs, req)
return false
})
@@ -809,10 +805,8 @@ func (dcs *DoltChunkStore) Rebase(ctx context.Context) error {
func (dcs *DoltChunkStore) refreshRepoMetadata(ctx context.Context) error {
mdReq := &remotesapi.GetRepoMetadataRequest{
RepoId: &remotesapi.RepoId{
Org: dcs.org,
RepoName: dcs.repoName,
},
RepoId: dcs.repoId,
RepoPath: dcs.repoPath,
ClientRepoFormat: &remotesapi.ClientRepoFormat{
NbfVersion: dcs.nbf.VersionString(),
NbsVersion: nbs.StorageVersion,
@@ -1270,7 +1264,7 @@ func (dcs *DoltChunkStore) AddTableFilesToManifest(ctx context.Context, fileIdTo
}
id, token := dcs.getRepoId()
dcs.logf("Adding Table files to repo: %s/%s -\n%s", id.Org, id.RepoName, debugStr)
dcs.logf("Adding Table files to repo: %s -\n%s", dcs.repoPath, debugStr)
atReq := &remotesapi.AddTableFilesRequest{
RepoId: id,
RepoToken: token,

View File

@@ -263,7 +263,9 @@ func (tt TaggedValues) String() string {
// CountCellDiffs returns the number of fields that are different between two
// tuples and does not panic if tuples are different lengths.
func CountCellDiffs(from, to types.Tuple) (uint64, error) {
func CountCellDiffs(from, to types.Tuple, fromSch, toSch schema.Schema) (uint64, error) {
fromColLen := len(fromSch.GetAllCols().GetColumns())
toColLen := len(toSch.GetAllCols().GetColumns())
changed := 0
f, err := ParseTaggedValues(from)
if err != nil {
@@ -277,7 +279,8 @@ func CountCellDiffs(from, to types.Tuple) (uint64, error) {
for i, v := range f {
ov, ok := t[i]
if !ok || !v.Equals(ov) {
// !ok means t[i] has NULL value, and it is not cell modify if it was from drop column or add column
if (!ok && fromColLen == toColLen) || (ok && !v.Equals(ov)) {
changed++
}
}

View File

@@ -233,6 +233,11 @@ func MapSchemaBasedOnTagAndName(inSch, outSch Schema) ([]int, []int, error) {
keyMapping := make([]int, inSch.GetPKCols().Size())
valMapping := make([]int, inSch.GetNonPKCols().Size())
// if inSch or outSch is empty schema. This can be from added or dropped table.
if len(inSch.GetAllCols().cols) == 0 || len(outSch.GetAllCols().cols) == 0 {
return keyMapping, valMapping, nil
}
err := inSch.GetPKCols().Iter(func(tag uint64, col Column) (stop bool, err error) {
i := inSch.GetPKCols().TagToIdx[tag]
if col, ok := outSch.GetPKCols().GetByTag(tag); ok {

View File

@@ -0,0 +1,39 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import (
"github.com/dolthub/go-mysql-server/sql"
)
// newAssumeRoleProcedure builds the dolt_assume_cluster_role stored procedure,
// which asks the controller to transition to the given role at the given epoch
// and yields a single-row result with status 0 on success.
func newAssumeRoleProcedure(controller *Controller) sql.ExternalStoredProcedureDetails {
	statusSchema := sql.Schema{
		&sql.Column{
			Name:     "status",
			Type:     sql.Int64,
			Nullable: false,
		},
	}
	impl := func(ctx *sql.Context, role string, epoch int) (sql.RowIter, error) {
		if err := controller.setRoleAndEpoch(role, epoch); err != nil {
			return nil, err
		}
		return sql.RowsToRowIter(sql.Row{0}), nil
	}
	return sql.ExternalStoredProcedureDetails{
		Name:     "dolt_assume_cluster_role",
		Schema:   statusSchema,
		Function: impl,
	}
}

View File

@@ -0,0 +1,31 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
// Config is the static cluster-replication configuration supplied to a
// sql-server at startup.
type Config interface {
	// StandbyRemotes returns the configured standby replicas.
	StandbyRemotes() []StandbyRemoteConfig
	// BootstrapRole is the role to assume when no role has been persisted yet.
	BootstrapRole() string
	// BootstrapEpoch is the epoch to assume when none has been persisted yet.
	BootstrapEpoch() int
	// RemotesAPIConfig configures this server's remotesapi endpoint.
	RemotesAPIConfig() RemotesAPIConfig
}

// RemotesAPIConfig configures the remotesapi endpoint a cluster member exposes.
type RemotesAPIConfig interface {
	// Port is the TCP port the remotesapi listens on.
	Port() int
}

// StandbyRemoteConfig describes a single standby replica of this server.
type StandbyRemoteConfig interface {
	// Name is the remote's name.
	Name() string
	// RemoteURLTemplate is the URL template used to build per-database remote URLs.
	RemoteURLTemplate() string
}

View File

@@ -0,0 +1,170 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import (
"fmt"
"strconv"
"sync"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/utils/config"
)
// Role is the replication role a cluster member currently plays.
type Role string

// RolePrimary accepts writes and replicates them to standbys.
const RolePrimary Role = "primary"

// RoleStandby receives replicated writes from the primary.
const RoleStandby Role = "standby"

// PersistentConfigPrefix namespaces the cluster keys in persistent config.
const PersistentConfigPrefix = "sqlserver.cluster"

// Controller tracks this server's cluster role and epoch, persisting them and
// exposing them as SQL system variables. mu guards all mutable state.
type Controller struct {
	cfg           Config
	persistentCfg config.ReadWriteConfig
	role          Role
	epoch         int
	systemVars    sqlvars
	mu            sync.Mutex
}

// sqlvars is the subset of the engine's system-variable registry the
// Controller needs; satisfied by sql.SystemVariables.
type sqlvars interface {
	AddSystemVariables(sysVars []sql.SystemVariable)
}

// procedurestore is the subset of the external stored-procedure registry the
// Controller needs for registering its cluster procedures.
type procedurestore interface {
	Register(sql.ExternalStoredProcedureDetails)
}

const (
	// DoltClusterRoleVariable is the system variable exposing the current role.
	DoltClusterRoleVariable = "dolt_cluster_role"
	// DoltClusterRoleEpochVariable is the system variable exposing the role epoch.
	DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch"
)
// NewController constructs a cluster Controller from the static config and the
// persistent config store, bootstrapping the persisted role/epoch on first
// run. Returns (nil, nil) when cfg is nil, meaning clustering is not
// configured for this server.
func NewController(cfg Config, pCfg config.ReadWriteConfig) (*Controller, error) {
	if cfg == nil {
		return nil, nil
	}

	prefixed := config.NewPrefixConfig(pCfg, PersistentConfigPrefix)
	role, epoch, err := applyBootstrapClusterConfig(cfg, prefixed)
	if err != nil {
		return nil, err
	}
	c := &Controller{
		cfg:           cfg,
		persistentCfg: prefixed,
		role:          role,
		epoch:         epoch,
	}
	return c, nil
}
// ManageSystemVariables hands the Controller the engine's system-variable
// registry and immediately publishes the current role and epoch through it.
func (c *Controller) ManageSystemVariables(vars sqlvars) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.systemVars = vars
	c.refreshSystemVars()
}
// RegisterStoredProcedures registers the Controller's cluster-management
// stored procedures (currently dolt_assume_cluster_role) with the given store.
func (c *Controller) RegisterStoredProcedures(store procedurestore) {
	store.Register(newAssumeRoleProcedure(c))
}
// refreshSystemVars publishes the controller's current role and epoch as the
// read-only dolt_cluster_role / dolt_cluster_role_epoch system variables.
// Callers must hold c.mu.
func (c *Controller) refreshSystemVars() {
	roleVar := sql.SystemVariable{
		Name:    DoltClusterRoleVariable,
		Dynamic: false,
		Scope:   sql.SystemVariableScope_Persist,
		Type:    sql.NewSystemStringType(DoltClusterRoleVariable),
		Default: string(c.role),
	}
	epochVar := sql.SystemVariable{
		Name:    DoltClusterRoleEpochVariable,
		Dynamic: false,
		Scope:   sql.SystemVariableScope_Persist,
		Type:    sql.NewSystemIntType(DoltClusterRoleEpochVariable, 0, 9223372036854775807, false),
		Default: c.epoch,
	}
	c.systemVars.AddSystemVariables([]sql.SystemVariable{roleVar, epochVar})
}
// persistVariables writes the current role and epoch to persistent config so
// they survive a server restart. Callers must hold c.mu.
func (c *Controller) persistVariables() error {
	return c.persistentCfg.SetStrings(map[string]string{
		DoltClusterRoleVariable:      string(c.role),
		DoltClusterRoleEpochVariable: strconv.Itoa(c.epoch),
	})
}
// applyBootstrapClusterConfig loads the persisted cluster role and epoch,
// seeding them from the bootstrap Config (or defaults: role "primary", the
// configured bootstrap epoch) on first run, and writes any newly seeded
// values back to persistent config.
//
// Returns the effective role and epoch, or an error if the persisted values
// are malformed.
func applyBootstrapClusterConfig(cfg Config, pCfg config.ReadWriteConfig) (Role, int, error) {
	toset := make(map[string]string)
	persistentRole := pCfg.GetStringOrDefault(DoltClusterRoleVariable, "")
	persistentEpoch := pCfg.GetStringOrDefault(DoltClusterRoleEpochVariable, "")
	if persistentRole == "" {
		if cfg.BootstrapRole() != "" {
			persistentRole = cfg.BootstrapRole()
		} else {
			persistentRole = string(RolePrimary)
		}
		toset[DoltClusterRoleVariable] = persistentRole
	}
	if persistentEpoch == "" {
		persistentEpoch = strconv.Itoa(cfg.BootstrapEpoch())
		toset[DoltClusterRoleEpochVariable] = persistentEpoch
	}
	if persistentRole != string(RolePrimary) && persistentRole != string(RoleStandby) {
		// BUGFIX: the valid roles are "primary" and "standby"; the old message
		// incorrectly said "secondary", which is never accepted anywhere.
		return "", 0, fmt.Errorf("persisted role %s.%s = %s must be \"primary\" or \"standby\"", PersistentConfigPrefix, DoltClusterRoleVariable, persistentRole)
	}
	epochi, err := strconv.Atoi(persistentEpoch)
	if err != nil {
		return "", 0, fmt.Errorf("persisted role epoch %s.%s = %s must be an integer", PersistentConfigPrefix, DoltClusterRoleEpochVariable, persistentEpoch)
	}
	// Only seeded values need to be written back.
	if len(toset) > 0 {
		if err := pCfg.SetStrings(toset); err != nil {
			return "", 0, err
		}
	}
	return Role(persistentRole), epochi, nil
}
// setRoleAndEpoch transitions the controller to the given role at the given
// epoch. Epochs must be monotonically increasing; a repeated (role, epoch)
// pair is a no-op, while a repeated or stale epoch with any other change is
// an error. On success the system variables are refreshed and the new state
// is persisted.
func (c *Controller) setRoleAndEpoch(role string, epoch int) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	sameRole := role == string(c.role)
	switch {
	case epoch == c.epoch && sameRole:
		// Already exactly here; nothing to do.
		return nil
	case epoch == c.epoch:
		return fmt.Errorf("error assuming role '%s' at epoch %d; already at epoch %d with different role, '%s'", role, epoch, c.epoch, c.role)
	case epoch < c.epoch:
		return fmt.Errorf("error assuming role '%s' at epoch %d; already at epoch %d", role, epoch, c.epoch)
	}

	if sameRole {
		// Same role at a newer epoch: just bump the epoch.
		c.epoch = epoch
		c.refreshSystemVars()
		return c.persistVariables()
	}

	if role != "primary" && role != "standby" {
		return fmt.Errorf("error assuming role '%s'; valid roles are 'primary' and 'standby'", role)
	}

	// TODO: Role is transitioning...lots of stuff to do.
	c.role = Role(role)
	c.epoch = epoch
	c.refreshSystemVars()
	return c.persistVariables()
}

View File

@@ -29,12 +29,10 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dprocedures"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/utils/earl"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
@@ -513,15 +511,8 @@ func (p DoltDatabaseProvider) cloneDatabaseFromRemote(
// TODO: this method only adds error handling. Remove?
func getRemoteDb(ctx *sql.Context, r env.Remote, dialer dbfactory.GRPCDialProvider) (*doltdb.DoltDB, error) {
ddb, err := r.GetRemoteDB(ctx, types.Format_Default, dialer)
if err != nil {
bdr := errhand.BuildDError("error: failed to get remote db").AddCause(err)
if err == remotestorage.ErrInvalidDoltSpecPath {
urlObj, _ := earl.Parse(r.Url)
bdr.AddDetails("'%s' should be in the format 'organization/repo'", urlObj.Path)
}
return nil, bdr.Build()
}
@@ -768,6 +759,10 @@ func (p DoltDatabaseProvider) Function(_ *sql.Context, name string) (sql.Functio
return fn, nil
}
func (p DoltDatabaseProvider) Register(d sql.ExternalStoredProcedureDetails) {
p.externalProcedures.Register(d)
}
// ExternalStoredProcedure implements the sql.ExternalStoredProcedureProvider interface
func (p DoltDatabaseProvider) ExternalStoredProcedure(_ *sql.Context, name string, numOfParams int) (*sql.ExternalStoredProcedureDetails, error) {
return p.externalProcedures.LookupByNameAndParamCount(name, numOfParams)
@@ -782,9 +777,13 @@ func (p DoltDatabaseProvider) ExternalStoredProcedures(_ *sql.Context, name stri
func (p DoltDatabaseProvider) TableFunction(_ *sql.Context, name string) (sql.TableFunction, error) {
// currently, only one table function is supported, if we extend this, we should clean this up
// and store table functions in a map, similar to regular functions.
if strings.ToLower(name) == "dolt_diff" {
switch strings.ToLower(name) {
case "dolt_diff":
dtf := &DiffTableFunction{}
return dtf, nil
case "dolt_diff_summary":
dtf := &DiffSummaryTableFunction{}
return dtf, nil
}
return nil, sql.ErrTableFunctionNotFound.New(name)
@@ -860,7 +859,7 @@ func switchAndFetchReplicaHead(ctx *sql.Context, branch string, db ReadReplicaDa
}
// create workingSets/heads/branch and update the working set
err = pullBranches(ctx, db, []string{branch}, currentBranchRef)
err = pullBranches(ctx, db, []string{branch}, currentBranchRef, pullBehavior_fastForward)
if err != nil {
return err
}

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/datas"
)
@@ -94,10 +93,7 @@ func DoDoltPush(ctx *sql.Context, args []string) (int, error) {
}
remoteDB, err := sess.Provider().GetRemoteDB(ctx, dbData.Ddb, opts.Remote, true)
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
return 1, actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err)
}
return 1, err
return 1, actions.HandleInitRemoteStorageClientErr(opts.Remote.Name, opts.Remote.Url, err)
}
tmpDir, err := dbData.Rsw.TempTableFilesDir()

View File

@@ -0,0 +1,509 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqle
import (
"fmt"
"io"
"math"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/atomicerr"
)
var _ sql.TableFunction = (*DiffSummaryTableFunction)(nil)

// DiffSummaryTableFunction is the dolt_diff_summary() table function. It reports
// row- and cell-level diff statistics between two commits/refs, either for one
// named table or for every changed table.
type DiffSummaryTableFunction struct {
	ctx *sql.Context // captured at NewInstance time; used to evaluate argument expressions

	fromCommitExpr sql.Expression // first argument: the "from" commit/ref
	toCommitExpr   sql.Expression // second argument: the "to" commit/ref
	tableNameExpr  sql.Expression // optional third argument: table name; nil when omitted
	database       sql.Database
}
// diffSummaryTableSchema is the result schema of dolt_diff_summary(). For keyless
// tables only table_name, rows_added, and rows_deleted are populated; the other
// columns are NULL (see getRowFromDiffSummary).
var diffSummaryTableSchema = sql.Schema{
	&sql.Column{Name: "table_name", Type: sql.LongText, Nullable: false},
	&sql.Column{Name: "rows_unmodified", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "rows_added", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "rows_deleted", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "rows_modified", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "cells_added", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "cells_deleted", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "cells_modified", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "old_row_count", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "new_row_count", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "old_cell_count", Type: sql.Int64, Nullable: true},
	&sql.Column{Name: "new_cell_count", Type: sql.Int64, Nullable: true},
}
// NewInstance creates a new instance of TableFunction interface
func (ds *DiffSummaryTableFunction) NewInstance(ctx *sql.Context, db sql.Database, expressions []sql.Expression) (sql.Node, error) {
	fn := &DiffSummaryTableFunction{
		ctx:      ctx,
		database: db,
	}
	// WithExpressions validates and installs the arguments, returning the
	// configured node (or an argument error) directly.
	return fn.WithExpressions(expressions...)
}
// Database implements the sql.Databaser interface
func (ds *DiffSummaryTableFunction) Database() sql.Database {
	return ds.database
}

// WithDatabase implements the sql.Databaser interface. Note that it mutates the
// receiver in place and returns it rather than returning a copy.
func (ds *DiffSummaryTableFunction) WithDatabase(database sql.Database) (sql.Node, error) {
	ds.database = database
	return ds, nil
}

// FunctionName implements the sql.TableFunction interface
func (ds *DiffSummaryTableFunction) FunctionName() string {
	return "dolt_diff_summary"
}
// Resolved implements the sql.Resolvable interface. The node is resolved once
// every supplied argument expression is resolved.
func (ds *DiffSummaryTableFunction) Resolved() bool {
	resolved := ds.fromCommitExpr.Resolved() && ds.toCommitExpr.Resolved()
	if ds.tableNameExpr == nil {
		return resolved
	}
	return resolved && ds.tableNameExpr.Resolved()
}
// String implements the Stringer interface, rendering the call with either two
// or three arguments depending on whether a table name was supplied.
func (ds *DiffSummaryTableFunction) String() string {
	if ds.tableNameExpr == nil {
		return fmt.Sprintf("DOLT_DIFF_SUMMARY(%s, %s)", ds.fromCommitExpr.String(), ds.toCommitExpr.String())
	}
	return fmt.Sprintf("DOLT_DIFF_SUMMARY(%s, %s, %s)", ds.fromCommitExpr.String(), ds.toCommitExpr.String(), ds.tableNameExpr.String())
}
// Schema implements the sql.Node interface.
func (ds *DiffSummaryTableFunction) Schema() sql.Schema {
	return diffSummaryTableSchema
}

// Children implements the sql.Node interface. This node is a leaf and has none.
func (ds *DiffSummaryTableFunction) Children() []sql.Node {
	return nil
}
// WithChildren implements the sql.Node interface. Since this node is a leaf,
// any non-empty child list is rejected.
func (ds *DiffSummaryTableFunction) WithChildren(children ...sql.Node) (sql.Node, error) {
	if len(children) > 0 {
		return nil, fmt.Errorf("unexpected children")
	}
	return ds, nil
}
// CheckPrivileges implements the interface sql.Node. When a table name argument
// was given, SELECT is required on that one table; otherwise SELECT is required
// on every table in the database.
func (ds *DiffSummaryTableFunction) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
	if ds.tableNameExpr == nil {
		names, err := ds.database.GetTableNames(ctx)
		if err != nil {
			return false
		}
		var ops []sql.PrivilegedOperation
		for _, name := range names {
			ops = append(ops, sql.NewPrivilegedOperation(ds.database.Name(), name, "", sql.PrivilegeType_Select))
		}
		return opChecker.UserHasPrivileges(ctx, ops...)
	}
	if !sql.IsText(ds.tableNameExpr.Type()) {
		return false
	}
	val, err := ds.tableNameExpr.Eval(ds.ctx, nil)
	if err != nil {
		return false
	}
	name, ok := val.(string)
	if !ok {
		return false
	}
	// TODO: Add tests for privilege checking
	return opChecker.UserHasPrivileges(ctx,
		sql.NewPrivilegedOperation(ds.database.Name(), name, "", sql.PrivilegeType_Select))
}
// Expressions implements the sql.Expressioner interface, returning the two
// required commit expressions plus the optional table name expression.
func (ds *DiffSummaryTableFunction) Expressions() []sql.Expression {
	if ds.tableNameExpr == nil {
		return []sql.Expression{ds.fromCommitExpr, ds.toCommitExpr}
	}
	return []sql.Expression{ds.fromCommitExpr, ds.toCommitExpr, ds.tableNameExpr}
}
// WithExpressions implements the sql.Expressioner interface. It accepts two or
// three resolved text expressions: from-commit, to-commit, and an optional
// table name.
func (ds *DiffSummaryTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
	if len(expression) < 2 || len(expression) > 3 {
		return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "2 or 3", len(expression))
	}
	// All arguments must be literals (i.e. already resolved).
	for _, e := range expression {
		if !e.Resolved() {
			return nil, ErrInvalidNonLiteralArgument.New(ds.FunctionName(), e.String())
		}
	}
	ds.fromCommitExpr, ds.toCommitExpr = expression[0], expression[1]
	if len(expression) == 3 {
		ds.tableNameExpr = expression[2]
	}
	// All arguments must be text-typed.
	for _, e := range []sql.Expression{ds.fromCommitExpr, ds.toCommitExpr} {
		if !sql.IsText(e.Type()) {
			return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), e.String())
		}
	}
	if ds.tableNameExpr != nil && !sql.IsText(ds.tableNameExpr.Type()) {
		return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.tableNameExpr.String())
	}
	return ds, nil
}
// RowIter implements the sql.Node interface. It resolves both commit arguments
// to root values, computes the table deltas between them, and returns an
// iterator over one summary row per changed table (or just the named table).
func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error) {
	fromCommitVal, toCommitVal, tableName, err := ds.evaluateArguments()
	if err != nil {
		return nil, err
	}
	sqledb, ok := ds.database.(Database)
	if !ok {
		return nil, fmt.Errorf("unexpected database type: %T", ds.database)
	}
	sess := dsess.DSessFromSess(ctx.Session)
	// Resolve both refs to root values within this session's database.
	fromRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), fromCommitVal)
	if err != nil {
		return nil, err
	}
	toRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), toCommitVal)
	if err != nil {
		return nil, err
	}
	deltas, err := diff.GetTableDeltas(ctx, fromRoot, toRoot)
	if err != nil {
		return nil, err
	}
	// If tableNameExpr defined, return a single table diff summary result
	if ds.tableNameExpr != nil {
		delta := findMatchingDelta(deltas, tableName)
		diffSum, hasDiff, err := getDiffSummaryNodeFromDelta(ctx, delta, fromRoot, toRoot, tableName)
		if err != nil {
			return nil, err
		}
		if !hasDiff {
			// Table exists but has no data diff: empty result set.
			return NewDiffSummaryTableFunctionRowIter([]diffSummaryNode{}), nil
		}
		return NewDiffSummaryTableFunctionRowIter([]diffSummaryNode{diffSum}), nil
	}
	// Otherwise, summarize every delta between the two roots, skipping tables
	// with no data diff.
	var diffSummaries []diffSummaryNode
	for _, delta := range deltas {
		// Prefer the "to" name; fall back to the "from" name for dropped tables.
		tblName := delta.ToName
		if tblName == "" {
			tblName = delta.FromName
		}
		diffSum, hasDiff, err := getDiffSummaryNodeFromDelta(ctx, delta, fromRoot, toRoot, tblName)
		if err != nil {
			return nil, err
		}
		if hasDiff {
			diffSummaries = append(diffSummaries, diffSum)
		}
	}
	return NewDiffSummaryTableFunctionRowIter(diffSummaries), nil
}
// evaluateArguments returns fromCommitValStr, toCommitValStr and tableName.
// It evaluates the argument expressions to turn them into values this DiffTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
func (ds *DiffSummaryTableFunction) evaluateArguments() (string, string, string, error) {
	var tableName string
	if ds.tableNameExpr != nil {
		val, err := ds.tableNameExpr.Eval(ds.ctx, nil)
		if err != nil {
			return "", "", "", err
		}
		name, ok := val.(string)
		if !ok {
			return "", "", "", ErrInvalidTableName.New(ds.tableNameExpr.String())
		}
		tableName = name
	}
	fromVal, err := ds.fromCommitExpr.Eval(ds.ctx, nil)
	if err != nil {
		return "", "", "", err
	}
	fromStr, ok := fromVal.(string)
	if !ok {
		return "", "", "", fmt.Errorf("received '%v' when expecting commit hash string", fromVal)
	}
	toVal, err := ds.toCommitExpr.Eval(ds.ctx, nil)
	if err != nil {
		return "", "", "", err
	}
	toStr, ok := toVal.(string)
	if !ok {
		return "", "", "", fmt.Errorf("received '%v' when expecting commit hash string", toVal)
	}
	return fromStr, toStr, tableName, nil
}
// getDiffSummaryNodeFromDelta returns diffSummaryNode object and whether there is data diff or not. It gets tables
// from roots and diff summary if there is a valid table exists in both fromRoot and toRoot.
// Returns sql.ErrTableNotFound if the table exists in neither root.
func getDiffSummaryNodeFromDelta(ctx *sql.Context, delta diff.TableDelta, fromRoot, toRoot *doltdb.RootValue, tableName string) (diffSummaryNode, bool, error) {
	var oldColLen int
	var newColLen int
	// Column counts come from the table schemas at each root; a missing side
	// leaves its count at zero (table added or dropped).
	fromTable, _, fromTableExists, err := fromRoot.GetTableInsensitive(ctx, tableName)
	if err != nil {
		return diffSummaryNode{}, false, err
	}
	if fromTableExists {
		fromSch, err := fromTable.GetSchema(ctx)
		if err != nil {
			return diffSummaryNode{}, false, err
		}
		oldColLen = len(fromSch.GetAllCols().GetColumns())
	}
	toTable, _, toTableExists, err := toRoot.GetTableInsensitive(ctx, tableName)
	if err != nil {
		return diffSummaryNode{}, false, err
	}
	if toTableExists {
		toSch, err := toTable.GetSchema(ctx)
		if err != nil {
			return diffSummaryNode{}, false, err
		}
		newColLen = len(toSch.GetAllCols().GetColumns())
	}
	if !fromTableExists && !toTableExists {
		return diffSummaryNode{}, false, sql.ErrTableNotFound.New(tableName)
	}
	// no diff from tableDelta
	if delta.FromTable == nil && delta.ToTable == nil {
		return diffSummaryNode{}, false, nil
	}
	diffSum, hasDiff, keyless, err := getDiffSummary(ctx, delta)
	if err != nil {
		return diffSummaryNode{}, false, err
	}
	return diffSummaryNode{tableName, diffSum, oldColLen, newColLen, keyless}, hasDiff, nil
}
// getDiffSummary accumulates the data-diff summary for a single table delta. It
// returns the accumulated diff.DiffSummaryProgress, whether the table has a data
// diff, whether the table is keyless, and any error raised while summarizing.
func getDiffSummary(ctx *sql.Context, td diff.TableDelta) (diff.DiffSummaryProgress, bool, bool, error) {
	// got this method from diff_output.go
	// todo: use errgroup.Group
	ae := atomicerr.New()
	ch := make(chan diff.DiffSummaryProgress)
	go func() {
		defer close(ch)
		err := diff.SummaryForTableDelta(ctx, ch, td)
		ae.SetIfError(err)
	}()
	acc := diff.DiffSummaryProgress{}
	for p := range ch {
		if ae.IsSet() {
			// Keep draining the channel rather than breaking out of the loop:
			// the producer goroutine blocks on the unbuffered send, so
			// abandoning the channel here would leak that goroutine. The
			// partial accumulation is discarded on the error path below.
			continue
		}
		acc.Adds += p.Adds
		acc.Removes += p.Removes
		acc.Changes += p.Changes
		acc.CellChanges += p.CellChanges
		acc.NewRowSize += p.NewRowSize
		acc.OldRowSize += p.OldRowSize
		acc.NewCellSize += p.NewCellSize
		acc.OldCellSize += p.OldCellSize
	}
	if err := ae.Get(); err != nil {
		return diff.DiffSummaryProgress{}, false, false, err
	}
	keyless, err := td.IsKeyless(ctx)
	if err != nil {
		return diff.DiffSummaryProgress{}, false, keyless, err
	}
	// No row-level changes and no net cell-count change means there is no data
	// diff to report.
	if (acc.Adds+acc.Removes+acc.Changes) == 0 && (acc.OldCellSize-acc.NewCellSize) == 0 {
		return diff.DiffSummaryProgress{}, false, keyless, nil
	}
	return acc, true, keyless, nil
}
//------------------------------------
// diffSummaryTableFunctionRowIter
//------------------------------------

var _ sql.RowIter = &diffSummaryTableFunctionRowIter{}

// diffSummaryTableFunctionRowIter iterates over a pre-computed slice of
// per-table diff summaries, producing one row per table.
type diffSummaryTableFunctionRowIter struct {
	diffSums []diffSummaryNode // summaries to emit; nil once exhausted
	diffIdx  int               // index of the next summary to emit
}

// incrementIndexes advances the iterator. Once past the end it resets the index
// and drops the slice, so subsequent Next calls hit the bounds check and
// return io.EOF.
func (d *diffSummaryTableFunctionRowIter) incrementIndexes() {
	d.diffIdx++
	if d.diffIdx >= len(d.diffSums) {
		d.diffIdx = 0
		d.diffSums = nil
	}
}
// diffSummaryNode pairs a table's accumulated diff summary with the schema
// column counts needed to convert it into a result row.
type diffSummaryNode struct {
	tblName     string                   // display name of the table
	diffSummary diff.DiffSummaryProgress // accumulated row/cell counters
	oldColLen   int                      // column count at the "from" root (0 if the table was added)
	newColLen   int                      // column count at the "to" root (0 if the table was dropped)
	keyless     bool                     // keyless tables only report adds/deletes
}

// NewDiffSummaryTableFunctionRowIter returns a RowIter over the given summaries.
func NewDiffSummaryTableFunctionRowIter(ds []diffSummaryNode) sql.RowIter {
	return &diffSummaryTableFunctionRowIter{
		diffSums: ds,
	}
}
// Next implements sql.RowIter. It returns one row per diff summary, then io.EOF.
func (d *diffSummaryTableFunctionRowIter) Next(ctx *sql.Context) (sql.Row, error) {
	defer d.incrementIndexes()
	// A single bounds check suffices: len of a nil slice is 0, so this also
	// covers the exhausted case (incrementIndexes nils out diffSums), making
	// the original's separate nil check redundant.
	if d.diffIdx >= len(d.diffSums) {
		return nil, io.EOF
	}
	ds := d.diffSums[d.diffIdx]
	return getRowFromDiffSummary(ds.tblName, ds.diffSummary, ds.newColLen, ds.oldColLen, ds.keyless), nil
}
// Close implements sql.RowIter; this iterator holds no resources to release.
func (d *diffSummaryTableFunctionRowIter) Close(context *sql.Context) error {
	return nil
}
// getRowFromDiffSummary converts a diff.DiffSummaryProgress into a result row,
// deriving rows_unmodified, cells_added, and cells_deleted. When the change in
// total cell count does not match what row-level adds/deletes account for, the
// difference is attributed to schema changes (see GetCellsAddedAndDeleted).
// NOTE(review): oldColLen is accepted but never used in this body; cell
// adds/deletes are scaled by newColLen only — confirm this is intended.
func getRowFromDiffSummary(tblName string, dsp diff.DiffSummaryProgress, newColLen, oldColLen int, keyless bool) sql.Row {
	// if table is keyless table, match current CLI command result
	if keyless {
		return sql.Row{
			tblName, // table_name
			nil, // rows_unmodified
			int64(dsp.Adds), // rows_added
			int64(dsp.Removes), // rows_deleted
			nil, // rows_modified
			nil, // cells_added
			nil, // cells_deleted
			nil, // cells_modified
			nil, // old_row_count
			nil, // new_row_count
			nil, // old_cell_count
			nil, // new_cell_count
		}
	}
	numCellInserts, numCellDeletes := GetCellsAddedAndDeleted(dsp, newColLen)
	// Rows present before that were neither changed nor removed.
	// NOTE(review): underflows if Changes+Removes ever exceeds OldRowSize —
	// verify the summary accumulator guarantees this cannot happen.
	rowsUnmodified := dsp.OldRowSize - dsp.Changes - dsp.Removes
	return sql.Row{
		tblName, // table_name
		int64(rowsUnmodified), // rows_unmodified
		int64(dsp.Adds), // rows_added
		int64(dsp.Removes), // rows_deleted
		int64(dsp.Changes), // rows_modified
		int64(numCellInserts), // cells_added
		int64(numCellDeletes), // cells_deleted
		int64(dsp.CellChanges), // cells_modified
		int64(dsp.OldRowSize), // old_row_count
		int64(dsp.NewRowSize), // new_row_count
		int64(dsp.OldCellSize), // old_cell_count
		int64(dsp.NewCellSize), // new_cell_count
	}
}
// GetCellsAddedAndDeleted calculates cells added and deleted given diff.DiffSummaryProgress and toCommit table
// column length. Rows added/deleted account for most cell churn, but not cells
// gained or lost through schema changes; the net change in total cell count
// between the two commits is used to fill in that remainder.
// NOTE(review): rowToCellDeletes scales Removes by newColLen, not the old
// column count — confirm this matches the CLI summary's accounting.
func GetCellsAddedAndDeleted(acc diff.DiffSummaryProgress, newColLen int) (uint64, uint64) {
	var numCellInserts, numCellDeletes float64
	rowToCellInserts := float64(acc.Adds) * float64(newColLen)
	rowToCellDeletes := float64(acc.Removes) * float64(newColLen)
	// Net growth (+) or shrinkage (-) in total cell count across the diff.
	cellDiff := float64(acc.NewCellSize) - float64(acc.OldCellSize)
	if cellDiff > 0 {
		// Table grew: the growth plus replaced-row cells were inserted.
		numCellInserts = cellDiff + rowToCellDeletes
		numCellDeletes = rowToCellDeletes
	} else if cellDiff < 0 {
		// Table shrank: the shrinkage plus replaced-row cells were deleted.
		numCellInserts = rowToCellInserts
		numCellDeletes = math.Abs(cellDiff) + rowToCellInserts
	} else {
		// No net cell change: inserts and deletes must balance.
		if rowToCellInserts != rowToCellDeletes {
			numCellDeletes = math.Max(rowToCellDeletes, rowToCellInserts)
			numCellInserts = math.Max(rowToCellDeletes, rowToCellInserts)
		} else {
			numCellDeletes = rowToCellDeletes
			numCellInserts = rowToCellInserts
		}
	}
	return uint64(numCellInserts), uint64(numCellDeletes)
}

View File

@@ -50,7 +50,7 @@ type DiffTableFunction struct {
toDate *types.Timestamp
}
// NewInstance implements the TableFunction interface
// NewInstance creates a new instance of TableFunction interface
func (dtf *DiffTableFunction) NewInstance(ctx *sql.Context, database sql.Database, expressions []sql.Expression) (sql.Node, error) {
newInstance := &DiffTableFunction{
ctx: ctx,
@@ -192,7 +192,7 @@ func loadDetailsForRef(
// WithChildren implements the sql.Node interface
func (dtf *DiffTableFunction) WithChildren(node ...sql.Node) (sql.Node, error) {
if len(node) != 0 {
panic("unexpected children")
return nil, fmt.Errorf("unexpected children")
}
return dtf, nil
}
@@ -257,7 +257,7 @@ func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, tableName string,
sqledb, ok := dtf.database.(Database)
if !ok {
panic(fmt.Sprintf("unexpected database type: %T", dtf.database))
return fmt.Errorf("unexpected database type: %T", dtf.database)
}
delta, err := dtf.cacheTableDelta(ctx, tableName, fromCommitVal, toCommitVal, sqledb)

View File

@@ -414,7 +414,7 @@ func (d *DoltSession) NewPendingCommit(ctx *sql.Context, dbName string, roots do
mergeParentCommits = []*doltdb.Commit{sessionState.WorkingSet.MergeState().Commit()}
}
pendingCommit, err := actions.GetCommitStaged(ctx, roots, sessionState.WorkingSet.MergeActive(), mergeParentCommits, sessionState.dbData, props)
pendingCommit, err := actions.GetCommitStaged(ctx, roots, sessionState.WorkingSet.MergeActive(), mergeParentCommits, sessionState.dbData.Ddb, props)
if _, ok := err.(actions.NothingStaged); err != nil && !ok {
return nil, err
}

View File

@@ -141,7 +141,7 @@ func doltCommit(ctx *sql.Context,
tx *DoltTransaction,
commit *doltdb.PendingCommit,
workingSet *doltdb.WorkingSet,
hash hash.Hash,
currHash hash.Hash,
) (*doltdb.WorkingSet, *doltdb.Commit, error) {
headRef, err := workingSet.Ref().ToHeadRef()
if err != nil {
@@ -149,7 +149,7 @@ func doltCommit(ctx *sql.Context,
}
workingSet = workingSet.ClearMerge()
newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, commit, workingSet, hash, tx.getWorkingSetMeta(ctx))
newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, commit, workingSet, currHash, tx.getWorkingSetMeta(ctx))
return workingSet, newCommit, err
}

View File

@@ -39,6 +39,7 @@ const (
AllowCommitConflicts = "dolt_allow_commit_conflicts"
ReplicateToRemote = "dolt_replicate_to_remote"
ReadReplicaRemote = "dolt_read_replica_remote"
ReadReplicaForcePull = "dolt_read_replica_force_pull"
ReplicationRemoteURLTemplate = "dolt_replication_remote_url_template"
SkipReplicationErrors = "dolt_skip_replication_errors"
ReplicateHeads = "dolt_replicate_heads"

View File

@@ -46,7 +46,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 80
const SkipPreparedsCount = 79
const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
@@ -1134,6 +1134,28 @@ func TestDiffTableFunctionPrepared(t *testing.T) {
}
}
func TestDiffSummaryTableFunction(t *testing.T) {
harness := newDoltHarness(t)
harness.Setup(setup.MydbData)
for _, test := range DiffSummaryTableFunctionScriptTests {
harness.engine = nil
t.Run(test.Name, func(t *testing.T) {
enginetest.TestScript(t, harness, test)
})
}
}
func TestDiffSummaryTableFunctionPrepared(t *testing.T) {
harness := newDoltHarness(t)
harness.Setup(setup.MydbData)
for _, test := range DiffSummaryTableFunctionScriptTests {
harness.engine = nil
t.Run(test.Name, func(t *testing.T) {
enginetest.TestScriptPrepared(t, harness, test)
})
}
}
func TestCommitDiffSystemTable(t *testing.T) {
harness := newDoltHarness(t)
harness.Setup(setup.MydbData)
@@ -1230,6 +1252,13 @@ func TestDoltCommit(t *testing.T) {
}
}
func TestDoltCommitPrepared(t *testing.T) {
harness := newDoltHarness(t)
for _, script := range DoltCommitTests {
enginetest.TestScriptPrepared(t, harness, script)
}
}
func TestQueriesPrepared(t *testing.T) {
enginetest.TestQueriesPrepared(t, newDoltHarness(t))
}

View File

@@ -734,6 +734,13 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('test', 'main~', 'main');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_summary should fail with a database access error
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Grant single-table access to the underlying user table
User: "root",
@@ -755,6 +762,20 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('test2', 'main~', 'main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_summary should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~', 'main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, dolt_diff_summary should fail for all tables if no access any of tables
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~', 'main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// Revoke select on mydb.test
User: "root",
@@ -783,6 +804,13 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT COUNT(*) FROM dolt_diff('test', 'main~', 'main');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_summary should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff_summary('main~', 'main');",
Expected: []sql.Row{{1}},
},
{
// Revoke multi-table access
User: "root",
@@ -797,6 +825,13 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('test', 'main~', 'main');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff_summary should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_summary('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Grant global access to *.*
User: "root",
@@ -4851,6 +4886,524 @@ var DiffTableFunctionScriptTests = []queries.ScriptTest{
},
}
var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
{
Name: "invalid arguments",
SetUpScript: []string{
"create table t (pk int primary key, c1 varchar(20), c2 varchar(20));",
"call dolt_add('.')",
"set @Commit1 = dolt_commit('-am', 'creating table t');",
"insert into t values(1, 'one', 'two'), (2, 'two', 'three');",
"set @Commit2 = dolt_commit('-am', 'inserting into t');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary('t');",
ExpectedErr: sql.ErrInvalidArgumentNumber,
},
{
Query: "SELECT * from dolt_diff_summary('t', @Commit1, @Commit2, 'extra');",
ExpectedErr: sql.ErrInvalidArgumentNumber,
},
{
Query: "SELECT * from dolt_diff_summary(null, null, null);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_diff_summary(123, @Commit1, @Commit2);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_diff_summary('t', 123, @Commit2);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_diff_summary('t', @Commit1, 123);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_diff_summary('fake-branch', @Commit2, 't');",
ExpectedErrStr: "branch not found: fake-branch",
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, 'fake-branch', 't');",
ExpectedErrStr: "branch not found: fake-branch",
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 'doesnotexist');",
ExpectedErr: sql.ErrTableNotFound,
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, concat('fake', '-', 'branch'), 't');",
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
},
{
Query: "SELECT * from dolt_diff_summary(hashof('main'), @Commit2, 't');",
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, LOWER('T'));",
ExpectedErr: sqle.ErrInvalidNonLiteralArgument,
},
},
},
{
Name: "basic case with single table",
SetUpScript: []string{
"set @Commit0 = HashOf('HEAD');",
"set @Commit1 = dolt_commit('--allow-empty', '-m', 'creating table t');",
// create table t only
"create table t (pk int primary key, c1 varchar(20), c2 varchar(20));",
"call dolt_add('.')",
"set @Commit2 = dolt_commit('-am', 'creating table t');",
// insert 1 row into t
"insert into t values(1, 'one', 'two');",
"set @Commit3 = dolt_commit('-am', 'inserting 1 into table t');",
// insert 2 rows into t and update two cells
"insert into t values(2, 'two', 'three'), (3, 'three', 'four');",
"update t set c1='uno', c2='dos' where pk=1;",
"set @Commit4 = dolt_commit('-am', 'inserting 2 into table t');",
// drop table t only
"drop table t;",
"set @Commit5 = dolt_commit('-am', 'drop table t');",
},
Assertions: []queries.ScriptTestAssertion{
{
// table is added, no data diff, result is empty
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", 0, 1, 0, 0, 3, 0, 0, 0, 1, 0, 3}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", 0, 2, 0, 1, 6, 0, 2, 1, 3, 3, 9}},
},
{
// change from and to commits
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit3, 't');",
Expected: []sql.Row{{"t", 0, 0, 2, 1, 0, 6, 2, 3, 1, 9, 3}},
},
{
// table is dropped
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", 0, 0, 3, 0, 0, 9, 0, 3, 0, 9, 0}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit4, 't');",
Expected: []sql.Row{{"t", 0, 3, 0, 0, 9, 0, 0, 0, 3, 0, 9}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
ExpectedErr: sql.ErrTableNotFound,
},
},
},
{
Name: "basic case with single keyless table",
SetUpScript: []string{
"set @Commit0 = HashOf('HEAD');",
"set @Commit1 = dolt_commit('--allow-empty', '-m', 'creating table t');",
// create table t only
"create table t (id int, c1 varchar(20), c2 varchar(20));",
"call dolt_add('.')",
"set @Commit2 = dolt_commit('-am', 'creating table t');",
// insert 1 row into t
"insert into t values(1, 'one', 'two');",
"set @Commit3 = dolt_commit('-am', 'inserting 1 into table t');",
// insert 2 rows into t and update two cells
"insert into t values(2, 'two', 'three'), (3, 'three', 'four');",
"update t set c1='uno', c2='dos' where id=1;",
"set @Commit4 = dolt_commit('-am', 'inserting 2 into table t');",
// drop table t only
"drop table t;",
"set @Commit5 = dolt_commit('-am', 'drop table t');",
},
Assertions: []queries.ScriptTestAssertion{
{
// table is added, no data diff, result is empty
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", nil, 1, 0, nil, nil, nil, nil, nil, nil, nil, nil}},
},
{
// TODO : (correct result is commented out)
// update row for keyless table deletes the row and insert the new row
// this causes row added = 3 and row deleted = 1
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
//Expected: []sql.Row{{"t", nil, 2, 0, nil, nil, nil, nil, nil, nil, nil, nil}},
Expected: []sql.Row{{"t", nil, 3, 1, nil, nil, nil, nil, nil, nil, nil, nil}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit3, 't');",
//Expected: []sql.Row{{"t", nil, 0, 2, nil, nil, nil, nil, nil, nil, nil, nil}},
Expected: []sql.Row{{"t", nil, 1, 3, nil, nil, nil, nil, nil, nil, nil, nil}},
},
{
// table is dropped
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", nil, 0, 3, nil, nil, nil, nil, nil, nil, nil, nil}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit4, 't');",
Expected: []sql.Row{{"t", nil, 3, 0, nil, nil, nil, nil, nil, nil, nil, nil}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
ExpectedErr: sql.ErrTableNotFound,
},
},
},
{
Name: "basic case with multiple tables",
SetUpScript: []string{
"set @Commit0 = HashOf('HEAD');",
// add table t with 1 row
"create table t (pk int primary key, c1 varchar(20), c2 varchar(20));",
"insert into t values(1, 'one', 'two');",
"call dolt_add('.')",
"set @Commit1 = dolt_commit('-am', 'inserting into table t');",
// add table t2 with 1 row
"create table t2 (pk int primary key, c1 varchar(20), c2 varchar(20));",
"insert into t2 values(100, 'hundred', 'hundert');",
"call dolt_add('.')",
"set @Commit2 = dolt_commit('-am', 'inserting into table t2');",
// changes on both tables
"insert into t values(2, 'two', 'three'), (3, 'three', 'four'), (4, 'four', 'five');",
"update t set c1='uno', c2='dos' where pk=1;",
"insert into t2 values(101, 'hundred one', 'one');",
"set @Commit3 = dolt_commit('-am', 'inserting into table t');",
// changes on both tables
"delete from t where c2 = 'four';",
"update t2 set c2='zero' where pk=100;",
"set @Commit4 = dolt_commit('-am', 'inserting into table t');",
// create keyless table
"create table keyless (id int);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit0, @Commit1);",
Expected: []sql.Row{{"t", 0, 1, 0, 0, 3, 0, 0, 0, 1, 0, 3}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2);",
Expected: []sql.Row{{"t2", 0, 1, 0, 0, 3, 0, 0, 0, 1, 0, 3}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3);",
Expected: []sql.Row{{"t", 0, 3, 0, 1, 9, 0, 2, 1, 4, 3, 12}, {"t2", 1, 1, 0, 0, 3, 0, 0, 1, 2, 3, 6}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4);",
Expected: []sql.Row{{"t", 3, 0, 1, 0, 0, 3, 0, 4, 3, 12, 9}, {"t2", 1, 0, 0, 1, 0, 0, 1, 2, 2, 6, 6}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit2);",
Expected: []sql.Row{{"t", 0, 0, 2, 1, 0, 6, 2, 3, 1, 9, 3}, {"t2", 0, 0, 1, 1, 0, 3, 1, 2, 1, 6, 3}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, 'WORKING');",
Expected: []sql.Row{{"t", 3, 0, 1, 0, 0, 3, 0, 4, 3, 12, 9}, {"t2", 1, 0, 0, 1, 0, 0, 1, 2, 2, 6, 6}},
},
},
},
{
Name: "WORKING and STAGED",
SetUpScript: []string{
"set @Commit0 = HashOf('HEAD');",
"create table t (pk int primary key, c1 text, c2 text);",
"call dolt_add('.')",
"insert into t values (1, 'one', 'two'), (2, 'three', 'four');",
"set @Commit1 = dolt_commit('-am', 'inserting two rows into table t');",
"insert into t values (3, 'five', 'six');",
"delete from t where pk = 2",
"update t set c2 = '100' where pk = 1",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, 'WORKING', 't')",
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
},
{
Query: "SELECT * from dolt_diff_summary('STAGED', 'WORKING', 't')",
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
},
{
Query: "SELECT * from dolt_diff_summary('WORKING', 'STAGED', 't')",
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
},
{
Query: "SELECT * from dolt_diff_summary('WORKING', 'WORKING', 't')",
Expected: []sql.Row{},
},
{
Query: "SELECT * from dolt_diff_summary('STAGED', 'STAGED', 't')",
Expected: []sql.Row{},
},
{
Query: "call dolt_add('.')",
SkipResultsCheck: true,
},
{
Query: "SELECT * from dolt_diff_summary('WORKING', 'STAGED', 't')",
Expected: []sql.Row{},
},
{
Query: "SELECT * from dolt_diff_summary('HEAD', 'STAGED', 't')",
Expected: []sql.Row{{"t", 0, 1, 1, 1, 3, 3, 1, 2, 2, 6, 6}},
},
},
},
{
Name: "diff with branch refs",
SetUpScript: []string{
"create table t (pk int primary key, c1 varchar(20), c2 varchar(20));",
"call dolt_add('.')",
"set @Commit1 = dolt_commit('-am', 'creating table t');",
"insert into t values(1, 'one', 'two');",
"set @Commit2 = dolt_commit('-am', 'inserting row 1 into t in main');",
"select dolt_checkout('-b', 'branch1');",
"alter table t drop column c2;",
"set @Commit3 = dolt_commit('-am', 'dropping column c2 in branch1');",
"delete from t where pk=1;",
"set @Commit4 = dolt_commit('-am', 'deleting row 1 in branch1');",
"insert into t values (2, 'two');",
"set @Commit5 = dolt_commit('-am', 'inserting row 2 in branch1');",
"select dolt_checkout('main');",
"insert into t values (2, 'two', 'three');",
"set @Commit6 = dolt_commit('-am', 'inserting row 2 in main');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary('main', 'branch1', 't');",
Expected: []sql.Row{{"t", 0, 0, 1, 1, 0, 4, 0, 2, 1, 6, 2}},
},
{
Query: "SELECT * from dolt_diff_summary('branch1', 'main', 't');",
Expected: []sql.Row{{"t", 0, 1, 0, 1, 4, 0, 1, 1, 2, 2, 6}},
},
{
Query: "SELECT * from dolt_diff_summary('main~', 'branch1', 't');",
Expected: []sql.Row{{"t", 0, 1, 1, 0, 2, 3, 0, 1, 1, 3, 2}},
},
},
},
{
Name: "schema modification: drop and add column",
SetUpScript: []string{
"create table t (pk int primary key, c1 varchar(20), c2 varchar(20));",
"call dolt_add('.');",
"insert into t values (1, 'one', 'two'), (2, 'two', 'three');",
"set @Commit1 = dolt_commit('-am', 'inserting row 1, 2 into t');",
// drop 1 column and add 1 row
"alter table t drop column c2;",
"set @Commit2 = dolt_commit('-am', 'dropping column c2');",
// drop 1 column and add 1 row
"insert into t values (3, 'three');",
"set @Commit3 = dolt_commit('-am', 'inserting row 3');",
// add 1 column and 1 row and update
"alter table t add column c2 varchar(20);",
"insert into t values (4, 'four', 'five');",
"update t set c2='foo' where pk=1;",
"set @Commit4 = dolt_commit('-am', 'adding column c2, inserting, and updating data');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", 0, 0, 0, 2, 0, 2, 0, 2, 2, 6, 4}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", 2, 1, 0, 0, 2, 0, 0, 2, 3, 4, 6}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit3, 't');",
Expected: []sql.Row{{"t", 0, 1, 0, 2, 2, 2, 0, 2, 3, 6, 6}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", 2, 1, 0, 1, 6, 0, 1, 3, 4, 6, 12}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit4, 't');",
Expected: []sql.Row{{"t", 0, 2, 0, 2, 6, 0, 2, 2, 4, 6, 12}},
},
},
},
{
Name: "schema modification: rename columns",
SetUpScript: []string{
"create table t (pk int primary key, c1 varchar(20), c2 int);",
"call dolt_add('.')",
"set @Commit1 = dolt_commit('-am', 'creating table t');",
"insert into t values(1, 'one', -1), (2, 'two', -2);",
"set @Commit2 = dolt_commit('-am', 'inserting into t');",
"alter table t rename column c2 to c3;",
"set @Commit3 = dolt_commit('-am', 'renaming column c2 to c3');",
"insert into t values (3, 'three', -3);",
"update t set c3=1 where pk=1;",
"set @Commit4 = dolt_commit('-am', 'inserting and updating data');",
"alter table t rename column c3 to c2;",
"insert into t values (4, 'four', -4);",
"set @Commit5 = dolt_commit('-am', 'renaming column c3 to c2, and inserting data');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", 0, 2, 0, 0, 6, 0, 0, 0, 2, 0, 6}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", 1, 1, 0, 1, 3, 0, 1, 2, 3, 6, 9}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", 3, 1, 0, 0, 3, 0, 0, 3, 4, 9, 12}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
Expected: []sql.Row{{"t", 0, 4, 0, 0, 12, 0, 0, 0, 4, 0, 12}},
},
},
},
{
Name: "new table",
SetUpScript: []string{
"create table t1 (a int primary key, b int)",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD', 'WORKING')",
Expected: []sql.Row{},
},
{
Query: "select * from dolt_diff_summary('WORKING', 'HEAD')",
Expected: []sql.Row{},
},
{
Query: "insert into t1 values (1,2)",
SkipResultsCheck: true,
},
{
Query: "select * from dolt_diff_summary('HEAD', 'WORKING', 't1')",
Expected: []sql.Row{{"t1", 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 2}},
},
{
Query: "select * from dolt_diff_summary('WORKING', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", 0, 0, 1, 0, 0, 2, 0, 1, 0, 2, 0}},
},
},
},
{
Name: "dropped table",
SetUpScript: []string{
"create table t1 (a int primary key, b int)",
"call dolt_add('.')",
"insert into t1 values (1,2)",
"call dolt_commit('-am', 'new table')",
"drop table t1",
"call dolt_commit('-am', 'dropped table')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", 0, 0, 1, 0, 0, 2, 0, 1, 0, 2, 0}},
},
{
Query: "select * from dolt_diff_summary('HEAD', 'HEAD~', 't1')",
Expected: []sql.Row{{"t1", 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 2}},
},
},
},
{
Name: "renamed table",
SetUpScript: []string{
"create table t1 (a int primary key, b int)",
"call dolt_add('.')",
"insert into t1 values (1,2)",
"call dolt_commit('-am', 'new table')",
"alter table t1 rename to t2",
"call dolt_add('.')",
"insert into t2 values (3,4)",
"call dolt_commit('-am', 'renamed table')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't2')",
Expected: []sql.Row{{"t2", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
},
{
// Old table name can be matched as well
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", 1, 1, 0, 0, 2, 0, 0, 1, 2, 2, 4}},
},
},
},
{
Name: "add multiple columns, then set and unset a value. Should not show a diff",
SetUpScript: []string{
"CREATE table t (pk int primary key);",
"Insert into t values (1);",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-am', 'setup');",
"alter table t add column col1 int;",
"alter table t add column col2 int;",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-am', 'add columns');",
"UPDATE t set col1 = 1 where pk = 1;",
"UPDATE t set col1 = null where pk = 1;",
"CALL DOLT_COMMIT('--allow-empty', '-am', 'fix short tuple');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary('HEAD~2', 'HEAD');",
Expected: []sql.Row{{"t", 1, 0, 0, 0, 2, 0, 0, 1, 1, 1, 3}},
},
{
Query: "SELECT * from dolt_diff_summary('HEAD~', 'HEAD');",
Expected: []sql.Row{},
},
},
},
}
var LargeJsonObjectScriptTests = []queries.ScriptTest{
{
Name: "JSON under max length limit",
@@ -6313,4 +6866,21 @@ var DoltCommitTests = []queries.ScriptTest{
},
},
},
{
Name: "dolt commit works with arguments",
SetUpScript: []string{
"CREATE table t (pk int primary key);",
"INSERT INTO t VALUES (1);",
"CALL DOLT_ADD('t');",
"CALL DOLT_COMMIT('-m', concat('author: ','somebody'));",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT message from dolt_log where message = 'author: somebody'",
Expected: []sql.Row{
{"author: somebody"},
},
},
},
},
}

View File

@@ -113,6 +113,11 @@ func (rrd ReadReplicaDatabase) PullFromRemote(ctx *sql.Context) error {
return sql.ErrUnknownSystemVariable.New(dsess.ReplicateAllHeads)
}
behavior := pullBehavior_fastForward
if ReadReplicaForcePull() {
behavior = pullBehavior_forcePull
}
dSess := dsess.DSessFromSess(ctx.Session)
currentBranchRef, err := dSess.CWBHeadRef(ctx, rrd.name)
if err != nil {
@@ -131,7 +136,7 @@ func (rrd ReadReplicaDatabase) PullFromRemote(ctx *sql.Context) error {
if err != nil {
return err
}
err = pullBranches(ctx, rrd, branches, currentBranchRef)
err = pullBranches(ctx, rrd, branches, currentBranchRef, behavior)
if err != nil {
return err
}
@@ -141,7 +146,7 @@ func (rrd ReadReplicaDatabase) PullFromRemote(ctx *sql.Context) error {
return err
}
toPull, toDelete, err := getReplicationBranches(ctx, rrd)
err = pullBranches(ctx, rrd, toPull, currentBranchRef)
err = pullBranches(ctx, rrd, toPull, currentBranchRef, behavior)
if err != nil {
return err
}
@@ -155,7 +160,12 @@ func (rrd ReadReplicaDatabase) PullFromRemote(ctx *sql.Context) error {
return nil
}
func pullBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []string, currentBranchRef ref.DoltRef) error {
// pullBehavior controls how pullBranches updates local branch refs from the
// remote: fast-forward only, or a forced update via SetHead that can move a
// ref to a non-fast-forward remote head.
type pullBehavior bool
// pullBehavior_fastForward only allows fast-forward ref updates.
const pullBehavior_fastForward pullBehavior = false
// pullBehavior_forcePull force-sets refs to the remote head even when the
// update is not a fast-forward (enabled via ReadReplicaForcePull).
const pullBehavior_forcePull pullBehavior = true
func pullBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []string, currentBranchRef ref.DoltRef, behavior pullBehavior) error {
refSpecs, err := env.ParseRSFromArgs(rrd.remote.Name, branches)
if err != nil {
return err
@@ -176,7 +186,15 @@ func pullBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []string,
return nil, err
}
err = rrd.ddb.FastForward(fetchCtx, rtRef, srcDBCommit)
if behavior == pullBehavior_forcePull {
commitHash, herr := srcDBCommit.HashOf()
if herr != nil {
return nil, err
}
err = rrd.ddb.SetHead(fetchCtx, rtRef, commitHash)
} else {
err = rrd.ddb.FastForward(fetchCtx, rtRef, srcDBCommit)
}
if err != nil {
return nil, err
}
@@ -188,7 +206,15 @@ func pullBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []string,
switch {
case err != nil:
case branchExists:
err = rrd.ddb.FastForward(fetchCtx, branch, srcDBCommit)
if behavior == pullBehavior_forcePull {
commitHash, herr := srcDBCommit.HashOf()
if herr != nil {
return nil, err
}
err = rrd.ddb.SetHead(fetchCtx, branch, commitHash)
} else {
err = rrd.ddb.FastForward(fetchCtx, branch, srcDBCommit)
}
default:
err = rrd.ddb.NewBranchAtCommit(fetchCtx, branch, srcDBCommit)
}

View File

@@ -32,9 +32,9 @@ type remotesrvStore struct {
var _ remotesrv.DBCache = remotesrvStore{}
func (s remotesrvStore) Get(org, repo, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
func (s remotesrvStore) Get(path, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
sess := dsess.DSessFromSess(s.ctx.Session)
db, err := sess.Provider().Database(s.ctx, repo)
db, err := sess.Provider().Database(s.ctx, path)
if err != nil {
return nil, err
}

View File

@@ -18,6 +18,7 @@ import (
"context"
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/dolthub/go-mysql-server/sql"
@@ -320,6 +321,13 @@ func SqlColToStr(sqlType sql.Type, col interface{}) (string, error) {
} else {
return "false", nil
}
case sql.SpatialColumnType:
	// Spatial values have no plain-text form; serialize the value and render
	// its raw bytes as a hex literal (e.g. 0x0102...).
	res, err := sqlType.SQL(sqlColToStrContext, nil, col)
	// Check the error before touching res: on failure res is not valid to use.
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("0x%X", res.Raw()), nil
default:
res, err := sqlType.SQL(sqlColToStrContext, nil, col)
if err != nil {

View File

@@ -55,6 +55,14 @@ func AddDoltSystemVariables() {
Type: sql.NewSystemStringType(dsess.ReadReplicaRemote),
Default: "",
},
{
	// Registers the dolt_read_replica_force_pull system variable (global,
	// dynamic). When enabled, read-replica pulls force-set branch refs
	// instead of requiring fast-forward updates (see ReadReplicaForcePull).
	// NOTE(review): Type is a string sysvar but Default is int8(0) — confirm
	// the intended type/default pairing.
	Name:              dsess.ReadReplicaForcePull,
	Scope:             sql.SystemVariableScope_Global,
	Dynamic:           true,
	SetVarHintApplies: false,
	Type:              sql.NewSystemStringType(dsess.ReadReplicaForcePull),
	Default:           int8(0),
},
{
Name: dsess.SkipReplicationErrors,
Scope: sql.SystemVariableScope_Global,
@@ -161,3 +169,11 @@ func SkipReplicationWarnings() bool {
}
return skip == SysVarTrue
}
// ReadReplicaForcePull reports whether the dolt_read_replica_force_pull
// system variable is set to true, in which case read-replica pulls may
// force-set branch heads rather than requiring fast-forward updates.
// Panics if the dolt system variables have not been registered yet.
func ReadReplicaForcePull() bool {
	_, forcePull, ok := sql.SystemVariables.GetGlobal(dsess.ReadReplicaForcePull)
	if !ok {
		panic("dolt system variables not loaded")
	}
	return forcePull == SysVarTrue
}

View File

@@ -136,6 +136,8 @@ type Database interface {
// if this operation is not supported.
StatsSummary() string
Format() *types.NomsBinFormat
// chunkStore returns the ChunkStore used to read and write
// groups of values to the database efficiently. This interface is a low-
// level detail of the database that should infrequently be needed by

View File

@@ -608,6 +608,7 @@ func assertDatasetHash(
// CommitWithWorkingSet updates two Datasets atomically: the working set, and its corresponding HEAD. Uses the same
// global locking mechanism as UpdateWorkingSet.
// The current dataset head will be filled in as the first parent of the new commit if not already present.
func (db *database) CommitWithWorkingSet(
ctx context.Context,
commitDS, workingSetDS Dataset,
@@ -619,6 +620,17 @@ func (db *database) CommitWithWorkingSet(
return Dataset{}, Dataset{}, err
}
// Prepend the current head hash to the list of parents if one was provided. This is only necessary if parents were
// provided because we fill it in automatically in buildNewCommit otherwise.
if len(opts.Parents) > 0 {
headHash, ok := commitDS.MaybeHeadAddr()
if ok {
if !hasParentHash(opts, headHash) {
opts.Parents = append([]hash.Hash{headHash}, opts.Parents...)
}
}
}
commit, err := buildNewCommit(ctx, commitDS, val, opts)
if err != nil {
return Dataset{}, Dataset{}, err
@@ -859,7 +871,7 @@ func (db *database) validateRefAsCommit(ctx context.Context, r types.Ref) (types
}
func buildNewCommit(ctx context.Context, ds Dataset, v types.Value, opts CommitOptions) (*Commit, error) {
if opts.Parents == nil || len(opts.Parents) == 0 {
if len(opts.Parents) == 0 {
headAddr, ok := ds.MaybeHeadAddr()
if ok {
opts.Parents = []hash.Hash{headAddr}
@@ -867,14 +879,7 @@ func buildNewCommit(ctx context.Context, ds Dataset, v types.Value, opts CommitO
} else {
curr, ok := ds.MaybeHeadAddr()
if ok {
found := false
for _, h := range opts.Parents {
if h == curr {
found = true
break
}
}
if !found {
if !hasParentHash(opts, curr) {
return nil, ErrMergeNeeded
}
}
@@ -883,6 +888,17 @@ func buildNewCommit(ctx context.Context, ds Dataset, v types.Value, opts CommitO
return newCommitForValue(ctx, ds.db.chunkStore(), ds.db, ds.db.nodeStore(), v, opts)
}
// hasParentHash reports whether curr is among the parent hashes in opts.
func hasParentHash(opts CommitOptions, curr hash.Hash) bool {
	// Return as soon as a match is found; no need for a found flag.
	for _, h := range opts.Parents {
		if h == curr {
			return true
		}
	}
	return false
}
func (db *database) doHeadUpdate(ctx context.Context, ds Dataset, updateFunc func(ds Dataset) error) (Dataset, error) {
err := updateFunc(ds)
if err != nil {

View File

@@ -43,32 +43,28 @@ func NewLocalCSCache(filesys filesys.Filesys) *LocalCSCache {
}
}
func (cache *LocalCSCache) Get(org, repo, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
func (cache *LocalCSCache) Get(repopath, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
cache.mu.Lock()
defer cache.mu.Unlock()
id := filepath.Join(org, repo)
id := filepath.FromSlash(repopath)
if cs, ok := cache.dbs[id]; ok {
return cs, nil
}
var newCS *nbs.NomsBlockStore
if cache.fs != nil {
err := cache.fs.MkDirs(id)
if err != nil {
return nil, err
}
path, err := cache.fs.Abs(id)
if err != nil {
return nil, err
}
err := cache.fs.MkDirs(id)
if err != nil {
return nil, err
}
path, err := cache.fs.Abs(id)
if err != nil {
return nil, err
}
newCS, err = nbs.NewLocalStore(context.TODO(), nbfVerStr, path, defaultMemTableSize, nbs.NewUnlimitedMemQuotaProvider())
if err != nil {
return nil, err
}
newCS, err := nbs.NewLocalStore(context.TODO(), nbfVerStr, path, defaultMemTableSize, nbs.NewUnlimitedMemQuotaProvider())
if err != nil {
return nil, err
}
cache.dbs[id] = newCS
@@ -80,6 +76,6 @@ type SingletonCSCache struct {
s remotesrv.RemoteSrvStore
}
func (cache SingletonCSCache) Get(org, repo, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
// Get implements remotesrv.DBCache by always returning the single wrapped
// store; the path and nbfVerStr arguments are intentionally ignored.
func (cache SingletonCSCache) Get(path, nbfVerStr string) (remotesrv.RemoteSrvStore, error) {
	return cache.s, nil
}

View File

@@ -54,6 +54,7 @@ func main() {
*httpHostParam = fmt.Sprintf("%s:%d", *httpHostParam, *httpPortParam)
} else {
*httpPortParam = 80
*httpHostParam = ":80"
log.Println("'http-port' parameter not provided. Using default port 80")
}

View File

@@ -26,7 +26,31 @@ npm install -g bats
cd go/cmd/dolt && go install . && cd -
cd go/store/cmd/noms && go install . && cd -
````
3. Go to the directory with the bats tests and run:
3. Make sure you have `python3` installed.
This came with my Mac Developer Tools and was on my PATH.
4. Install the Python dependencies with `pip install mysql-connector-python` and `pip install pyarrow`.
Note: install `mysql-connector-python` specifically; the similarly named `mysql.connector` package mostly worked but caused SSL errors.
```
pip3 install mysql-connector-python
pip install pyarrow
```
5. Install `parquet` and its dependencies
I used Homebrew on Mac to install `parquet`. You also need to install `hadoop` and set `PARQUET_RUNTIME_JAR` to get bats to work. Here's what I ended up running.
```
brew install parquet-cli
brew install hadoop
export PARQUET_RUNTIME_JAR=/opt/homebrew/opt/parquet-cli/libexec/parquet-cli-1.12.3-runtime.jar
```
6. Go to the directory with the bats tests and run:
```
bats .
```

View File

@@ -326,8 +326,10 @@ SQL
[[ "$output" =~ "2 Rows Added (100.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "12 Cells Added (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(2 Entries vs 4 Entries)" ]] || false
[[ "$output" =~ "(2 Row Entries vs 4 Row Entries)" ]] || false
dolt add test
dolt commit -m "added two rows"
@@ -338,8 +340,10 @@ SQL
[[ "$output" =~ "0 Rows Added (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Row Modified (25.00%)" ]] || false
[[ "$output" =~ "0 Cells Added (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "2 Cells Modified (8.33%)" ]] || false
[[ "$output" =~ "(4 Entries vs 4 Entries)" ]] || false
[[ "$output" =~ "(4 Row Entries vs 4 Row Entries)" ]] || false
dolt add test
dolt commit -m "modified first row"
@@ -350,8 +354,10 @@ SQL
[[ "$output" =~ "0 Rows Added (0.00%)" ]] || false
[[ "$output" =~ "1 Row Deleted (25.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Added (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Deleted (25.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(4 Entries vs 3 Entries)" ]] || false
[[ "$output" =~ "(4 Row Entries vs 3 Row Entries)" ]] || false
}
@test "diff: summary comparing row with a deleted cell and an added cell" {
@@ -367,8 +373,10 @@ SQL
[[ "$output" =~ "0 Rows Added (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Row Modified (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Added (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Cell Modified (16.67%)" ]] || false
[[ "$output" =~ "(1 Entry vs 1 Entry)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 1 Row Entry)" ]] || false
dolt add test
dolt commit -m "row modified"
dolt sql -q "replace into test values (0, 1, 2, 3, 4, 5)"
@@ -378,8 +386,10 @@ SQL
[[ "$output" =~ "0 Rows Added (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Row Modified (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Added (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Cell Modified (16.67%)" ]] || false
[[ "$output" =~ "(1 Entry vs 1 Entry)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 1 Row Entry)" ]] || false
}
@test "diff: summary comparing two branches" {
@@ -397,8 +407,10 @@ SQL
[[ "$output" =~ "1 Row Added (100.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Added (100.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(1 Entry vs 2 Entries)" ]] || false
[[ "$output" =~ "(1 Row Entry vs 2 Row Entries)" ]] || false
}
@test "diff: summary shows correct changes after schema change" {
@@ -423,8 +435,10 @@ DELIM
[[ "$output" =~ "1 Row Added (33.33%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "10 Cells Added (55.56%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(3 Entries vs 4 Entries)" ]] || false
[[ "$output" =~ "(3 Row Entries vs 4 Row Entries)" ]] || false
dolt sql -q "replace into employees values (0, 'tim', 'sehn', 'ceo', '2 years ago', '', 'Santa Monica')"
@@ -435,8 +449,10 @@ DELIM
[[ "$output" =~ "1 Row Added (33.33%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "1 Row Modified (33.33%)" ]] || false
[[ "$output" =~ "10 Cells Added (55.56%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "2 Cells Modified (11.11%)" ]] || false
[[ "$output" =~ "(3 Entries vs 4 Entries)" ]] || false
[[ "$output" =~ "(3 Row Entries vs 4 Row Entries)" ]] || false
}
@test "diff: summary gets summaries for all tables with changes" {

View File

@@ -59,7 +59,7 @@ SQL
0,1,2,3,4,5
9,8,7,6,5,4
'|dolt table import -u test_int
dolt table export --file-type=csv test_int|python -c '
dolt table export --file-type=csv test_int | python3 -c '
import sys
rows = []
for line in sys.stdin:
@@ -327,14 +327,16 @@ SQL
[[ "$output" =~ "Successfully exported data." ]] || false
[ -f result.parquet ]
run parquet cat result.parquet > output.json
run parquet cat result.parquet
[ "$status" -eq 0 ]
row1='{"pk": 1, "col1": "row1", "col2": 22}'
row2='{"pk": 2, "col1": "row2", "col2": 33}'
row3='{"pk": 3, "col1": "row3", "col2": 22}'
[[ "$output" =~ "$row1" ]] || false
[[ "$output" =~ "$row2" ]] || false
[[ "$output" =~ "$row3" ]] || false
[ "${lines[0]}" = "$row1" ]
[ "${lines[1]}" = "$row2" ]
[ "${lines[2]}" = "$row3" ]
}
@test "export-tables: parquet file export compare pandas and pyarrow reads" {
@@ -444,16 +446,17 @@ SQL
[[ "$output" =~ "Successfully exported data." ]] || false
[ -f test.parquet ]
run parquet cat test.parquet > output.json
run parquet cat test.parquet
[ "$status" -eq 0 ]
row1='{"pk": 0, "int": 0, "string": "asdf", "boolean": 1, "float": 0.0, "uint": 0, "uuid": "00000000-0000-0000-0000-000000000000"}'
row2='{"pk": 1, "int": -1, "string": "qwerty", "boolean": 0, "float": -1.0, "uint": 1, "uuid": "00000000-0000-0000-0000-000000000001"}'
row3='{"pk": 2, "int": 1, "string": "", "boolean": 1, "float": 0.0, "uint": 0, "uuid": "123e4567-e89b-12d3-a456-426655440000"}'
[[ "$output" =~ "$row1" ]] || false
[[ "$output" =~ "$row2" ]] || false
[[ "$output" =~ "$row3" ]] || false
}
[ "${lines[0]}" = "$row1" ]
[ "${lines[1]}" = "$row2" ]
[ "${lines[2]}" = "$row3" ]
@test "export-tables: table export decimal and bit types to parquet" {
skiponwindows "Missing dependencies"
dolt sql -q "CREATE TABLE more (pk BIGINT NOT NULL,v DECIMAL(9,5),b BIT(10),PRIMARY KEY (pk));"
@@ -465,7 +468,7 @@ SQL
[[ "$output" =~ "Successfully exported data." ]] || false
[ -f more.parquet ]
run parquet cat more.parquet > output.json
run parquet cat more.parquet
[ "$status" -eq 0 ]
[[ "$output" =~ '{"pk": 1, "v": "1234.56789", "b": 511}' ]] || false
[[ "$output" =~ '{"pk": 2, "v": "5235.66789", "b": 514}' ]] || false

View File

@@ -235,6 +235,14 @@ stop_sql_server() {
#
# Runs a query against the test sql-server on a port derived from the current
# process id (kept within the 1024-65535 user range), then delegates all
# positional arguments to server_query_with_port.
server_query() {
    let PORT="$$ % (65536-1024) + 1024"
    server_query_with_port "$PORT" "$@"
}
# See server_query, but first parameter is the port sql-server is running on,
# every other parameter is positionally one later.
server_query_with_port() {
let PORT="$1"
shift
PYTEST_DIR="$BATS_TEST_DIRNAME/helper"
echo Executing server_query
python3 -u -c "$PYTHON_QUERY_SCRIPT" -- "$PYTEST_DIR" "$1" "$PORT" "$2" "$3" "$4" "$5" "$6" "$7"

View File

@@ -504,6 +504,69 @@ SQL
[[ "$output" =~ "remote not found: 'unknown'" ]] || false
}
@test "replication: non-fast-forward pull fails replication" {
    # Create a clone, add a commit, and push it to the shared file remote.
    dolt clone file://./rem1 clone1
    cd clone1
    dolt sql -q "create table t1 (a int primary key)"
    dolt sql -q "insert into t1 values (1), (2), (3);"
    dolt add .
    dolt commit -am "new commit"
    dolt push origin main
    # Configure repo1 as a read replica of that remote and verify it
    # replicates the new table on read.
    cd ../repo1
    dolt config --local --add sqlserver.global.dolt_read_replica_remote remote1
    dolt config --local --add sqlserver.global.dolt_replicate_heads main
    run dolt sql -q "show tables"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "t1" ]] || false
    # Rewrite history in the clone and force-push, making the remote head a
    # non-fast-forward of the replica's current head.
    cd ../clone1
    dolt checkout -b new-main HEAD~
    dolt sql -q "create table t1 (a int primary key)"
    dolt sql -q "insert into t1 values (1), (2), (3);"
    dolt add .
    dolt commit -am "new commit"
    dolt push -f origin new-main:main
    # Without force-pull enabled, the replica's fast-forward-only pull fails.
    cd ../repo1
    run dolt sql -q "show tables"
    [ "$status" -ne 0 ]
    [[ "$output" =~ "replication" ]] || false
}
@test "replication: non-fast-forward pull with force pull setting succeeds replication" {
    # Create a clone, add a commit, and push it to the shared file remote.
    dolt clone file://./rem1 clone1
    cd clone1
    dolt sql -q "create table t1 (a int primary key)"
    dolt sql -q "insert into t1 values (1), (2), (3);"
    dolt add .
    dolt commit -am "new commit"
    dolt push origin main
    # Configure repo1 as a read replica with force-pull enabled and verify it
    # replicates the initial data (1+2+3 = 6).
    cd ../repo1
    dolt config --local --add sqlserver.global.dolt_read_replica_remote remote1
    dolt config --local --add sqlserver.global.dolt_replicate_heads main
    dolt config --local --add sqlserver.global.dolt_read_replica_force_pull 1
    run dolt sql -q "select sum(a) from t1"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "6" ]] || false
    # Rewrite history in the clone and force-push a non-fast-forward head.
    cd ../clone1
    dolt checkout -b new-main HEAD~
    dolt sql -q "create table t1 (a int primary key)"
    dolt sql -q "insert into t1 values (4), (5), (6);"
    dolt add .
    dolt commit -am "new commit"
    dolt push -f origin new-main:main
    # With force-pull, the replica adopts the rewritten head and serves the
    # new data (4+5+6 = 15).
    cd ../repo1
    run dolt sql -q "select sum(a) from t1"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "15" ]] || false
}
@test "replication: pull bad remote quiet warning" {
cd repo1
dolt config --local --add sqlserver.global.dolt_read_replica_remote unknown

View File

@@ -0,0 +1,129 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
load $BATS_TEST_DIRNAME/helper/query-server-common.bash
# Creates directory $1 and initializes a new dolt repo inside it, returning
# to the original working directory afterwards.
make_repo() {
    mkdir "$1"
    cd "$1"
    dolt init
    cd ..
}
setup() {
    # These tests are flaky on Windows, so skip them there.
    skiponwindows "tests are flaky on Windows"
    # Prepare the test sandbox without running dolt init in the test cwd.
    setup_no_dolt_init
    make_repo repo1
    make_repo repo2
}
teardown() {
    # Stop any sql-server started by the test before common cleanup runs.
    stop_sql_server
    teardown_common
}
@test "sql-server-cluster: persisted role and epoch take precedence over bootstrap values" {
echo "
user:
name: dolt
listener:
host: 0.0.0.0
port: 3309
behavior:
read_only: false
autocommit: true
cluster:
standby_remotes:
- name: doltdb-1
remote_url_template: http://doltdb-1.doltdb:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 10
remotesapi:
port: 50051" > server.yaml
dolt sql-server --config server.yaml &
SERVER_PID=$!
wait_for_connection 3309 5000
server_query_with_port 3309 repo1 1 dolt "" "select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch" "@@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch\nstandby,10"
kill $SERVER_PID
wait $SERVER_PID
SERVER_PID=
echo "
user:
name: dolt
listener:
host: 0.0.0.0
port: 3309
behavior:
read_only: false
autocommit: true
cluster:
standby_remotes:
- name: doltdb-1
remote_url_template: http://doltdb-1.doltdb:50051/{database}
bootstrap_role: primary
bootstrap_epoch: 0
remotesapi:
port: 50051" > server.yaml
dolt sql-server --config server.yaml &
SERVER_PID=$!
wait_for_connection 3309 5000
server_query_with_port 3309 repo1 1 dolt "" "select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch" "@@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch\nstandby,10"
}
@test "sql-server-cluster: dolt_assume_cluster_role" {
echo "
user:
name: dolt
listener:
host: 0.0.0.0
port: 3309
behavior:
read_only: false
autocommit: true
cluster:
standby_remotes:
- name: doltdb-1
remote_url_template: http://doltdb-1.doltdb:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 10
remotesapi:
port: 50051" > server.yaml
dolt sql-server --config server.yaml &
SERVER_PID=$!
wait_for_connection 3309 5000
# stale epoch
run server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('standby', '9');" "" 1
[[ "$output" =~ "error assuming role" ]] || false
# wrong role at current epoch
run server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('primary', '10');" "" 1
[[ "$output" =~ "error assuming role" ]] || false
# wrong role name
run server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('backup', '11');" "" 1
[[ "$output" =~ "error assuming role" ]] || false
# successes
# same role, same epoch
server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('standby', '10');" "status\n0"
# same role, new epoch
server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('standby', '12'); select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch;" "status\n0;@@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch\nstandby,12"
# new role, new epoch
server_query_with_port 3309 repo1 1 dolt "" "call dolt_assume_cluster_role('primary', '13'); select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch;" "status\n0;@@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch\nprimary,13"
# Server comes back up with latest assumed role.
kill $SERVER_PID
wait $SERVER_PID
SERVER_PID=
dolt sql-server --config server.yaml &
SERVER_PID=$!
wait_for_connection 3309 5000
server_query_with_port 3309 repo1 1 dolt "" "select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch;" "@@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch\nprimary,13"
}

View File

@@ -35,7 +35,7 @@ teardown() {
srv_pid=$!
cd ../
dolt clone http://localhost:50051/ignored_named/remote repo1
dolt clone http://localhost:50051/remote repo1
cd repo1
run dolt ls
[[ "$output" =~ "vals" ]] || false
@@ -62,7 +62,7 @@ SQL
cd ../
# By cloning here, we have a near-at-hand way to wait for the server to be ready.
dolt clone http://localhost:50051/ignored_named/remote cloned_remote
dolt clone http://localhost:50051/remote cloned_remote
dolt sql-client -u root <<SQL
create database created;
@@ -73,7 +73,7 @@ call dolt_add('vals');
call dolt_commit('-m', 'add some vals');
SQL
dolt clone http://localhost:50051/ignored_named/created cloned_created
dolt clone http://localhost:50051/created cloned_created
cd cloned_created
run dolt ls
[[ "$output" =~ "vals" ]] || false
@@ -93,7 +93,7 @@ SQL
srv_pid=$!
cd ../
dolt clone http://localhost:50051/test-org/remote remote_cloned
dolt clone http://localhost:50051/remote remote_cloned
cd remote_cloned
dolt sql -q 'insert into vals values (1), (2), (3), (4), (5);'
@@ -110,7 +110,7 @@ SQL
dolt sql-server --remotesapi-port 50051 &
srv_pid=$!
dolt clone http://localhost:50051/test-org/remote_one remote_one_cloned
dolt clone http://localhost:50051/remote_one remote_one_cloned
cd ../remote_two
dolt init
@@ -133,7 +133,7 @@ SQL
cd ../../
mkdir -p read_replica
cd read_replica
dolt clone http://127.0.0.1:50051/test-org/db
dolt clone http://127.0.0.1:50051/db
cd db
dolt sql <<SQL
set @@persist.dolt_read_replica_remote = 'origin';

View File

@@ -0,0 +1,98 @@
// mysql2/promise gives async/await-friendly connections.
const mysql = require('mysql2/promise');
// CLI usage: node <script> <user> <port> <dbName>
const args = process.argv.slice(2);
const user = args[0];
const port = args[1];
const dbName = args[2];
// Creates the `users` table on a fresh connection.
// Exits the process with status 1 if table creation fails.
async function createTable() {
  const connection = await getConnection();
  try {
    await connection.execute("create table users (name varchar(20))");
  } catch (e) {
    console.error(`Error creating table:`, e);
    process.exit(1);
  } finally {
    connection.end();
  }
}
// Stages and dolt-commits the new table.
// Exits with status 1 on failure, matching the error handling in
// createTable/insertAuthor — previously the error was only logged, so a
// failed commit surfaced later as a confusing validateCommits failure.
async function commitTable() {
  const conn = await getConnection();
  try {
    await conn.execute(`call dolt_add('.')`);
    await conn.execute(`call dolt_commit('-am', 'new table')`);
  } catch (err) {
    console.error(`Error committing table:`, err);
    process.exit(1);
  } finally {
    conn.end();
  }
}
// 36 distinct author names; one concurrent insert+dolt_commit is issued
// per name, and validateCommits expects exactly this many commits.
const authors = [
  'bob', 'john', 'mary', 'alice',
  'bob2', 'john2', 'mary2', 'alice2',
  'bob3', 'john3', 'mary3', 'alice3',
  'bob4', 'john4', 'mary4', 'alice4',
  'bob5', 'john5', 'mary5', 'alice5',
  'bob6', 'john6', 'mary6', 'alice6',
  'bob7', 'john7', 'mary7', 'alice7',
  'bob8', 'john8', 'mary8', 'alice8',
  'bob9', 'john9', 'mary9', 'alice9'
];
// Inserts one author row and dolt-commits it on its own connection.
// Exits the process with status 1 if the insert/commit fails.
async function insertAuthor(name) {
  const connection = await getConnection();
  try {
    await connection.execute('start transaction');
    await connection.execute('INSERT INTO users (name) VALUES(?);', [name]);
    await connection.execute(`call dolt_commit('-am', concat('created author', ?))`, [name]);
  } catch (e) {
    console.error(`Error committing ${name}:`, e);
    process.exit(1)
  } finally {
    connection.end();
  }
}
// Verifies that every concurrent insertAuthor produced exactly one
// dolt commit; exits with status 1 on query error or count mismatch.
// The unused `name` parameter was dropped — main() calls this with no
// arguments, so the change is backward-compatible.
async function validateCommits() {
  const conn = await getConnection();
  let results;
  try {
    results = await conn.query(`select count(*) as c from dolt_log where message like 'created author%'`);
  } catch (err) {
    console.error(`Error:`, err);
    process.exit(1)
  } finally {
    conn.end();
  }
  // conn.query resolves to [rows, fields]; the single row's `c` column
  // holds the commit count.
  const count = results[0][0].c;
  const expectedCount = authors.length;
  if (count !== expectedCount) {
    console.error(`Unexpected number of commits: expected ${expectedCount}, was ${count}`);
    process.exit(1)
  }
}
// Opens a fresh connection to the local sql-server using the
// user/port/database supplied on the command line.
async function getConnection() {
  return await mysql.createConnection({
    host: '127.0.0.1',
    port: port,
    user: user,
    database: dbName,
  });
}
// Regression test concurrent dolt_commit with node clients
// https://github.com/dolthub/dolt/issues/4361
async function main() {
  await createTable();
  await commitTable();
  // Fire all inserts concurrently to exercise dolt_commit under contention.
  const pending = authors.map((author) => insertAuthor(author));
  await Promise.all(pending);
  await validateCommits();
}
main();