Merge branch 'main' into james/show-fk-diff

This commit is contained in:
James Cor
2023-02-15 12:12:03 -08:00
18 changed files with 3840 additions and 112 deletions
+5
View File
@@ -26,6 +26,11 @@ Lots of things! Dolt is a generally useful tool with countless
applications. But if you want some ideas, [here's how people are using
it so far](https://www.dolthub.com/blog/2022-07-11-dolt-case-studies/).
Dolt can be [set up as a replica of your existing MySQL or MariaDB](https://www.dolthub.com/blog/2023-02-17-binlog-replication-preview/)
database using standard MySQL binlog replication. Every write becomes
a Dolt commit. This is a great way to get the version control benefits
of Dolt and keep an existing MySQL or MariaDB database.
# Dolt CLI
The `dolt` CLI has the same commands as `git`, with some extras.
+41 -14
View File
@@ -24,6 +24,7 @@ import (
gms "github.com/dolthub/go-mysql-server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/analyzer"
"github.com/dolthub/go-mysql-server/sql/binlogreplication"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
_ "github.com/dolthub/go-mysql-server/sql/variables"
"github.com/dolthub/vitess/go/vt/sqlparser"
@@ -32,6 +33,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
dblr "github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
@@ -51,19 +53,20 @@ type sessionFactory func(ctx *sql.Context, mysqlSess *sql.BaseSession, pro sql.D
type contextFactory func(ctx context.Context) (*sql.Context, error)
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
BranchCtrlFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
ClusterController *cluster.Controller
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
BranchCtrlFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
ClusterController *cluster.Controller
BinlogReplicaController binlogreplication.BinlogReplicaController
}
// NewSqlEngine returns a SqlEngine
@@ -73,7 +76,6 @@ func NewSqlEngine(
format PrintResultFormat,
config *SqlEngineConfig,
) (*SqlEngine, error) {
if ok, _ := mrEnv.IsLocked(); ok {
config.IsServerLocked = true
}
@@ -169,6 +171,8 @@ func NewSqlEngine(
return nil, err
}
configureBinlogReplicaController(config, engine, sess)
return &SqlEngine{
provider: pro,
contextFactory: newSqlContext(sess, config.InitialDb),
@@ -272,6 +276,29 @@ func (se *SqlEngine) Close() error {
return nil
}
// configureBinlogReplicaController examines the specified |config| and if a binlog replica controller is provided,
// it creates a new context from the specified |sess| for the replica's applier to use, and it configures the
// binlog replica controller with the |engine|. Returns nil without side effects when no controller is configured.
func configureBinlogReplicaController(config *SqlEngineConfig, engine *gms.Engine, sess *dsess.DoltSession) error {
// No-op unless a binlog replica controller was supplied in the config.
if config.BinlogReplicaController == nil {
return nil
}
contextFactory := newSqlContext(sess, config.InitialDb)
newCtx, err := contextFactory(context.Background())
if err != nil {
return err
}
// The replication applier runs outside any user connection, so give its
// context an explicit client identity to execute under.
newCtx.SetClient(sql.Client{
User: "root",
Address: "localhost",
})
// NOTE(review): the execution context is installed on the package-level
// dblr.DoltBinlogReplicaController, while the engine is wired to
// config.BinlogReplicaController — these are presumably the same instance;
// confirm against the caller that populates the config.
dblr.DoltBinlogReplicaController.SetExecutionContext(newCtx)
engine.Analyzer.BinlogReplicaController = config.BinlogReplicaController
return nil
}
func newSqlContext(sess *dsess.DoltSession, initialDb string) func(ctx context.Context) (*sql.Context, error) {
return func(ctx context.Context) (*sql.Context, error) {
sqlCtx := sql.NewContext(ctx, sql.WithSession(sess))
+13 -11
View File
@@ -37,6 +37,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/remotesrv"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
@@ -140,17 +141,18 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
BranchCtrlFilePath: serverConfig.BranchControlFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
ClusterController: clusterController,
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
BranchCtrlFilePath: serverConfig.BranchControlFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
ClusterController: clusterController,
BinlogReplicaController: binlogreplication.DoltBinlogReplicaController,
}
sqlEngine, err := engine.NewSqlEngine(
ctx,
+1 -1
View File
@@ -56,7 +56,7 @@ import (
)
const (
Version = "0.52.20"
Version = "0.53.0"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+17 -12
View File
@@ -2,12 +2,12 @@ module github.com/dolthub/dolt/go
require (
cloud.google.com/go/storage v1.12.0
github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml v1.1.0
github.com/HdrHistogram/hdrhistogram-go v1.1.2
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883
github.com/attic-labs/kingpin v2.2.7-0.20180312050558-442efcfac769+incompatible
github.com/aws/aws-sdk-go v1.33.0
github.com/aws/aws-sdk-go v1.34.0
github.com/bcicen/jstream v1.0.0
github.com/boltdb/bolt v1.3.1
github.com/denisbrodbeck/machineid v1.0.1
@@ -23,11 +23,10 @@ require (
github.com/gocraft/dbr/v2 v2.7.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.1
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.2.0
github.com/jpillora/backoff v1.0.0
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d
github.com/mattn/go-isatty v0.0.14
github.com/mattn/go-isatty v0.0.16
github.com/mattn/go-runewidth v0.0.13
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
@@ -47,22 +46,25 @@ require (
golang.org/x/sys v0.2.0
google.golang.org/api v0.32.0
google.golang.org/grpc v1.49.0
google.golang.org/protobuf v1.27.1
google.golang.org/protobuf v1.28.1
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/src-d/go-errors.v1 v1.0.0
gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2 v2.4.0
)
require (
github.com/Shopify/toxiproxy/v2 v2.5.0
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/go-mysql-server v0.14.1-0.20230214164824-b1608f9aaec3
github.com/google/flatbuffers v2.0.6+incompatible
github.com/jmoiron/sqlx v1.3.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.13.0
github.com/rs/zerolog v1.28.0
github.com/shirou/gopsutil/v3 v3.22.1
github.com/vbauerster/mpb v3.4.0+incompatible
github.com/vbauerster/mpb/v8 v8.0.2
@@ -88,7 +90,7 @@ require (
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-fonts/liberation v0.2.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
@@ -100,6 +102,7 @@ require (
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jstemmer/go-junit-report v0.9.1 // indirect
@@ -107,7 +110,7 @@ require (
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/lestrrat-go/strftime v1.0.4 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mattn/go-colorable v0.1.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/hashstructure v1.1.0 // indirect
github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect
@@ -115,8 +118,9 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rs/xid v1.4.0 // indirect
github.com/tklauser/numcpus v0.3.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.22.4 // indirect
@@ -125,12 +129,13 @@ require (
golang.org/x/image v0.0.0-20220302094943-723b81ca9867 // indirect
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/tools v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)
replace (
+53 -14
View File
@@ -45,8 +45,9 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -57,6 +58,8 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc=
github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8=
@@ -93,8 +96,8 @@ github.com/attic-labs/kingpin v2.2.7-0.20180312050558-442efcfac769+incompatible/
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.33.0 h1:Bq5Y6VTLbfnJp1IV8EL/qUU5qO1DYHda/zis/sqevkY=
github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo=
github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA=
@@ -127,8 +130,9 @@ github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -143,6 +147,7 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE
github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -211,11 +216,13 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 h1:6zl3BbBhdnMkpSj2YY30qV3gDcVBGtFgVsV3+/i+mKQ=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -235,6 +242,7 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gocraft/dbr/v2 v2.7.2 h1:ccUxMuz6RdZvD7VPhMRRMSS/ECF3gytPhPtcavjktHk=
github.com/gocraft/dbr/v2 v2.7.2/go.mod h1:5bCqyIXO5fYn3jEp/L06QF4K1siFdhxChMjdNu6YJrg=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -293,7 +301,6 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -319,6 +326,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -370,6 +379,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -417,16 +427,19 @@ github.com/lyft/protoc-gen-star v0.5.2/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkV
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@@ -451,6 +464,7 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
@@ -510,8 +524,10 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -523,21 +539,29 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY=
github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
@@ -760,6 +784,9 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -767,8 +794,10 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -835,15 +864,22 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -853,6 +889,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1030,8 +1067,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1054,6 +1091,7 @@ gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-errors.v1 v1.0.0 h1:cooGdZnCjYbeS1zb1s6pVAAimTdKceRrpn7aKOnNIfc=
gopkg.in/src-d/go-errors.v1 v1.0.0/go.mod h1:q1cBlomlw2FnDBDNGlnh6X0jPihy+QxZfMMNxPCbdYg=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -1063,8 +1101,9 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -0,0 +1,94 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)
// persistReplicationConfiguration writes |replicaSourceInfo| into the replica source info table of
// the running server's MySQL database and persists that database to disk. An error is returned if
// no SQL server is running (replication commands require dolt sql-server) or if persistence fails.
func persistReplicationConfiguration(ctx *sql.Context, replicaSourceInfo *mysql_db.ReplicaSourceInfo) error {
	runningServer := sqlserver.GetRunningServer()
	if runningServer == nil {
		return fmt.Errorf("no SQL server running; " +
			"replication commands may only be used when running from dolt sql-server, and not from dolt sql")
	}

	mysqlDb := runningServer.Engine.Analyzer.Catalog.MySQLDb
	if err := mysqlDb.ReplicaSourceInfoTable().Data().Put(ctx, replicaSourceInfo); err != nil {
		return err
	}
	return mysqlDb.Persist(ctx)
}
// loadReplicationConfiguration returns the persisted replication configuration for the default
// channel (""), or (nil, nil) when no configuration has been stored. An error is returned if no
// SQL server is running (replication commands require dolt sql-server).
func loadReplicationConfiguration(_ *sql.Context) (*mysql_db.ReplicaSourceInfo, error) {
	runningServer := sqlserver.GetRunningServer()
	if runningServer == nil {
		return nil, fmt.Errorf("no SQL server running; " +
			"replication commands may only be used when running from dolt sql-server, and not from dolt sql")
	}

	// ReplicaSourceInfo is keyed on channel name, but only the default channel ("") is currently
	// supported, so that key is always used for the lookup.
	tableData := runningServer.Engine.Analyzer.Catalog.MySQLDb.ReplicaSourceInfoTable().Data()
	entries := tableData.Get(mysql_db.ReplicaSourceInfoPrimaryKey{Channel: ""})
	if len(entries) != 1 {
		return nil, nil
	}
	return entries[0].(*mysql_db.ReplicaSourceInfo), nil
}
// deleteReplicationConfiguration removes all replication configuration for the default channel ("")
// and persists the change to disk. An error is returned if no SQL server is running (replication
// commands require dolt sql-server) or if persistence fails.
func deleteReplicationConfiguration(ctx *sql.Context) error {
	runningServer := sqlserver.GetRunningServer()
	if runningServer == nil {
		return fmt.Errorf("no SQL server running; " +
			"replication commands may only be used when running from dolt sql-server, and not from dolt sql")
	}

	mysqlDb := runningServer.Engine.Analyzer.Catalog.MySQLDb
	if err := mysqlDb.ReplicaSourceInfoTable().Data().Remove(ctx, mysql_db.ReplicaSourceInfoPrimaryKey{}, nil); err != nil {
		return err
	}
	return mysqlDb.Persist(ctx)
}
// persistSourceUuid saves the specified |sourceUuid| into the persisted replication configuration
// for the default channel. An error is returned if the configuration cannot be loaded or saved, or
// if no replication configuration has been persisted yet.
func persistSourceUuid(ctx *sql.Context, sourceUuid string) error {
	replicaSourceInfo, err := loadReplicationConfiguration(ctx)
	if err != nil {
		return err
	}
	// loadReplicationConfiguration returns (nil, nil) when no configuration exists; guard
	// against that so we don't dereference a nil ReplicaSourceInfo below.
	if replicaSourceInfo == nil {
		return fmt.Errorf("unable to persist source UUID: no replication configuration found")
	}

	replicaSourceInfo.Uuid = sourceUuid
	return persistReplicationConfiguration(ctx, replicaSourceInfo)
}
@@ -0,0 +1,137 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/mysql"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
// binlogPositionDirectory is the directory, relative to the root of the provider's filesystem,
// where the binlog position file is stored.
const binlogPositionDirectory = ".doltcfg"

// binlogPositionFilename is the name of the file (inside binlogPositionDirectory) that durably
// records the set of GTIDs that have been executed on this replica.
const binlogPositionFilename = "binlog-position"

// mysqlFlavor is the GTID flavor passed to mysql.ParsePosition when parsing saved positions.
const mysqlFlavor = "MySQL56"
// binlogPositionStore manages loading and saving data to the binlog position file stored on disk. This provides
// durable storage for the set of GTIDs that have been successfully executed on the replica, so that the replica
// server can be restarted and resume binlog event messages at the correct point.
type binlogPositionStore struct {
	// mu serializes Load/Save/Delete calls so concurrent access to the position file is safe.
	mu sync.Mutex
}
// Load loads a mysql.Position instance from the .doltcfg/binlog-position file at the root of the provider's filesystem.
// This file MUST be stored at the root of the provider's filesystem, and NOT inside a nested database's .doltcfg directory,
// since the binlog position contains events that cover all databases in a SQL server. The returned mysql.Position
// represents the set of GTIDs that have been successfully executed and applied on this replica. Currently only the
// default binlog channel ("") is supported. If no .doltcfg/binlog-position file is stored, this method returns a nil
// mysql.Position and a nil error. If any errors are encountered, a nil mysql.Position and an error are returned.
func (store *binlogPositionStore) Load(ctx *sql.Context) (*mysql.Position, error) {
	store.mu.Lock()
	defer store.mu.Unlock()

	doltSession := dsess.DSessFromSess(ctx.Session)
	filesys := doltSession.Provider().FileSystem()

	// If the .doltcfg directory or the position file doesn't exist yet, there is no saved
	// position; this is not an error.
	if doltDirExists, _ := filesys.Exists(binlogPositionDirectory); !doltDirExists {
		return nil, nil
	}
	if positionFileExists, _ := filesys.Exists(filepath.Join(binlogPositionDirectory, binlogPositionFilename)); !positionFileExists {
		return nil, nil
	}

	filePath, err := filesys.Abs(filepath.Join(binlogPositionDirectory, binlogPositionFilename))
	if err != nil {
		return nil, err
	}
	bytes, err := os.ReadFile(filePath)
	if err != nil {
		return nil, err
	}

	// The saved position is encoded as "MySQL56/<gtid set>" (see mysql.EncodePosition); strip
	// the flavor prefix before parsing. Using TrimPrefix avoids the previous manual slice-and-
	// reconvert of the byte buffer.
	positionString := strings.TrimPrefix(string(bytes), mysqlFlavor+"/")

	position, err := mysql.ParsePosition(mysqlFlavor, positionString)
	if err != nil {
		return nil, err
	}

	return &position, nil
}
// Save persists |position| to the .doltcfg/binlog-position file at the root of the provider's
// filesystem. This file MUST live at the filesystem root, NOT inside a nested database's .doltcfg
// directory, because the binlog position covers events for every database on the SQL server.
// |position| is the set of GTIDs that have been successfully executed and applied on this replica;
// only the default binlog channel ("") is currently supported. An error is returned if the position
// is nil or if it cannot be written to disk.
func (store *binlogPositionStore) Save(ctx *sql.Context, position *mysql.Position) error {
	if position == nil {
		return fmt.Errorf("unable to save binlog position: nil position passed")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	filesys := dsess.DSessFromSess(ctx.Session).Provider().FileSystem()

	// The .doltcfg dir may not exist yet; create it on demand, and refuse to proceed if a
	// plain file is squatting on that name.
	switch exists, isDir := filesys.Exists(binlogPositionDirectory); {
	case !exists:
		if err := filesys.MkDirs(binlogPositionDirectory); err != nil {
			return fmt.Errorf("unable to save binlog position: %s", err)
		}
	case !isDir:
		return fmt.Errorf("unable to save binlog position: %s exists as a file, not a dir", binlogPositionDirectory)
	}

	filePath, err := filesys.Abs(filepath.Join(binlogPositionDirectory, binlogPositionFilename))
	if err != nil {
		return err
	}

	return os.WriteFile(filePath, []byte(mysql.EncodePosition(*position)), 0666)
}
// Delete removes the .doltcfg/binlog-position file at the root of the provider's filesystem,
// discarding the stored replication position. This supports "RESET REPLICA", which clears out
// the current replication state. Any error encountered removing the file is returned.
func (store *binlogPositionStore) Delete(ctx *sql.Context) error {
	store.mu.Lock()
	defer store.mu.Unlock()

	filesys := dsess.DSessFromSess(ctx.Session).Provider().FileSystem()
	positionFilePath := filepath.Join(binlogPositionDirectory, binlogPositionFilename)
	return filesys.Delete(positionFilePath, false)
}
@@ -0,0 +1,886 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"io"
"strconv"
"strings"
"time"
gms "github.com/dolthub/go-mysql-server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/binlogreplication"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/go-mysql-server/sql/parse"
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/vitess/go/mysql"
"github.com/dolthub/vitess/go/sqltypes"
vquery "github.com/dolthub/vitess/go/vt/proto/query"
"github.com/sirupsen/logrus"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/writer"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)
// positionStore is a singleton instance for loading/saving binlog position state to disk for durable storage.
var positionStore = &binlogPositionStore{}

const (
	// ERNetReadError is the MySQL error number reported when reading from the network fails
	// (used below when the binlog connection hits an unexpected EOF).
	ERNetReadError = 1158
	// ERFatalReplicaError is the MySQL error number for a fatal replication error that stops
	// the replica's connection attempt.
	ERFatalReplicaError = 13117
)
// binlogReplicaApplier represents the process that applies updates from a binlog connection.
//
// This type is NOT used concurrently — there is currently only one single applier process running to process binlog
// events, so the state in this type is NOT protected with a mutex.
type binlogReplicaApplier struct {
	// format is the binlog format, parsed from FormatDescription events sent by the source.
	format mysql.BinlogFormat
	// tableMapsById caches TableMap events by table ID so later row events can resolve their
	// table metadata.
	tableMapsById map[uint64]*mysql.TableMap
	// stopReplicationChan signals the event handler loop to stop processing binlog events.
	stopReplicationChan chan struct{}
	// currentGtid is the current GTID being processed, but not yet committed
	currentGtid mysql.GTID
	// replicationSourceUuid holds the UUID of the source server
	replicationSourceUuid string
	// currentPosition records which GTIDs have been successfully executed
	currentPosition *mysql.Position
	// filters controls which databases/tables are replicated.
	filters *filterConfiguration
}
// newBinlogReplicaApplier creates a binlogReplicaApplier that uses |filters| to decide which
// tables should be replicated.
func newBinlogReplicaApplier(filters *filterConfiguration) *binlogReplicaApplier {
	applier := &binlogReplicaApplier{}
	applier.tableMapsById = make(map[uint64]*mysql.TableMap)
	applier.stopReplicationChan = make(chan struct{})
	applier.filters = filters
	return applier
}
// Row Flags https://mariadb.com/kb/en/rows_event_v1v2-rows_compressed_event_v1/

// rowFlag_endOfStatement indicates that a row event with this flag set is the last event in a statement.
const rowFlag_endOfStatement = 0x0001

// rowFlag_noForeignKeyChecks indicates that foreign key checks were disabled on the source for
// this event's rows.
const rowFlag_noForeignKeyChecks = 0x0002

// rowFlag_noUniqueKeyChecks indicates that unique key checks were disabled on the source for
// this event's rows.
const rowFlag_noUniqueKeyChecks = 0x0004

// rowFlag_noCheckConstraints indicates that check constraints were disabled on the source for
// this event's rows.
const rowFlag_noCheckConstraints = 0x0010

// rowFlag_rowsAreComplete indicates that rows in this event are complete, and contain values for all columns of the table.
const rowFlag_rowsAreComplete = 0x0008
// Go launches the applier's binlog event handler on a new goroutine. Any error returned by the
// handler is logged and recorded on the replica controller as an SQL error.
func (a *binlogReplicaApplier) Go(ctx *sql.Context) {
	go func() {
		if err := a.replicaBinlogEventHandler(ctx); err != nil {
			ctx.GetLogger().Errorf("unexpected error of type %T: '%v'", err, err.Error())
			DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, err.Error())
		}
	}()
}
// connectAndStartReplicationEventStream connects to the configured MySQL replication source, including pausing
// and retrying if errors are encountered.
//
// NOTE: Our fork of Vitess currently only supports mysql_native_password auth. The latest code in the main
//
// Vitess repo supports the current MySQL default auth plugin, caching_sha2_password.
// https://dev.mysql.com/blog-archive/upgrading-to-mysql-8-0-default-authentication-plugin-considerations/
// To work around this limitation, add the following to your /etc/my.cnf:
// [mysqld]
// default-authentication-plugin=mysql_native_password
// or start mysqld with:
// --default-authentication-plugin=mysql_native_password
func (a *binlogReplicaApplier) connectAndStartReplicationEventStream(ctx *sql.Context) (*mysql.Conn, error) {
	// Read the retry limits out of the controller's status while marking the replica as connecting.
	var maxConnectionAttempts uint64
	var connectRetryDelay uint32
	DoltBinlogReplicaController.updateStatus(func(status *binlogreplication.ReplicaStatus) {
		status.ReplicaIoRunning = binlogreplication.ReplicaIoConnecting
		status.ReplicaSqlRunning = binlogreplication.ReplicaSqlRunning
		maxConnectionAttempts = status.SourceRetryCount
		connectRetryDelay = status.ConnectRetry
	})

	var conn *mysql.Conn
	var err error
	for connectionAttempts := uint64(0); ; connectionAttempts++ {
		// NOTE(review): this := shadows the outer |err| inside the loop body; that is harmless
		// today because every error path below returns directly, but worth confirming before
		// restructuring this loop.
		replicaSourceInfo, err := loadReplicationConfiguration(ctx)

		// A nil config (including the case where loadReplicationConfiguration itself errored)
		// is treated as "server not configured as a replica" — a fatal, non-retried error.
		if replicaSourceInfo == nil {
			err = ErrServerNotConfiguredAsReplica
			DoltBinlogReplicaController.setIoError(ERFatalReplicaError, err.Error())
			return nil, err
		} else if replicaSourceInfo.Uuid != "" {
			a.replicationSourceUuid = replicaSourceInfo.Uuid
		}

		if replicaSourceInfo.Host == "" {
			err = fmt.Errorf("fatal error: Invalid (empty) hostname when attempting to connect " +
				"to the source server. Connection attempt terminated")
			DoltBinlogReplicaController.setIoError(ERFatalReplicaError, err.Error())
			return nil, err
		} else if replicaSourceInfo.User == "" {
			err = fmt.Errorf("fatal error: Invalid (empty) username when attempting to connect " +
				"to the source server. Connection attempt terminated")
			DoltBinlogReplicaController.setIoError(ERFatalReplicaError, err.Error())
			return nil, err
		}

		connParams := mysql.ConnParams{
			Host:             replicaSourceInfo.Host,
			Port:             int(replicaSourceInfo.Port),
			Uname:            replicaSourceInfo.User,
			Pass:             replicaSourceInfo.Password,
			ConnectTimeoutMs: 4_000,
		}

		// Connection errors are retried (with a delay) until the attempt budget is exhausted.
		conn, err = mysql.Connect(ctx, &connParams)
		if err != nil && connectionAttempts >= maxConnectionAttempts {
			return nil, err
		} else if err != nil {
			time.Sleep(time.Duration(connectRetryDelay) * time.Second)
		} else {
			break
		}
	}

	// Request binlog events to start
	// TODO: This should also have retry logic
	err = a.startReplicationEventStream(ctx, conn)
	if err != nil {
		return nil, err
	}

	DoltBinlogReplicaController.updateStatus(func(status *binlogreplication.ReplicaStatus) {
		status.ReplicaIoRunning = binlogreplication.ReplicaIoRunning
	})

	return conn, nil
}
// startReplicationEventStream sends a request over |conn|, the connection to the MySQL source server, to begin
// sending binlog events.
//
// The starting position is chosen in priority order: the durably stored position on disk, then the
// @@gtid_purged system variable (set when loading a mysqldump onto a replica), and finally a
// minimal GTID set that effectively means "start from the very beginning".
func (a *binlogReplicaApplier) startReplicationEventStream(ctx *sql.Context, conn *mysql.Conn) error {
	serverId, err := loadReplicaServerId()
	if err != nil {
		return err
	}

	position, err := positionStore.Load(ctx)
	if err != nil {
		return err
	}

	if position == nil {
		// If the positionStore doesn't have a record of executed GTIDs, check to see if the gtid_purged system
		// variable is set. If it holds a GTIDSet, then we use that as our starting position. As part of loading
		// a mysqldump onto a replica, gtid_purged will be set to indicate where to start replication.
		_, value, ok := sql.SystemVariables.GetGlobal("gtid_purged")
		gtidPurged, isString := value.(string)
		if ok && value != nil && isString {
			// Starting in MySQL 8.0, when setting the GTID_PURGED sys variable, if the new value starts with '+', then
			// the specified GTID Set value is added to the current GTID Set value to get a new GTID Set that contains
			// all the previous GTIDs, plus the new ones from the current assignment. Dolt doesn't support this
			// special behavior for appending to GTID Sets yet, so in this case the GTID_PURGED sys var will end up
			// with a "+" prefix. For now, just ignore the "+" prefix if we see it.
			// https://dev.mysql.com/doc/refman/8.0/en/replication-options-gtids.html#sysvar_gtid_purged
			if strings.HasPrefix(gtidPurged, "+") {
				ctx.GetLogger().Warnf("Ignoring unsupported '+' prefix on @@GTID_PURGED value")
				gtidPurged = gtidPurged[1:]
			}

			purged, err := mysql.ParsePosition(mysqlFlavor, gtidPurged)
			if err != nil {
				return err
			}
			position = &purged
		}
	}

	if position == nil {
		// If we still don't have any record of executed GTIDs, we create a GTIDSet with just one transaction ID
		// for the 0000 server ID. There doesn't seem to be a cleaner way of saying "start at the very beginning".
		//
		// Also... "starting position" is a bit of a misnomer — it's actually the processed GTIDs, which
		// indicate the NEXT GTID where replication should start, but it's not as direct as specifying
		// a starting position, like the Vitess function signature seems to suggest.
		gtid := mysql.Mysql56GTID{
			Sequence: 1,
		}
		position = &mysql.Position{GTIDSet: gtid.GTIDSet()}
	}

	// Remember the chosen position so processBinlogEvent can extend it as GTIDs are applied.
	a.currentPosition = position

	return conn.SendBinlogDumpCommand(serverId, *position)
}
// replicaBinlogEventHandler runs a loop, processing binlog events until the applier's stop replication channel
// receives a signal to stop.
//
// I/O errors from the binlog connection are classified by matching the error message text: plain
// EOF is treated as "no more events yet" and polled again after a short sleep; unexpected EOF
// triggers a reconnect; checksum complaints are ignored; anything else is logged and recorded on
// the controller, and the loop continues.
func (a *binlogReplicaApplier) replicaBinlogEventHandler(ctx *sql.Context) error {
	server := sqlserver.GetRunningServer()
	if server == nil {
		return fmt.Errorf("unable to access a running SQL server")
	}
	engine := server.Engine

	conn, err := a.connectAndStartReplicationEventStream(ctx)
	if err != nil {
		return err
	}

	// Process binlog events
	for {
		select {
		case <-a.stopReplicationChan:
			ctx.GetLogger().Trace("received signal to stop replication routine")
			return nil
		default:
			event, err := conn.ReadBinlogEvent()
			if err != nil {
				if sqlError, isSqlError := err.(*mysql.SQLError); isSqlError {
					if sqlError.Message == io.EOF.Error() {
						// No events available right now; poll again shortly.
						ctx.GetLogger().Trace("No more binlog messages; retrying in 1s...")
						time.Sleep(1 * time.Second)
						continue
					} else if strings.HasPrefix(sqlError.Message, io.ErrUnexpectedEOF.Error()) {
						// The connection dropped mid-message; record the I/O error and reconnect.
						DoltBinlogReplicaController.updateStatus(func(status *binlogreplication.ReplicaStatus) {
							status.LastIoError = io.ErrUnexpectedEOF.Error()
							status.LastIoErrNumber = ERNetReadError
							currentTime := time.Now()
							status.LastIoErrorTimestamp = &currentTime
						})
						conn, err = a.connectAndStartReplicationEventStream(ctx)
						if err != nil {
							return err
						}
						continue
					} else if strings.Contains(sqlError.Message, "can not handle replication events with the checksum") {
						// Ignore any errors about checksums
						ctx.GetLogger().Debug("ignoring binlog checksum error message")
						continue
					}
				}

				// otherwise, log the error if it's something we don't expect and continue
				ctx.GetLogger().Errorf("unexpected error of type %T: '%v'", err, err.Error())
				DoltBinlogReplicaController.setIoError(mysql.ERUnknownError, err.Error())
				continue
			}

			// Apply the event; processing errors are recorded but do not stop replication.
			err = a.processBinlogEvent(ctx, engine, event)
			if err != nil {
				ctx.GetLogger().Errorf("unexpected error of type %T: '%v'", err, err.Error())
				DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, err.Error())
			}
		}
	}
}
// processBinlogEvent processes a single binlog event message and returns an error if there were any problems
// processing it.
//
// Events that complete a transaction (XID events, and most Query events) set |createCommit|, which
// causes a Dolt commit to be created at the bottom of this function and the executed-GTID position
// to be advanced and persisted.
func (a *binlogReplicaApplier) processBinlogEvent(ctx *sql.Context, engine *gms.Engine, event mysql.BinlogEvent) error {
	var err error
	createCommit := false
	commitToAllDatabases := false

	switch {
	case event.IsRand():
		// A RAND_EVENT contains two seed values that set the rand_seed1 and rand_seed2 system variables that are
		// used to compute the random number. For more details, see: https://mariadb.com/kb/en/rand_event/
		// Note: it is written only before a QUERY_EVENT and is NOT used with row-based logging.
		ctx.GetLogger().Debug("Received binlog event: Rand")

	case event.IsXID():
		// An XID event is generated for a COMMIT of a transaction that modifies one or more tables of an
		// XA-capable storage engine. For more details, see: https://mariadb.com/kb/en/xid_event/
		ctx.GetLogger().Debug("Received binlog event: XID")
		createCommit = true
		commitToAllDatabases = true

	case event.IsQuery():
		// A Query event represents a statement executed on the source server that should be executed on the
		// replica. Used for all statements with statement-based replication, DDL statements with row-based replication
		// as well as COMMITs for non-transactional engines such as MyISAM.
		// For more details, see: https://mariadb.com/kb/en/query_event/
		query, err := event.Query(a.format)
		if err != nil {
			return err
		}
		ctx.GetLogger().WithFields(logrus.Fields{
			"database": query.Database,
			"charset":  query.Charset,
			"query":    query.SQL,
		}).Debug("Received binlog event: Query")

		// When executing SQL statements sent from the primary, we can't be sure what database was modified unless we
		// look closely at the statement. For example, we could be connected to db01, but executed
		// "create table db02.t (...);" — i.e., looking at query.Database is NOT enough to always determine the correct
		// database that was modified, so instead, we commit to all databases when we see a Query binlog event to
		// avoid issues with correctness, at the cost of being slightly less efficient
		commitToAllDatabases = true
		executeQueryWithEngine(ctx, engine, query.SQL)
		// BEGIN statements only open a transaction; they don't produce a Dolt commit themselves.
		createCommit = strings.ToLower(query.SQL) != "begin"

	case event.IsRotate():
		// When a binary log file exceeds the configured size limit, a ROTATE_EVENT is written at the end of the file,
		// pointing to the next file in the sequence. ROTATE_EVENT is generated locally and written to the binary log
		// on the source server and it's also written when a FLUSH LOGS statement occurs on the source server.
		// For more details, see: https://mariadb.com/kb/en/rotate_event/
		ctx.GetLogger().Debug("Received binlog event: Rotate")

	case event.IsFormatDescription():
		// This is a descriptor event that is written to the beginning of a binary log file, at position 4 (after
		// the 4 magic number bytes). For more details, see: https://mariadb.com/kb/en/format_description_event/
		a.format, err = event.Format()
		if err != nil {
			return err
		}
		ctx.GetLogger().WithFields(logrus.Fields{
			"format": a.format,
		}).Debug("Received binlog event: FormatDescription")

	case event.IsPreviousGTIDs():
		// Logged in every binlog to record the current replication state. Consists of the last GTID seen for each
		// replication domain. For more details, see: https://mariadb.com/kb/en/gtid_list_event/
		position, err := event.PreviousGTIDs(a.format)
		if err != nil {
			return err
		}
		ctx.GetLogger().WithFields(logrus.Fields{
			"previousGtids": position.GTIDSet.String(),
		}).Debug("Received binlog event: PreviousGTIDs")

	case event.IsGTID():
		// For global transaction ID, used to start a new transaction event group, instead of the old BEGIN query event,
		// and also to mark stand-alone (ddl). For more details, see: https://mariadb.com/kb/en/gtid_event/
		gtid, isBegin, err := event.GTID(a.format)
		if err != nil {
			return err
		}
		if isBegin {
			ctx.GetLogger().Errorf("unsupported binlog protocol message: GTID event with 'isBegin' set to true")
		}
		ctx.GetLogger().WithFields(logrus.Fields{
			"gtid":    gtid,
			"isBegin": isBegin,
		}).Debug("Received binlog event: GTID")
		a.currentGtid = gtid
		// if the source's UUID hasn't been set yet, set it and persist it
		if a.replicationSourceUuid == "" {
			uuid := fmt.Sprintf("%v", gtid.SourceServer())
			err = persistSourceUuid(ctx, uuid)
			if err != nil {
				return err
			}
			a.replicationSourceUuid = uuid
		}

	case event.IsTableMap():
		// Used for row-based binary logging beginning (binlog_format=ROW or MIXED). This event precedes each row
		// operation event and maps a table definition to a number, where the table definition consists of database
		// and table names. For more details, see: https://mariadb.com/kb/en/table_map_event/
		// Note: TableMap events are sent before each row event, so there is no need to persist them between restarts.
		tableId := event.TableID(a.format)
		tableMap, err := event.TableMap(a.format)
		if err != nil {
			return err
		}
		ctx.GetLogger().WithFields(logrus.Fields{
			"id":        tableId,
			"tableName": tableMap.Name,
			"database":  tableMap.Database,
			"flags":     convertToHexString(tableMap.Flags),
			"metadata":  tableMap.Metadata,
			"types":     tableMap.Types,
		}).Debug("Received binlog event: TableMap")

		if tableId == 0xFFFFFF {
			// Table ID 0xFFFFFF is a special value that indicates table maps can be freed.
			ctx.GetLogger().Infof("binlog protocol message: table ID '0xFFFFFF'; clearing table maps")
			a.tableMapsById = make(map[uint64]*mysql.TableMap)
		} else {
			flags := tableMap.Flags
			if flags&rowFlag_endOfStatement == rowFlag_endOfStatement {
				// nothing to be done for end of statement; just clear the flag
				flags = flags &^ rowFlag_endOfStatement
			}
			if flags&rowFlag_noForeignKeyChecks == rowFlag_noForeignKeyChecks {
				flags = flags &^ rowFlag_noForeignKeyChecks
			}
			// Any remaining flag bits are unsupported; report them but keep processing.
			if flags != 0 {
				msg := fmt.Sprintf("unsupported binlog protocol message: TableMap event with unsupported flags '%x'", flags)
				ctx.GetLogger().Errorf(msg)
				DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
			}
			a.tableMapsById[tableId] = tableMap
		}

	case event.IsDeleteRows(), event.IsWriteRows(), event.IsUpdateRows():
		// A ROWS_EVENT is written for row based replication if data is inserted, deleted or updated.
		// For more details, see: https://mariadb.com/kb/en/rows_event_v1v2-rows_compressed_event_v1/
		err = a.processRowEvent(ctx, event, engine)
		if err != nil {
			return err
		}

	default:
		// We can't access the bytes directly because these non-interface types in Vitess are not exposed.
		// Having a Bytes() or Type() method on the Vitess interface would let us clean this up.
		byteString := fmt.Sprintf("%v", event)
		if strings.HasPrefix(byteString, "{[0 0 0 0 27 ") {
			// Type 27 is a Heartbeat event. This event does not appear in the binary log. It's only sent over the
			// network by a primary to a replica to let it know that the primary is still alive, and is only sent
			// when the primary has no binlog events to send to replica servers.
			// For more details, see: https://mariadb.com/kb/en/heartbeat_log_event/
			ctx.GetLogger().Debug("Received binlog event: Heartbeat")
		} else {
			return fmt.Errorf("received unknown event: %v", event)
		}
	}

	if createCommit {
		var databasesToCommit []string
		if commitToAllDatabases {
			databasesToCommit = getAllUserDatabaseNames(ctx, engine)
			for _, database := range databasesToCommit {
				executeQueryWithEngine(ctx, engine, "use `"+database+"`;")
				executeQueryWithEngine(ctx, engine, "commit;")
			}
		}

		// Record the last GTID processed after the commit
		a.currentPosition.GTIDSet = a.currentPosition.GTIDSet.AddGTID(a.currentGtid)
		err := sql.SystemVariables.AssignValues(map[string]interface{}{"gtid_executed": a.currentPosition.GTIDSet.String()})
		if err != nil {
			ctx.GetLogger().Errorf("unable to set @@GLOBAL.gtid_executed: %s", err.Error())
		}
		err = positionStore.Save(ctx, a.currentPosition)
		if err != nil {
			return fmt.Errorf("unable to store GTID executed metadata to disk: %s", err.Error())
		}

		// For now, create a Dolt commit from every data update. Eventually, we'll want to make this configurable.
		ctx.GetLogger().Trace("Creating Dolt commit(s)")
		for _, database := range databasesToCommit {
			executeQueryWithEngine(ctx, engine, "use `"+database+"`;")
			executeQueryWithEngine(ctx, engine,
				fmt.Sprintf("call dolt_commit('-Am', 'Dolt binlog replica commit: GTID %s');", a.currentGtid))
		}
	}

	return nil
}
// processRowEvent processes a WriteRows, DeleteRows, or UpdateRows binlog event and returns an error if any problems
// were encountered. Rows are parsed with the table metadata cached from the preceding TableMap
// event, filtered through the replication filter configuration, and applied through a table writer.
func (a *binlogReplicaApplier) processRowEvent(ctx *sql.Context, event mysql.BinlogEvent, engine *gms.Engine) error {
	// Identify the row event type once, so that log and error messages below name the actual
	// event being processed.
	var eventType string
	switch {
	case event.IsDeleteRows():
		eventType = "DeleteRows"
	case event.IsWriteRows():
		eventType = "WriteRows"
	case event.IsUpdateRows():
		eventType = "UpdateRows"
	default:
		return fmt.Errorf("unsupported event type: %v", event)
	}
	ctx.GetLogger().Debugf("Received binlog event: %s", eventType)

	tableId := event.TableID(a.format)
	tableMap, ok := a.tableMapsById[tableId]
	if !ok {
		return fmt.Errorf("unable to find replication metadata for table ID: %d", tableId)
	}

	if a.filters.isTableFilteredOut(ctx, tableMap) {
		return nil
	}

	rows, err := event.Rows(a.format, tableMap)
	if err != nil {
		return err
	}

	flags := rows.Flags
	if flags&rowFlag_endOfStatement == rowFlag_endOfStatement {
		// nothing to be done for end of statement; just clear the flag and move on
		flags = flags &^ rowFlag_endOfStatement
	}
	if flags&rowFlag_noForeignKeyChecks == rowFlag_noForeignKeyChecks {
		flags = flags &^ rowFlag_noForeignKeyChecks
	}
	if flags != 0 {
		// BUGFIX: this message previously said "DeleteRows event" regardless of the actual
		// event type; report the detected event type instead.
		msg := fmt.Sprintf("unsupported binlog protocol message: %s event with unsupported flags '%x'", eventType, flags)
		ctx.GetLogger().Errorf(msg)
		DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
	}

	schema, err := getTableSchema(ctx, engine, tableMap.Name, tableMap.Database)
	if err != nil {
		return err
	}

	switch {
	case event.IsDeleteRows():
		ctx.GetLogger().Debugf(" - Deleted Rows (table: %s)", tableMap.Name)
	case event.IsUpdateRows():
		ctx.GetLogger().Debugf(" - Updated Rows (table: %s)", tableMap.Name)
	case event.IsWriteRows():
		ctx.GetLogger().Debugf(" - New Rows (table: %s)", tableMap.Name)
	}

	// If foreign key checks were disabled on the source for these rows, disable them in the
	// table writer as well.
	foreignKeyChecksDisabled := tableMap.Flags&rowFlag_noForeignKeyChecks > 0
	writeSession, tableWriter, err := getTableWriter(ctx, engine, tableMap.Name, tableMap.Database, foreignKeyChecksDisabled)
	if err != nil {
		return err
	}

	for _, row := range rows.Rows {
		var identityRow, dataRow sql.Row

		// row.Identify carries the identifying (pre-change) column values, used by Delete and
		// Update events to locate the affected row.
		if len(row.Identify) > 0 {
			identityRow, err = parseRow(ctx, tableMap, schema, rows.IdentifyColumns, row.NullIdentifyColumns, row.Identify)
			if err != nil {
				return err
			}
			ctx.GetLogger().Debugf(" - Identity: %v ", sql.FormatRow(identityRow))
		}

		// row.Data carries the new column values, used by Write and Update events.
		if len(row.Data) > 0 {
			dataRow, err = parseRow(ctx, tableMap, schema, rows.DataColumns, row.NullColumns, row.Data)
			if err != nil {
				return err
			}
			ctx.GetLogger().Debugf(" - Data: %v ", sql.FormatRow(dataRow))
		}

		switch {
		case event.IsDeleteRows():
			err = tableWriter.Delete(ctx, identityRow)
		case event.IsWriteRows():
			err = tableWriter.Insert(ctx, dataRow)
		case event.IsUpdateRows():
			err = tableWriter.Update(ctx, identityRow, dataRow)
		}
		if err != nil {
			return err
		}
	}

	// Flush and close the write session so the applied rows land in the database's working set.
	return closeWriteSession(ctx, engine, tableMap.Database, writeSession)
}
//
// Helper functions
//
// closeWriteSession flushes and closes the specified |writeSession| and returns an error if anything failed.
func closeWriteSession(ctx *sql.Context, engine *gms.Engine, databaseName string, writeSession writer.WriteSession) error {
	flushedWorkingSet, err := writeSession.Flush(ctx)
	if err != nil {
		return err
	}

	db, err := engine.Analyzer.Catalog.Database(ctx, databaseName)
	if err != nil {
		return err
	}

	// Unwrap any privilege-checking wrapper to reach the underlying Dolt database.
	if wrapped, isWrapped := db.(mysql_db.PrivilegedDatabase); isWrapped {
		db = wrapped.Unwrap()
	}
	doltDb, isDolt := db.(sqle.Database)
	if !isDolt {
		return fmt.Errorf("unexpected database type: %T", db)
	}

	wsHash, err := flushedWorkingSet.HashOf()
	if err != nil {
		return err
	}

	// Persist the flushed working set so the applied changes become visible.
	return doltDb.DbData().Ddb.UpdateWorkingSet(ctx, flushedWorkingSet.Ref(), flushedWorkingSet, wsHash, flushedWorkingSet.Meta())
}
// getTableSchema returns a sql.Schema for the specified table in the specified database.
func getTableSchema(ctx *sql.Context, engine *gms.Engine, tableName, databaseName string) (sql.Schema, error) {
	db, err := engine.Analyzer.Catalog.Database(ctx, databaseName)
	if err != nil {
		return nil, err
	}

	// Table lookup is case-insensitive, matching MySQL identifier semantics.
	table, found, err := db.GetTableInsensitive(ctx, tableName)
	switch {
	case err != nil:
		return nil, err
	case !found:
		return nil, fmt.Errorf("unable to find table %q", tableName)
	}

	return table.Schema(), nil
}
// getTableWriter returns a WriteSession and a TableWriter for writing to the specified |table| in the specified |database|.
func getTableWriter(ctx *sql.Context, engine *gms.Engine, tableName, databaseName string, foreignKeyChecksDisabled bool) (writer.WriteSession, writer.TableWriter, error) {
	db, err := engine.Analyzer.Catalog.Database(ctx, databaseName)
	if err != nil {
		return nil, nil, err
	}

	// Unwrap any privilege-checking wrapper to reach the underlying Dolt database.
	if wrapped, isWrapped := db.(mysql_db.PrivilegedDatabase); isWrapped {
		db = wrapped.Unwrap()
	}
	doltDb, isDolt := db.(sqle.Database)
	if !isDolt {
		return nil, nil, fmt.Errorf("unexpected database type: %T", db)
	}

	workingSet, err := env.WorkingSet(ctx, doltDb.GetDoltDB(), doltDb.DbData().Rsr)
	if err != nil {
		return nil, nil, err
	}

	autoIncTracker, err := globalstate.NewAutoIncrementTracker(ctx, workingSet)
	if err != nil {
		return nil, nil, err
	}

	// Propagate the event's FK-check setting into the editor options for this write session.
	editOptions := doltDb.EditOptions()
	editOptions.ForeignKeyChecksDisabled = foreignKeyChecksDisabled

	binFormat := doltDb.DbData().Ddb.Format()
	writeSession := writer.NewWriteSession(binFormat, workingSet, autoIncTracker, editOptions)

	sess := dsess.DSessFromSess(ctx.Session)
	tableWriter, err := writeSession.GetTableWriter(ctx, tableName, databaseName, sess.SetRoot, false)
	if err != nil {
		return nil, nil, err
	}

	return writeSession, tableWriter, nil
}
// parseRow parses the binary row data from a MySQL binlog event and converts it into a go-mysql-server Row using the
// |schema| information provided. |columnsPresentBitmap| indicates which column values are present in |data| and
// |nullValuesBitmap| indicates which columns have null values and are NOT present in |data|.
func parseRow(ctx *sql.Context, tableMap *mysql.TableMap, schema sql.Schema, columnsPresentBitmap, nullValuesBitmap mysql.Bitmap, data []byte) (sql.Row, error) {
	var parsedRow sql.Row
	// pos tracks the byte offset of the next encoded cell in |data|.
	pos := 0
	for i, typ := range tableMap.Types {
		column := schema[i]

		// Columns absent from the event are represented as nil in the parsed row.
		// (Idiom fix: avoid comparing a bool against false.)
		if !columnsPresentBitmap.Bit(i) {
			parsedRow = append(parsedRow, nil)
			continue
		}

		var value sqltypes.Value
		var err error
		if nullValuesBitmap.Bit(i) {
			// NULL cells occupy no bytes in |data|.
			value, err = sqltypes.NewValue(vquery.Type_NULL_TYPE, nil)
			if err != nil {
				return nil, err
			}
		} else {
			// getSignedType tells Vitess whether to decode integer cells as signed or unsigned.
			var length int
			value, length, err = mysql.CellValue(data, pos, typ, tableMap.Metadata[i], getSignedType(column))
			if err != nil {
				return nil, err
			}
			pos += length
		}

		convertedValue, err := convertSqlTypesValue(ctx, value, column)
		if err != nil {
			return nil, err
		}
		parsedRow = append(parsedRow, convertedValue)
	}

	return parsedRow, nil
}
// getSignedType returns a Vitess query.Type that can be used with the Vitess mysql.CellValue function to correctly
// parse the value of a signed or unsigned integer value. The mysql.TableMap structure provides information about the
// type, but it doesn't indicate if an integer type is signed or unsigned, so we have to look at the column type in the
// replica's schema and then choose any signed/unsigned query.Type to pass into mysql.CellValue to instruct it whether
// to treat a value as signed or unsigned — the actual type does not matter, only the signed/unsigned property.
func getSignedType(column *sql.Column) vquery.Type {
	switch column.Type.Type() {
	case vquery.Type_UINT8, vquery.Type_UINT16, vquery.Type_UINT24, vquery.Type_UINT32, vquery.Type_UINT64:
		// Any unsigned numeric type signals Vitess to treat the value as unsigned;
		// the specific width is irrelevant.
		return vquery.Type_UINT64
	}
	// Everything else is treated as signed.
	return vquery.Type_INT64
}
// convertSqlTypesValue converts a sqltypes.Value instance (from Vitess) into a value of the column's
// sql.Type (for go-mysql-server). A NULL value converts to nil.
func convertSqlTypesValue(ctx *sql.Context, value sqltypes.Value, column *sql.Column) (interface{}, error) {
	if value.IsNull() {
		return nil, nil
	}

	var convertedValue interface{}
	var err error
	switch {
	case types.IsEnum(column.Type), types.IsSet(column.Type):
		// Enum and set values arrive as their numeric representation encoded as a string.
		// Bug fix: the previous `atoi, err :=` declaration shadowed the outer |err|, so errors
		// from Convert in this case were silently dropped by the check below.
		atoi, atoiErr := strconv.Atoi(value.ToString())
		if atoiErr != nil {
			return nil, atoiErr
		}
		convertedValue, err = column.Type.Convert(atoi)
	case types.IsDecimal(column.Type):
		// Decimal values need to have any leading/trailing whitespace trimmed off
		// TODO: Consider moving this into DecimalType_.Convert; if DecimalType_.Convert handled trimming
		//       leading/trailing whitespace, this special case for Decimal types wouldn't be needed.
		convertedValue, err = column.Type.Convert(strings.TrimSpace(value.ToString()))
	case types.IsJSON(column.Type):
		// Vitess encodes JSON as a SQL expression string; see convertVitessJsonExpressionString.
		convertedValue, err = convertVitessJsonExpressionString(ctx, value)
	default:
		convertedValue, err = column.Type.Convert(value.ToString())
	}
	if err != nil {
		return nil, fmt.Errorf("unable to convert value %q, for column of type %T: %v", value.ToString(), column.Type, err.Error())
	}

	return convertedValue, nil
}
// convertVitessJsonExpressionString extracts a JSON value from the specified |value| instance, which Vitess has
// encoded as a SQL expression string. Vitess parses the binary JSON representation from an incoming binlog event,
// and converts it into an expression string containing JSON_OBJECT and JSON_ARRAY function calls. Because we don't
// have access to the raw JSON string or JSON bytes, we have to do extra work to translate from Vitess' SQL
// expression syntax into a raw JSON string value that we can pass to the storage layer. If Vitess kept around the
// raw string representation and returned it from value.ToString, this logic would not be necessary.
func convertVitessJsonExpressionString(ctx *sql.Context, value sqltypes.Value) (interface{}, error) {
	if value.Type() != vquery.Type_EXPRESSION {
		return nil, fmt.Errorf("invalid sqltypes.Value specified; expected a Value instance with an Expression type")
	}

	// Strip the "EXPRESSION(...)" wrapper to get the bare SQL expression.
	strValue := value.String()
	if strings.HasPrefix(strValue, "EXPRESSION(") {
		strValue = strValue[len("EXPRESSION(") : len(strValue)-1]
	}

	// Evaluate the expression by running "SELECT <expr>" through the running server's engine.
	node, err := parse.Parse(ctx, "SELECT "+strValue)
	if err != nil {
		return nil, err
	}

	server := sqlserver.GetRunningServer()
	if server == nil {
		return nil, fmt.Errorf("unable to access running SQL server")
	}

	analyze, err := server.Engine.Analyzer.Analyze(ctx, node, nil)
	if err != nil {
		return nil, err
	}
	rowIter, err := analyze.RowIter(ctx, nil)
	if err != nil {
		return nil, err
	}
	// Bug fix: the iterator was previously never closed, leaking any resources it holds.
	// The close error is intentionally ignored: we only read a single value.
	defer rowIter.Close(ctx)

	row, err := rowIter.Next(ctx)
	if err != nil {
		return nil, err
	}

	return row[0], nil
}
// getAllUserDatabaseNames returns the names of all databases in the catalog, excluding the
// built-in "information_schema" and "mysql" system databases.
func getAllUserDatabaseNames(ctx *sql.Context, engine *gms.Engine) []string {
	allDatabases := engine.Analyzer.Catalog.AllDatabases(ctx)
	names := make([]string, 0, len(allDatabases))
	for _, database := range allDatabases {
		name := database.Name()
		if name == "information_schema" || name == "mysql" {
			continue
		}
		names = append(names, name)
	}
	return names
}
// loadReplicaServerId loads the @@GLOBAL.server_id system variable needed to register the replica with the source,
// and returns an error specific to replication configuration if the variable is not set to a valid value.
func loadReplicaServerId() (uint32, error) {
	_, value, found := sql.SystemVariables.GetGlobal("server_id")
	if !found {
		return 0, fmt.Errorf("no server_id global system variable set")
	}

	// The value must be a non-zero uint32 to be usable as a replica server ID.
	serverId, isUint32 := value.(uint32)
	if !isUint32 || serverId == 0 {
		return 0, fmt.Errorf("invalid server ID configured for @@GLOBAL.server_id (%v); "+
			"must be an integer greater than zero and less than 4,294,967,296", serverId)
	}

	return serverId, nil
}
// executeQueryWithEngine runs |query| through |engine| and drains the results, logging (and
// recording in the replica status) any error other than the benign "nothing to commit".
func executeQueryWithEngine(ctx *sql.Context, engine *gms.Engine, query string) {
	// Replicated statements are expected to execute with a current database selected.
	if ctx.GetCurrentDatabase() == "" {
		ctx.GetLogger().Warn("No current database selected")
	}

	_, iter, err := engine.Query(ctx, query)
	if err != nil {
		// Log any errors, except for commits with "nothing to commit"
		if err.Error() != "nothing to commit" {
			msg := fmt.Sprintf("ERROR executing query: %v ", err.Error())
			// Bug fix: use Error, not Errorf — msg is pre-formatted and may contain '%'
			// characters from the underlying error text.
			ctx.GetLogger().Error(msg)
			DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
		}
		return
	}

	// Drain the result iterator so that the query fully executes.
	for {
		_, err := iter.Next(ctx)
		if err != nil {
			if err != io.EOF {
				ctx.GetLogger().Errorf("ERROR reading query results: %v ", err.Error())
			}
			return
		}
	}
}
//
// Generic util functions...
//
// convertToHexString returns a lower-case hex string representation of the specified uint16 value |v|.
func convertToHexString(v uint16) string {
	return strconv.FormatUint(uint64(v), 16)
}
// keys returns a slice containing the keys in the specified map |m|.
// The order of the returned keys is unspecified (Go map iteration order is random).
func keys[K comparable, V any](m map[K]V) []K {
	result := make([]K, 0, len(m))
	for key := range m {
		result = append(result, key)
	}
	return result
}
@@ -0,0 +1,350 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"strings"
"sync"
"time"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/binlogreplication"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
)
// DoltBinlogReplicaController is the singleton controller instance used to implement
// binlog replica support for a Dolt server (see doltBinlogReplicaController below).
var DoltBinlogReplicaController = newDoltBinlogReplicaController()

// ErrServerNotConfiguredAsReplica is returned when replication is started without enough configuration provided.
var ErrServerNotConfiguredAsReplica = fmt.Errorf(
	"server is not configured as a replica; fix with CHANGE REPLICATION SOURCE TO")
// doltBinlogReplicaController implements the BinlogReplicaController interface for a Dolt database in order to
// provide support for a Dolt server to be a replica of a MySQL primary.
//
// This type is used concurrently: multiple sessions on the DB can call this interface concurrently,
// so all state that the controller tracks MUST be protected with a mutex.
type doltBinlogReplicaController struct {
	// status is the current replica status, returned (as a copy) by GetReplicaStatus; guarded by |mu|.
	status binlogreplication.ReplicaStatus
	// filters holds the replication filter (do/ignore table) configuration.
	filters *filterConfiguration
	// applier applies binlog events to the database; it runs on its own goroutine (see StartReplica).
	applier *binlogReplicaApplier
	// ctx is the dedicated execution context for the applier (see SetExecutionContext).
	ctx *sql.Context
	// mu guards mutable controller state, in particular |status|.
	mu *sync.Mutex
}

// Compile-time check that doltBinlogReplicaController satisfies BinlogReplicaController.
var _ binlogreplication.BinlogReplicaController = (*doltBinlogReplicaController)(nil)
// newDoltBinlogReplicaController creates a new doltBinlogReplicaController instance with
// replication stopped, auto-positioning enabled, and an empty filter configuration.
func newDoltBinlogReplicaController() *doltBinlogReplicaController {
	filters := newFilterConfiguration()
	controller := &doltBinlogReplicaController{
		mu:      &sync.Mutex{},
		filters: filters,
		applier: newBinlogReplicaApplier(filters),
	}
	controller.status.AutoPosition = true
	controller.status.ReplicaIoRunning = binlogreplication.ReplicaIoNotRunning
	controller.status.ReplicaSqlRunning = binlogreplication.ReplicaSqlNotRunning
	return controller
}
// StartReplica implements the BinlogReplicaController interface.
//
// It verifies that a replica server_id is configured and that replication source configuration
// has been persisted (via CHANGE REPLICATION SOURCE TO), then launches the applier on its own
// goroutine using the dedicated execution context set by SetExecutionContext.
func (d *doltBinlogReplicaController) StartReplica(ctx *sql.Context) error {
	// NOTE(review): this branch is intentionally disabled (if false) — it is a placeholder for
	// detecting conflicting Dolt replication/clustering configuration.
	if false {
		// TODO: If the database is already configured for Dolt replication/clustering, then error out.
		//       Add a (BATS?) test to cover this case
		return fmt.Errorf("dolt replication already enabled; unable to use binlog replication with other replication modes. " +
			"Disable Dolt replication first before starting binlog replication")
	}

	// If we aren't running in a sql-server context, it would be nice to return a helpful, Dolt-specific
	// error message. Currently, this case would trigger an error from the GMS layer, so we can't give
	// a specific error message about needing to run Dolt in sql-server mode yet.
	_, err := loadReplicaServerId()
	if err != nil {
		return fmt.Errorf("unable to start replication: %s", err.Error())
	}

	configuration, err := loadReplicationConfiguration(ctx)
	if err != nil {
		return err
	} else if configuration == nil {
		// No replication source has been configured yet.
		return ErrServerNotConfiguredAsReplica
	}

	// The applier runs on a separate goroutine, so it needs its own context (see SetExecutionContext).
	if d.ctx == nil {
		return fmt.Errorf("no execution context set for the replica controller")
	}

	ctx.GetLogger().Info("starting binlog replication...")
	d.applier.Go(d.ctx)
	return nil
}
// SetExecutionContext stores |ctx| as the dedicated context the replica's applier uses when
// applying binlog events. The applier cannot reuse any existing session context: it runs on
// a separate goroutine, so sharing a context would cause race conditions.
func (d *doltBinlogReplicaController) SetExecutionContext(ctx *sql.Context) {
	d.ctx = ctx
}
// StopReplica implements the BinlogReplicaController interface.
func (d *doltBinlogReplicaController) StopReplica(_ *sql.Context) error {
	// Signal the applier goroutine to stop processing binlog events.
	d.applier.stopReplicationChan <- struct{}{}

	// Mark both replication threads as stopped in the reported status.
	d.mu.Lock()
	defer d.mu.Unlock()
	d.status.ReplicaIoRunning = binlogreplication.ReplicaIoNotRunning
	d.status.ReplicaSqlRunning = binlogreplication.ReplicaSqlNotRunning
	return nil
}
// SetReplicationSourceOptions implements the BinlogReplicaController interface. It applies the
// specified CHANGE REPLICATION SOURCE TO options to the persisted replica source configuration,
// creating a fresh configuration if none exists yet.
func (d *doltBinlogReplicaController) SetReplicationSourceOptions(ctx *sql.Context, options []binlogreplication.ReplicationOption) error {
	config, err := loadReplicationConfiguration(ctx)
	if err != nil {
		return err
	}
	if config == nil {
		config = mysql_db.NewReplicaSourceInfo()
	}

	for _, option := range options {
		name := strings.ToUpper(option.Name)
		switch name {
		case "SOURCE_HOST", "SOURCE_USER", "SOURCE_PASSWORD":
			strValue, err := getOptionValueAsString(option)
			if err != nil {
				return err
			}
			switch name {
			case "SOURCE_HOST":
				config.Host = strValue
			case "SOURCE_USER":
				config.User = strValue
			case "SOURCE_PASSWORD":
				config.Password = strValue
			}
		case "SOURCE_PORT", "SOURCE_CONNECT_RETRY", "SOURCE_RETRY_COUNT":
			intValue, err := getOptionValueAsInt(option)
			if err != nil {
				return err
			}
			switch name {
			case "SOURCE_PORT":
				config.Port = uint16(intValue)
			case "SOURCE_CONNECT_RETRY":
				config.ConnectRetryInterval = uint32(intValue)
			case "SOURCE_RETRY_COUNT":
				config.ConnectRetryCount = uint64(intValue)
			}
		default:
			return fmt.Errorf("unknown replication source option: %s", option.Name)
		}
	}

	// Persist the updated replica source configuration to disk
	return persistReplicationConfiguration(ctx, config)
}
// SetReplicationFilterOptions implements the BinlogReplicaController interface. It applies the
// specified CHANGE REPLICATION FILTER options (do-table / ignore-table lists) to this replica's
// in-memory filter configuration.
func (d *doltBinlogReplicaController) SetReplicationFilterOptions(_ *sql.Context, options []binlogreplication.ReplicationOption) error {
	for _, option := range options {
		optionName := strings.ToUpper(option.Name)
		if optionName != "REPLICATE_DO_TABLE" && optionName != "REPLICATE_IGNORE_TABLE" {
			return fmt.Errorf("unsupported replication filter option: %s", option.Name)
		}

		tables, err := getOptionValueAsTableNames(option)
		if err != nil {
			return err
		}
		if optionName == "REPLICATE_DO_TABLE" {
			err = d.filters.setDoTables(tables)
		} else {
			err = d.filters.setIgnoreTables(tables)
		}
		if err != nil {
			return err
		}
	}

	// TODO: Consider persisting filter settings. MySQL doesn't actually do this... unlike CHANGE REPLICATION SOURCE,
	//       CHANGE REPLICATION FILTER requires users to re-apply the filter options every time a server is restarted,
	//       or to pass them to mysqld on the command line or in configuration. Since we don't want to force users
	//       to specify these on the command line, we should consider diverging from MySQL behavior here slightly and
	//       persisting the filter configuration options if customers want this.
	return nil
}
// GetReplicaStatus implements the BinlogReplicaController interface. It returns a copy of the
// current replica status, populated with the persisted source configuration and the current
// filter settings, or nil if no replication source has been configured.
func (d *doltBinlogReplicaController) GetReplicaStatus(ctx *sql.Context) (*binlogreplication.ReplicaStatus, error) {
	replicaSourceInfo, err := loadReplicationConfiguration(ctx)
	if err != nil {
		return nil, err
	}
	if replicaSourceInfo == nil {
		return nil, nil
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	// Work on a copy so callers never see later status mutations.
	// (Idiom fix: the local was previously named |copy|, shadowing the builtin.)
	status := d.status
	status.SourceUser = replicaSourceInfo.User
	status.SourceHost = replicaSourceInfo.Host
	status.SourcePort = uint(replicaSourceInfo.Port)
	status.SourceServerUuid = replicaSourceInfo.Uuid
	status.ConnectRetry = replicaSourceInfo.ConnectRetryInterval
	status.SourceRetryCount = replicaSourceInfo.ConnectRetryCount
	// Bug fix: d.filters can be nil (e.g. after RESET REPLICA ALL), which previously caused a
	// nil pointer panic inside getDoTables/getIgnoreTables.
	if d.filters != nil {
		status.ReplicateDoTables = d.filters.getDoTables()
		status.ReplicateIgnoreTables = d.filters.getIgnoreTables()
	}
	return &status, nil
}
// ResetReplica implements the BinlogReplicaController interface. It clears the replica's error
// status, and if |resetAll| is true, also deletes the persisted replication source configuration
// and clears all replication filters. Replication must be stopped before calling this.
func (d *doltBinlogReplicaController) ResetReplica(ctx *sql.Context, resetAll bool) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Refuse to reset while either replication thread is still running.
	if d.status.ReplicaIoRunning != binlogreplication.ReplicaIoNotRunning ||
		d.status.ReplicaSqlRunning != binlogreplication.ReplicaSqlNotRunning {
		return fmt.Errorf("unable to reset replica while replication is running; stop replication and try again")
	}

	// Reset error status
	d.status.LastIoErrNumber = 0
	d.status.LastSqlErrNumber = 0
	d.status.LastIoErrorTimestamp = nil
	d.status.LastSqlErrorTimestamp = nil
	d.status.LastSqlError = ""
	d.status.LastIoError = ""

	if resetAll {
		err := deleteReplicationConfiguration(ctx)
		if err != nil {
			return err
		}

		// Bug fix: this previously set d.filters to nil, which caused a nil pointer panic in
		// methods (e.g. getDoTables) that lock the filter mutex without a nil check. Resetting
		// to a fresh, empty configuration clears all filters safely.
		// NOTE(review): the applier was constructed with a reference to the original filter
		// configuration and will not observe this replacement — confirm whether it should.
		d.filters = newFilterConfiguration()
	}

	return nil
}
// updateStatus allows the caller to safely update the replica controller's status. The controller
// locks its mutex before the specified function |f| is called, and unlocks it after |f| is finished
// running. The current status is passed into the callback |f|, which can safely update or copy any
// fields it needs.
func (d *doltBinlogReplicaController) updateStatus(f func(status *binlogreplication.ReplicaStatus)) {
	d.mu.Lock()
	f(&d.status)
	d.mu.Unlock()
}
// setIoError updates the current replication status with the specific |errno| and |message| to describe an IO error.
func (d *doltBinlogReplicaController) setIoError(errno uint, message string) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Truncate long messages to avoid errors when reporting replica status.
	const maxMessageLength = 256
	if len(message) > maxMessageLength {
		message = message[:maxMessageLength]
	}

	now := time.Now()
	d.status.LastIoErrorTimestamp = &now
	d.status.LastIoErrNumber = errno
	d.status.LastIoError = message
}
// setSqlError updates the current replication status with the specific |errno| and |message| to describe an SQL error.
func (d *doltBinlogReplicaController) setSqlError(errno uint, message string) {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Truncate long messages to avoid errors when reporting replica status.
	const maxMessageLength = 256
	if len(message) > maxMessageLength {
		message = message[:maxMessageLength]
	}

	now := time.Now()
	d.status.LastSqlErrorTimestamp = &now
	d.status.LastSqlErrNumber = errno
	d.status.LastSqlError = message
}
//
// Helper functions
//
// getOptionValueAsString returns |option|'s value as a string, or an error if the option
// does not hold a string value.
func getOptionValueAsString(option binlogreplication.ReplicationOption) (string, error) {
	if stringValue, isString := option.Value.(binlogreplication.StringReplicationOptionValue); isString {
		return stringValue.GetValueAsString(), nil
	}
	return "", fmt.Errorf("unsupported value type for option %q; found %T, "+
		"but expected a string", option.Name, option.Value.GetValue())
}
// getOptionValueAsInt returns |option|'s value as an int, or an error if the option
// does not hold an integer value.
func getOptionValueAsInt(option binlogreplication.ReplicationOption) (int, error) {
	if intValue, isInt := option.Value.(binlogreplication.IntegerReplicationOptionValue); isInt {
		return intValue.GetValueAsInt(), nil
	}
	return 0, fmt.Errorf("unsupported value type for option %q; found %T, "+
		"but expected an integer", option.Name, option.Value.GetValue())
}
// getOptionValueAsTableNames returns |option|'s value as a list of tables, or an error if the
// option does not hold a table-name list.
func getOptionValueAsTableNames(option binlogreplication.ReplicationOption) ([]sql.UnresolvedTable, error) {
	if tablesValue, isTables := option.Value.(binlogreplication.TableNamesReplicationOptionValue); isTables {
		return tablesValue.GetValueAsTableList(), nil
	}
	return nil, fmt.Errorf("unsupported value type for option %q; found %T, "+
		"but expected a list of tables", option.Name, option.Value.GetValue())
}
// verifyAllTablesAreQualified returns an error if any table in |urts| is not qualified
// with a database name; replication filter tables must always be database-qualified.
func verifyAllTablesAreQualified(urts []sql.UnresolvedTable) error {
	for _, table := range urts {
		if table.Database() != "" {
			continue
		}
		return fmt.Errorf("no database specified for table '%s'; "+
			"all filter table names must be qualified with a database name", table.Name())
	}
	return nil
}
@@ -0,0 +1,168 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"strings"
"sync"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/mysql"
)
// filterConfiguration defines the binlog filtering rules applied on the replica.
//
// Database and table names are stored lowercased (see setDoTables/setIgnoreTables), so all
// lookups must lowercase their inputs before consulting these maps.
type filterConfiguration struct {
	// doTables holds a map of database name to map of table names, indicating tables that SHOULD be replicated.
	doTables map[string]map[string]struct{}
	// ignoreTables holds a map of database name to map of table names, indicating tables that should NOT be replicated.
	ignoreTables map[string]map[string]struct{}
	// mu guards against concurrent access to the filter configuration data.
	mu *sync.Mutex
}
// newFilterConfiguration creates a new, empty filterConfiguration with all members initialized.
func newFilterConfiguration() *filterConfiguration {
	fc := &filterConfiguration{
		mu: &sync.Mutex{},
	}
	fc.doTables = make(map[string]map[string]struct{})
	fc.ignoreTables = make(map[string]map[string]struct{})
	return fc
}
// setDoTables sets the tables that are allowed to replicate and returns an error if any problems were
// encountered, such as unqualified tables being specified in |urts|. If any DoTables were previously configured,
// they are cleared out before the new tables are set as the value of DoTables.
func (fc *filterConfiguration) setDoTables(urts []sql.UnresolvedTable) error {
	if err := verifyAllTablesAreQualified(urts); err != nil {
		return err
	}

	fc.mu.Lock()
	defer fc.mu.Unlock()

	// Replace, don't merge: new filters discard any previous configuration.
	fc.doTables = make(map[string]map[string]struct{})
	for _, urt := range urts {
		// Names are normalized to lowercase so later lookups are case-insensitive.
		db := strings.ToLower(urt.Database())
		table := strings.ToLower(urt.Name())
		tables, found := fc.doTables[db]
		if !found {
			tables = make(map[string]struct{})
			fc.doTables[db] = tables
		}
		tables[table] = struct{}{}
	}
	return nil
}
// setIgnoreTables sets the tables that are NOT allowed to replicate and returns an error if any problems were
// encountered, such as unqualified tables being specified in |urts|. If any IgnoreTables were previously configured,
// they are cleared out before the new tables are set as the value of IgnoreTables.
func (fc *filterConfiguration) setIgnoreTables(urts []sql.UnresolvedTable) error {
	if err := verifyAllTablesAreQualified(urts); err != nil {
		return err
	}

	fc.mu.Lock()
	defer fc.mu.Unlock()

	// Replace, don't merge: new filters discard any previous configuration.
	fc.ignoreTables = make(map[string]map[string]struct{})
	for _, urt := range urts {
		// Names are normalized to lowercase so later lookups are case-insensitive.
		db := strings.ToLower(urt.Database())
		table := strings.ToLower(urt.Name())
		tables, found := fc.ignoreTables[db]
		if !found {
			tables = make(map[string]struct{})
			fc.ignoreTables[db] = tables
		}
		tables[table] = struct{}{}
	}
	return nil
}
// isTableFilteredOut returns true if the table identified by |tableMap| has been filtered out on this replica and
// should not have any updates applied from binlog messages. A nil receiver means no filtering is configured.
func (fc *filterConfiguration) isTableFilteredOut(ctx *sql.Context, tableMap *mysql.TableMap) bool {
	if fc == nil {
		return false
	}

	// Filter maps are keyed by lowercased names, so normalize before looking up.
	table := strings.ToLower(tableMap.Name)
	db := strings.ToLower(tableMap.Database)

	fc.mu.Lock()
	defer fc.mu.Unlock()

	// If any filter doTable options are specified, then a table MUST be listed in the set
	// for it to be replicated. doTables options are processed BEFORE ignoreTables options.
	// If a table appears in both doTable and ignoreTables, it is ignored.
	// https://dev.mysql.com/doc/refman/8.0/en/replication-rules-table-options.html
	if len(fc.doTables) > 0 {
		// NOTE(review): if |db| has no entry in doTables at all, this falls through and the
		// table is NOT filtered out — confirm this matches the intended replicate-do-table
		// semantics, where the presence of any do-table rule excludes unlisted tables.
		if doTables, ok := fc.doTables[db]; ok {
			if _, ok := doTables[table]; !ok {
				ctx.GetLogger().Tracef("skipping table %s.%s (not in doTables) ", tableMap.Database, tableMap.Name)
				return true
			}
		}
	}

	if len(fc.ignoreTables) > 0 {
		if ignoredTables, ok := fc.ignoreTables[db]; ok {
			if _, ok := ignoredTables[table]; ok {
				// If this table is being ignored, don't process any further
				ctx.GetLogger().Tracef("skipping table %s.%s (in ignoreTables)", tableMap.Database, tableMap.Name)
				return true
			}
		}
	}

	return false
}
// getDoTables returns a slice of qualified table names that are configured to be replicated.
// A nil receiver (no filter configuration) yields nil.
func (fc *filterConfiguration) getDoTables() []string {
	// Bug fix: mirror isTableFilteredOut's nil handling — fc can be nil (e.g. after
	// RESET REPLICA ALL), and locking fc.mu on a nil receiver panicked.
	if fc == nil {
		return nil
	}
	fc.mu.Lock()
	defer fc.mu.Unlock()
	return convertFilterMapToStringSlice(fc.doTables)
}
// getIgnoreTables returns a slice of qualified table names that are configured to be filtered out of replication.
// A nil receiver (no filter configuration) yields nil.
func (fc *filterConfiguration) getIgnoreTables() []string {
	// Bug fix: mirror isTableFilteredOut's nil handling — fc can be nil (e.g. after
	// RESET REPLICA ALL), and locking fc.mu on a nil receiver panicked.
	if fc == nil {
		return nil
	}
	fc.mu.Lock()
	defer fc.mu.Unlock()
	return convertFilterMapToStringSlice(fc.ignoreTables)
}
// convertFilterMapToStringSlice converts the specified |filterMap| into a string slice, by iterating over every
// key in the top level map, which stores a database name, and for each of those keys, iterating over every key
// in the inner map, which stores a table name. Each table name is qualified with the matching database name and the
// results are returned as a slice of qualified table names. A nil map yields nil.
func convertFilterMapToStringSlice(filterMap map[string]map[string]struct{}) []string {
	if filterMap == nil {
		return nil
	}

	// Size the result by the total table count, not the database count, to avoid regrowth.
	total := 0
	for _, tables := range filterMap {
		total += len(tables)
	}

	tableNames := make([]string, 0, total)
	for dbName, tables := range filterMap {
		// (Idiom fix: `for k := range` instead of `for k, _ := range`.)
		for tableName := range tables {
			tableNames = append(tableNames, fmt.Sprintf("%s.%s", dbName, tableName))
		}
	}
	return tableNames
}
@@ -0,0 +1,620 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"math/rand"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestBinlogReplicationForAllTypes tests that operations (inserts, updates, and deletes) on all SQL
// data types can be successfully replicated.
//
// The test inserts three rows on the primary (assertion index 0 values, assertion index 1 values,
// and all-NULL values), then rotates which row holds which values via updates, and finally deletes
// all rows — verifying the replica after each phase.
func TestBinlogReplicationForAllTypes(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Set the session's timezone to UTC, to avoid TIMESTAMP test values changing
	// when they are converted to UTC for storage.
	primaryDatabase.MustExec("SET @@time_zone = '+0:00';")

	// Create the test table
	tableName := "alltypes"
	createTableStatement := generateCreateTableStatement(tableName)
	primaryDatabase.MustExec(createTableStatement)

	// Make inserts on the primary: small values, large values, and null values
	primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 0))
	primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 1))
	primaryDatabase.MustExec(generateInsertNullValuesStatement(tableName))

	// Verify inserts on replica
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["pk"])
	assertValues(t, 0, row)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "2", row["pk"])
	assertValues(t, 1, row)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "3", row["pk"])
	assertNullValues(t, row)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Make updates on the primary: rotate which row holds null, index-0, and index-1 values
	primaryDatabase.MustExec(generateUpdateToNullValuesStatement(tableName, 1))
	primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 2, 0))
	primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 3, 1))

	// Verify updates on the replica
	waitForReplicaToCatchUp(t)
	// NOTE(review): the `use db01` looks redundant since the query below is fully qualified —
	// confirm whether it is needed for the replica session here.
	replicaDatabase.MustExec("use db01;")
	rows, err = replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["pk"])
	assertNullValues(t, row)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "2", row["pk"])
	assertValues(t, 0, row)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "3", row["pk"])
	assertValues(t, 1, row)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Make deletes on the primary
	primaryDatabase.MustExec("delete from alltypes where pk=1;")
	primaryDatabase.MustExec("delete from alltypes where pk=2;")
	primaryDatabase.MustExec("delete from alltypes where pk=3;")

	// Verify deletes on the replica: the table should now be empty
	waitForReplicaToCatchUp(t)
	rows, err = replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;")
	require.NoError(t, err)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
// ---------------------
// Test Data
// ---------------------
type typeDescriptionAssertion struct {
Value interface{}
ExpectedValue interface{}
}
func newTypeDescriptionAssertion(v interface{}) typeDescriptionAssertion {
return typeDescriptionAssertion{Value: v}
}
func newTypeDescriptionAssertionWithExpectedValue(v interface{}, x interface{}) typeDescriptionAssertion {
return typeDescriptionAssertion{Value: v, ExpectedValue: x}
}
// getExpectedValue returns the value expected to be read back from the replica
// for this assertion. If an explicit ExpectedValue was set, it is returned
// directly. Otherwise, string values wrapped in a DATE/TIMESTAMP/TIME
// constructor (e.g. "DATE('1981-02-16')") have the wrapper stripped and the
// bare literal inside the quotes returned; all other values are returned
// unchanged.
func (tda *typeDescriptionAssertion) getExpectedValue() interface{} {
	if tda.ExpectedValue != nil {
		return tda.ExpectedValue
	}

	if valueString, isString := tda.Value.(string); isString {
		// TIMESTAMP must be listed before TIME, since TIME is a prefix of TIMESTAMP.
		removedPrefixes := []string{"DATE", "TIMESTAMP", "TIME"}
		// NOTE: the original named this lowercaseValue, but ToUpper produces
		// an uppercase string; renamed to match what it holds.
		uppercaseValue := strings.ToUpper(valueString)
		for _, prefix := range removedPrefixes {
			if strings.HasPrefix(uppercaseValue, prefix) {
				// Strip the leading `PREFIX('` and the trailing `')`.
				return valueString[len(prefix)+2 : len(valueString)-2]
			}
		}
	}

	return tda.Value
}
// typeDescription describes one SQL column type under test, together with
// two value assertions used to exercise inserts and updates for that type.
type typeDescription struct {
	// TypeDefinition is the SQL type expression, e.g. "decimal(10,2)".
	TypeDefinition string
	// Assertions holds exactly two test values for this type.
	Assertions [2]typeDescriptionAssertion
}
// ColumnName derives a column name from the type definition by prefixing an
// underscore and replacing or removing characters that are not identifier-safe.
func (td *typeDescription) ColumnName() string {
	sanitizer := strings.NewReplacer(
		"(", "_",
		")", "_",
		" ", "_",
		",", "_",
		"\"", "",
		"'", "",
	)
	return "_" + sanitizer.Replace(td.TypeDefinition)
}
// IsStringType reports whether values for this type must be quoted as SQL
// string literals in generated insert/update statements.
func (td *typeDescription) IsStringType() bool {
	def := strings.ToLower(td.TypeDefinition)
	for _, keyword := range []string{"char", "binary", "blob", "text", "enum", "set", "json"} {
		if strings.Contains(def, keyword) {
			return true
		}
	}
	return false
}
// allTypes contains test data covering all SQL types.
//
// Each typeDescription supplies two value assertions that the replication
// tests insert, update, and read back on the replica.
//
// TODO: TypeWireTests contains most of the test data we need. I found it after implementing this, but we
// could simplify this test code by converting to use TypeWireTests and enhancing it with the additional
// test cases we need to cover (e.g. NULL values).
var allTypes = []typeDescription{
	// Bit types
	{
		TypeDefinition: "bit",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertionWithExpectedValue("0", []uint8{0}),
			newTypeDescriptionAssertionWithExpectedValue("1", []uint8{1}),
		},
	},
	{
		TypeDefinition: "bit(64)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertionWithExpectedValue("0", []byte{0, 0, 0, 0, 0, 0, 0, 0}),
			newTypeDescriptionAssertionWithExpectedValue("1", []byte{0, 0, 0, 0, 0, 0, 0, 1}),
		},
	},
	// Integer types
	{
		TypeDefinition: "tinyint",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-128"),
			newTypeDescriptionAssertion("127"),
		},
	},
	{
		TypeDefinition: "tinyint unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("255"),
		},
	},
	{
		TypeDefinition: "bool",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("1"),
		},
	},
	{
		TypeDefinition: "smallint",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-32768"),
			newTypeDescriptionAssertion("32767"),
		},
	},
	{
		TypeDefinition: "smallint unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("65535"),
		},
	},
	// NOTE(review): the mediumint/int/bigint entries below reuse smallint-range
	// values rather than each type's full range — presumably for simplicity;
	// full-range boundary values would strengthen coverage (TODO confirm intent).
	{
		TypeDefinition: "mediumint",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-32768"),
			newTypeDescriptionAssertion("32767"),
		},
	},
	{
		TypeDefinition: "mediumint unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("65535"),
		},
	},
	{
		TypeDefinition: "int",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-32768"),
			newTypeDescriptionAssertion("32767"),
		},
	},
	{
		TypeDefinition: "int unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("65535"),
		},
	},
	{
		TypeDefinition: "bigint",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-32768"),
			newTypeDescriptionAssertion("32767"),
		},
	},
	{
		TypeDefinition: "bigint unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("65535"),
		},
	},
	// Decimal types
	{
		TypeDefinition: "decimal",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("1234567890"),
		},
	},
	{
		TypeDefinition: "decimal(10,2)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0.00"),
			newTypeDescriptionAssertion("12345678.00"),
		},
	},
	{
		TypeDefinition: "decimal(20,8)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-1234567890.12345678"),
			newTypeDescriptionAssertion("999999999999.00000001"),
		},
	},
	// Floating point types
	{
		TypeDefinition: "float",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-3.40282e+38"),
			newTypeDescriptionAssertion("-1.17549e-38"),
		},
	},
	{
		TypeDefinition: "float unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("1.17549e-38"),
			newTypeDescriptionAssertion("3.40282e+38"),
		},
	},
	{
		TypeDefinition: "double",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("-1.7976931348623157e+308"),
			newTypeDescriptionAssertion("-2.2250738585072014e-308"),
		},
	},
	{
		TypeDefinition: "double unsigned",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("2.2250738585072014e-308"),
			newTypeDescriptionAssertion("1.7976931348623157e+308"),
		},
	},
	// String types
	{
		TypeDefinition: "char(1)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(""),
			newTypeDescriptionAssertion("0"),
		},
	},
	{
		TypeDefinition: "char(10)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(""),
			newTypeDescriptionAssertion("0123456789"),
		},
	},
	{
		TypeDefinition: "varchar(255)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(""),
			newTypeDescriptionAssertion(generateTestDataString(255)),
		},
	},
	{
		TypeDefinition: "char(1) binary",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("1"),
		},
	},
	{
		TypeDefinition: "binary(1)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("1"),
		},
	},
	{
		TypeDefinition: "binary(255)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(generateTestDataString(255)),
			newTypeDescriptionAssertion(generateTestDataString(255)),
		},
	},
	{
		TypeDefinition: "varbinary(1)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion("1"),
		},
	},
	{
		TypeDefinition: "varbinary(255)",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(generateTestDataString(0)),
			newTypeDescriptionAssertion(generateTestDataString(255)),
		},
	},
	// Blob/Text types
	{
		TypeDefinition: "tinyblob",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(255)),
		},
	},
	{
		TypeDefinition: "blob",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(10_000)),
		},
	},
	{
		TypeDefinition: "mediumblob",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(15_000)),
		},
	},
	{
		TypeDefinition: "longblob",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(20_000)),
		},
	},
	{
		TypeDefinition: "tinytext",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(255)),
		},
	},
	{
		TypeDefinition: "text",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(10_000)),
		},
	},
	{
		TypeDefinition: "mediumtext",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(15_000)),
		},
	},
	{
		TypeDefinition: "longtext",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("0"),
			newTypeDescriptionAssertion(generateTestDataString(20_000)),
		},
	},
	// Enum and Set types
	{
		TypeDefinition: "ENUM(\"\",\"a\",\"b\",\"c\")",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion(""),
			newTypeDescriptionAssertion("c"),
		},
	},
	{
		TypeDefinition: "SET(\"a\",\"b\",\"c\")",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("a"),
			newTypeDescriptionAssertion("a,b,c"),
		},
	},
	// Date types. The DATE/TIME/TIMESTAMP wrappers below are stripped by
	// typeDescriptionAssertion.getExpectedValue when computing expected values.
	{
		TypeDefinition: "date",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("DATE('1981-02-16')"),
			newTypeDescriptionAssertion("DATE('1981-02-16')"),
		},
	},
	{
		TypeDefinition: "time",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("TIME('01:02:03')"),
			newTypeDescriptionAssertion("TIME('01:02:03')"),
		},
	},
	{
		TypeDefinition: "datetime",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("TIMESTAMP('1981-02-16 12:13:14')"),
			newTypeDescriptionAssertion("TIMESTAMP('1981-02-16 12:13:14')"),
		},
	},
	{
		TypeDefinition: "timestamp",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("TIMESTAMP('1981-02-16 12:13:14')"),
			newTypeDescriptionAssertion("TIMESTAMP('1981-02-16 12:13:14')"),
		},
	},
	{
		TypeDefinition: "year",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("1981"),
			newTypeDescriptionAssertion("2020"),
		},
	},
	// Spatial types. Expected values are the raw SRID-prefixed WKB bytes
	// returned by the replica.
	{
		TypeDefinition: "geometry",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertionWithExpectedValue("POINT(18, 23)",
				"\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x002@\x00\x00\x00\x00\x00\x007@"),
			newTypeDescriptionAssertionWithExpectedValue("LINESTRING(POINT(0,0),POINT(1,2),POINT(2,4))",
				"\x00\x00\x00\x00\x01\x02\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"+
					"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00"+
					"\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x10@"),
		},
	},
	// JSON types
	{
		TypeDefinition: "json",
		Assertions: [2]typeDescriptionAssertion{
			newTypeDescriptionAssertion("{}"),
			newTypeDescriptionAssertion("{\"name\":\"BillyBob\",\"os\":\"Mac\",\"resolution\":{\"x\":1920,\"y\":1080}}"),
		},
	},
}
// ---------------------
// Test Helper Functions
// ---------------------
// assertValues asserts that |row| contains the expected value for every column
// in allTypes, using the assertion at |assertionIndex| for each type. NULL
// column values are compared as the empty string.
func assertValues(t *testing.T, assertionIndex int, row map[string]interface{}) {
	for _, typeDesc := range allTypes {
		assertion := typeDesc.Assertions[assertionIndex]
		expectedValue := assertion.getExpectedValue()

		// Single map lookup instead of two; NULLs become "".
		actualValue := ""
		if v := row[typeDesc.ColumnName()]; v != nil {
			actualValue = v.(string)
		}

		if typeDesc.TypeDefinition == "json" {
			// LD_1, DOLT_DEV, and DOLT storage formats return JSON strings slightly differently; DOLT removes spaces
			// while LD_1 and DOLT_DEV add whitespace, so for json comparison, we sanitize by removing whitespace.
			actualValue = strings.ReplaceAll(actualValue, " ", "")
		}

		// Fixed duplicated word ("for for") in the failure message.
		require.EqualValues(t, expectedValue, actualValue,
			"Failed on assertion %d for column %q", assertionIndex, typeDesc.ColumnName())
	}
}
// assertNullValues asserts that |row| contains a NULL value for every column
// in allTypes.
func assertNullValues(t *testing.T, row map[string]interface{}) {
	for _, typeDesc := range allTypes {
		// Fixed duplicated word ("for for") in the failure message.
		require.Nil(t, row[typeDesc.ColumnName()],
			"Failed on NULL value for column %q", typeDesc.ColumnName())
	}
}
// generateCreateTableStatement returns a CREATE TABLE statement for |tableName|
// with an auto-increment pk column plus one column for every type in allTypes.
func generateCreateTableStatement(tableName string) string {
	columns := make([]string, 0, len(allTypes)+1)
	columns = append(columns, "pk int primary key auto_increment")
	for _, typeDesc := range allTypes {
		columns = append(columns, typeDesc.ColumnName()+" "+typeDesc.TypeDefinition)
	}
	return "create table " + tableName + "(" + strings.Join(columns, ", ") + ");"
}
// generateInsertValuesStatement returns an INSERT statement for |tableName|
// that inserts the assertion value at |assertionIndex| for every type in
// allTypes, quoting values for string types.
func generateInsertValuesStatement(tableName string, assertionIndex int) string {
	fields := make([]string, 0, len(allTypes)+1)
	fields = append(fields, "DEFAULT")
	for _, typeDesc := range allTypes {
		value := typeDesc.Assertions[assertionIndex].Value
		if typeDesc.IsStringType() {
			fields = append(fields, fmt.Sprintf("'%s'", value))
		} else {
			fields = append(fields, fmt.Sprintf("%v", value))
		}
	}
	return "insert into " + tableName + " values (" + strings.Join(fields, ", ") + ");"
}
// generateInsertNullValuesStatement returns an INSERT statement for |tableName|
// that inserts a NULL for every column in allTypes.
func generateInsertNullValuesStatement(tableName string) string {
	nulls := strings.Repeat(", null", len(allTypes))
	return "insert into " + tableName + " values (DEFAULT" + nulls + ");"
}
// generateUpdateToNullValuesStatement returns an UPDATE statement that sets
// every allTypes column of the row with primary key |pk| to NULL.
func generateUpdateToNullValuesStatement(tableName string, pk int) string {
	assignments := make([]string, len(allTypes))
	for i, typeDesc := range allTypes {
		assignments[i] = typeDesc.ColumnName() + "=NULL"
	}
	return fmt.Sprintf("update %s set %s where pk=%d;",
		tableName, strings.Join(assignments, ", "), pk)
}
// generateUpdateValuesStatement returns an UPDATE statement that sets every
// allTypes column of the row with primary key |pk| to the assertion value at
// |assertionIndex|, quoting values for string types.
func generateUpdateValuesStatement(tableName string, pk int, assertionIndex int) string {
	assignments := make([]string, len(allTypes))
	for i, typeDesc := range allTypes {
		value := typeDesc.Assertions[assertionIndex].Value
		if typeDesc.IsStringType() {
			assignments[i] = fmt.Sprintf("%s='%s'", typeDesc.ColumnName(), value)
		} else {
			assignments[i] = fmt.Sprintf("%s=%v", typeDesc.ColumnName(), value)
		}
	}
	return fmt.Sprintf("update %s set %s where pk=%d;",
		tableName, strings.Join(assignments, ", "), pk)
}
// generateTestDataString returns a pseudo-random string of |length| characters,
// each drawn from the ASCII range 48–89 ('0' through 'Y').
func generateTestDataString(length uint) string {
	var sb strings.Builder
	sb.Grow(int(length))
	for i := uint(0); i < length; i++ {
		sb.WriteByte(byte(rand.Intn(90-48) + 48))
	}
	return sb.String()
}
@@ -0,0 +1,191 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// TestBinlogReplicationFilters_ignoreTablesOnly tests that the ignoreTables replication
// filtering option is correctly applied and honored.
func TestBinlogReplicationFilters_ignoreTablesOnly(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Ignore replication events for db01.t2. Also tests that the first filter setting is
	// overwritten by the second, and that db and table names are case-insensitive.
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t1);")
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(DB01.T2);")

	// Assert that status shows replication filters (names normalized to lowercase)
	status := showReplicaStatus(t)
	require.Equal(t, "db01.t2", status["Replicate_Ignore_Table"])
	require.Equal(t, "", status["Replicate_Do_Table"])

	// Make identical changes to t1 and t2 on the primary
	primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);")
	primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);")
	for i := 1; i < 12; i++ {
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i))
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i))
	}
	primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;")
	primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;")
	primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;")
	primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;")

	// Pause to let the replica catch up
	waitForReplicaToCatchUp(t)

	// Verify that all changes from t1 were applied on the replica:
	// 11 inserts, shifted down by one, then pk=10 deleted -> pks 0..9
	rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "10", row["count"])
	require.Equal(t, "0", row["min"])
	require.Equal(t, "9", row["max"])
	require.NoError(t, rows.Close())

	// Verify that no changes from t2 were applied on the replica
	rows, err = replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["count"])
	require.Equal(t, nil, row["min"])
	require.Equal(t, nil, row["max"])
	require.NoError(t, rows.Close())
}
// TestBinlogReplicationFilters_doTablesOnly tests that the doTables replication
// filtering option is correctly applied and honored.
func TestBinlogReplicationFilters_doTablesOnly(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Replicate events only for db01.t1. Also tests that the first filter setting is
	// overwritten by the second, and that db and table names are case-insensitive.
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t2);")
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(DB01.T1);")

	// Assert that status shows replication filters (names normalized to lowercase)
	status := showReplicaStatus(t)
	require.Equal(t, "db01.t1", status["Replicate_Do_Table"])
	require.Equal(t, "", status["Replicate_Ignore_Table"])

	// Make identical changes to t1 and t2 on the primary
	primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);")
	primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);")
	for i := 1; i < 12; i++ {
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i))
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i))
	}
	primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;")
	primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;")
	primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;")
	primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;")

	// Pause to let the replica catch up
	waitForReplicaToCatchUp(t)

	// Verify that all changes from t1 were applied on the replica:
	// 11 inserts, shifted down by one, then pk=10 deleted -> pks 0..9
	rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "10", row["count"])
	require.Equal(t, "0", row["min"])
	require.Equal(t, "9", row["max"])
	require.NoError(t, rows.Close())

	// Verify that no changes from t2 were applied on the replica
	rows, err = replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["count"])
	require.Equal(t, nil, row["min"])
	require.Equal(t, nil, row["max"])
	require.NoError(t, rows.Close())
}
// TestBinlogReplicationFilters_doTablesAndIgnoreTables tests that the doTables and ignoreTables
// replication filtering options are correctly applied and honored when used together
// (ignoreTables takes precedence over doTables for a table listed in both).
func TestBinlogReplicationFilters_doTablesAndIgnoreTables(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Replicate events for db01.t1 and db01.t2
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t1, db01.t2);")
	// Ignore replication events for db01.t2
	replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t2);")

	// Assert that replica status shows replication filters; the do-table
	// list may be reported in either order
	status := showReplicaStatus(t)
	require.True(t, status["Replicate_Do_Table"] == "db01.t1,db01.t2" ||
		status["Replicate_Do_Table"] == "db01.t2,db01.t1")
	require.Equal(t, "db01.t2", status["Replicate_Ignore_Table"])

	// Make identical changes to t1 and t2 on the primary
	primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);")
	primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);")
	for i := 1; i < 12; i++ {
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i))
		primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i))
	}
	primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;")
	primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;")
	primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;")
	primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;")

	// Pause to let the replica catch up
	waitForReplicaToCatchUp(t)

	// Verify that all changes from t1 were applied on the replica:
	// 11 inserts, shifted down by one, then pk=10 deleted -> pks 0..9
	rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "10", row["count"])
	require.Equal(t, "0", row["min"])
	require.Equal(t, "9", row["max"])
	require.NoError(t, rows.Close())

	// Verify that no changes from t2 were applied on the replica
	rows, err = replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["count"])
	require.Equal(t, nil, row["min"])
	require.Equal(t, nil, row["max"])
	require.NoError(t, rows.Close())
}
// TestBinlogReplicationFilters_errorCases tests the errors returned for invalid
// CHANGE REPLICATION FILTER statements.
func TestBinlogReplicationFilters_errorCases(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)

	// All tables in a filter must be qualified with a database name
	_, err := replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(t1);")
	require.Error(t, err)
	require.ErrorContains(t, err, "no database specified for table")

	_, err = replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(t1);")
	require.Error(t, err)
	require.ErrorContains(t, err, "no database specified for table")
}
@@ -0,0 +1,200 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestBinlogReplicationMultiDb tests that binlog events spanning multiple databases are correctly
// applied by a replica. Each database should end up with the replicated rows for its own table,
// and each database's dolt_diff system table should record the replicated commits.
func TestBinlogReplicationMultiDb(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Make changes on the primary to db01 and db02
	primaryDatabase.MustExec("create database db02;")
	primaryDatabase.MustExec("create table db01.t01 (pk int primary key, c1 int default (0))")
	primaryDatabase.MustExec("create table db02.t02 (pk int primary key, c1 int default (0))")
	primaryDatabase.MustExec("insert into db01.t01 (pk) values (1), (3), (5), (8), (9);")
	primaryDatabase.MustExec("insert into db02.t02 (pk) values (2), (4), (6), (7), (10);")
	primaryDatabase.MustExec("delete from db01.t01 where pk=9;")
	primaryDatabase.MustExec("delete from db02.t02 where pk=10;")
	primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;")
	primaryDatabase.MustExec("update db02.t02 set pk=8 where pk=7;")

	// Verify the changes in db01 on the replica: pks 1, 3, 5, 7 remain
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select * from db01.t01 order by pk asc;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "3", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "5", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "7", row["pk"])
	require.False(t, rows.Next())
	// Fixed: rows.Close() was previously called twice here; once is enough.
	require.NoError(t, rows.Close())

	// Verify db01.dolt_diff: three data changes (insert, delete, update)
	// followed by the schema change from the table creation
	replicaDatabase.MustExec("use db01;")
	rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "1", row["data_change"])
	require.EqualValues(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "1", row["data_change"])
	require.EqualValues(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "1", row["data_change"])
	require.EqualValues(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "0", row["data_change"])
	require.EqualValues(t, "1", row["schema_change"])
	require.False(t, rows.Next())
	// Fixed: rows.Close() was previously called twice here; once is enough.
	require.NoError(t, rows.Close())

	// Verify the changes in db02 on the replica: pks 2, 4, 6, 8 remain
	replicaDatabase.MustExec("use db02;")
	rows, err = replicaDatabase.Queryx("select * from db02.t02 order by pk asc;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "2", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "4", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "6", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "8", row["pk"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Verify db02.dolt_diff: three data changes followed by one schema change
	rows, err = replicaDatabase.Queryx("select * from db02.dolt_diff;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "0", row["data_change"])
	require.Equal(t, "1", row["schema_change"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
// TestBinlogReplicationMultiDbTransactions tests that binlog events for transactions that span
// multiple DBs are applied correctly to a replica.
func TestBinlogReplicationMultiDbTransactions(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Make changes on the primary to db01 and db02, all within a single transaction
	primaryDatabase.MustExec("create database db02;")
	primaryDatabase.MustExec("create table db01.t01 (pk int primary key, c1 int default (0))")
	primaryDatabase.MustExec("create table db02.t02 (pk int primary key, c1 int default (0))")
	// Fixed: "set @autocommit = 0" only assigned a user-defined variable named
	// @autocommit and left the autocommit session variable enabled; the session
	// variable must be set without the @ prefix to disable autocommit.
	primaryDatabase.MustExec("set autocommit = 0;")
	primaryDatabase.MustExec("start transaction;")
	primaryDatabase.MustExec("insert into db01.t01 (pk) values (1), (3), (5), (8), (9);")
	primaryDatabase.MustExec("insert into db02.t02 (pk) values (2), (4), (6), (7), (10);")
	primaryDatabase.MustExec("delete from db01.t01 where pk=9;")
	primaryDatabase.MustExec("delete from db02.t02 where pk=10;")
	primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;")
	primaryDatabase.MustExec("update db02.t02 set pk=8 where pk=7;")
	primaryDatabase.MustExec("commit;")

	// Verify the changes in db01 on the replica: pks 1, 3, 5, 7 remain
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select * from db01.t01 order by pk asc;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "3", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "5", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "7", row["pk"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Verify db01.dolt_diff: all DML landed in one commit (one data change),
	// preceded in history by the schema change from the table creation
	replicaDatabase.MustExec("use db01;")
	rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "1", row["data_change"])
	require.EqualValues(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t01", row["table_name"])
	require.EqualValues(t, "0", row["data_change"])
	require.EqualValues(t, "1", row["schema_change"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Verify the changes in db02 on the replica: pks 2, 4, 6, 8 remain
	waitForReplicaToCatchUp(t)
	replicaDatabase.MustExec("use db02;")
	rows, err = replicaDatabase.Queryx("select * from db02.t02 order by pk asc;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "2", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "4", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "6", row["pk"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "8", row["pk"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())

	// Verify db02.dolt_diff: one data change commit plus one schema change commit
	rows, err = replicaDatabase.Queryx("select * from db02.dolt_diff;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "t02", row["table_name"])
	require.Equal(t, "0", row["data_change"])
	require.Equal(t, "1", row["schema_change"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
@@ -0,0 +1,185 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"fmt"
"strconv"
"testing"
"time"
"github.com/Shopify/toxiproxy/v2"
toxiproxyclient "github.com/Shopify/toxiproxy/v2/client"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
)
// toxiClient is the client used to talk to the toxiproxy server.
var toxiClient *toxiproxyclient.Client

// mysqlProxy is the toxiproxy proxy interposed on the replica's connection to
// the MySQL primary so the tests can inject connection failures.
var mysqlProxy *toxiproxyclient.Proxy

// proxyPort is the port the toxiproxy proxy listens on.
var proxyPort int
// TestBinlogReplicationReconnection tests that the replica's connection to the primary is correctly
// reestablished if it drops.
func TestBinlogReplicationReconnection(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	configureToxiProxy(t)
	configureFastConnectionRetry(t)
	// Replicate through the toxiproxy port so the connection can be disrupted
	startReplication(t, proxyPort)
	testInitialReplicaStatus(t)

	// Generate enough traffic to trip the connection-limiting toxic
	// (presumably a "limit_data" toxic installed by configureToxiProxy —
	// its setup is outside this function; see the removal below).
	primaryDatabase.MustExec("create table reconnect_test(pk int primary key, c1 varchar(255));")
	for i := 0; i < 1000; i++ {
		value := "foobarbazbashfoobarbazbashfoobarbazbashfoobarbazbashfoobarbazbash"
		primaryDatabase.MustExec(fmt.Sprintf("insert into reconnect_test values (%v, %q)", i, value))
	}

	// Remove the limit_data toxic so that a connection can be reestablished
	mysqlProxy.RemoveToxic("limit_data")

	// Assert that all records get written to the table
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select min(pk) as min, max(pk) as max, count(pk) as count from db01.reconnect_test;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["min"])
	require.Equal(t, "999", row["max"])
	require.Equal(t, "1000", row["count"])
	require.NoError(t, rows.Close())

	// Assert that show replica status shows the reconnection IO error
	status := showReplicaStatus(t)
	require.Equal(t, "1158", status["Last_IO_Errno"])
	require.Equal(t, "unexpected EOF", status["Last_IO_Error"])
	requireRecentTimeString(t, status["Last_IO_Error_Timestamp"])
}
// configureFastConnectionRetry configures the replica to retry a failed connection after 5s, instead of the default 60s
// connection retry interval. This is used for testing connection retry logic without waiting the full default period.
func configureFastConnectionRetry(_ *testing.T) {
	// The statement is a constant, so pass it directly rather than wrapping it
	// in fmt.Sprintf with no format arguments (flagged by go vet / staticcheck).
	replicaDatabase.MustExec(
		"change replication source to SOURCE_CONNECT_RETRY=5;")
}
// testInitialReplicaStatus tests the data returned by SHOW REPLICA STATUS and errors
// out if any values are not what is expected for a replica that has just connected
// to a MySQL primary.
func testInitialReplicaStatus(t *testing.T) {
	status := showReplicaStatus(t)

	// Small helpers so the field-by-field expectations read as a table.
	expectEquals := func(field, expected string) {
		require.Equal(t, expected, status[field])
	}
	expectNotEmpty := func(field string) {
		require.NotEmpty(t, status[field])
	}

	// Positioning settings
	expectEquals("Auto_Position", "true")

	// Connection settings
	expectEquals("Connect_Retry", "5")
	expectEquals("Source_Retry_Count", "86400")
	expectEquals("Source_Host", "localhost")
	expectNotEmpty("Source_Port")
	expectNotEmpty("Source_User")

	// Error status
	expectEquals("Last_Errno", "0")
	expectEquals("Last_Error", "")
	expectEquals("Last_IO_Errno", "0")
	expectEquals("Last_IO_Error", "")
	expectEquals("Last_IO_Error_Timestamp", "")
	expectEquals("Last_SQL_Errno", "0")
	expectEquals("Last_SQL_Error", "")
	expectEquals("Last_SQL_Error_Timestamp", "")

	// Empty filter configuration
	expectEquals("Replicate_Do_Table", "")
	expectEquals("Replicate_Ignore_Table", "")

	// Thread status: IO thread may still be connecting right after start
	ioThreadState := status["Replica_IO_Running"]
	require.True(t,
		ioThreadState == "Yes" ||
			ioThreadState == "Connecting")
	expectEquals("Replica_SQL_Running", "Yes")

	// Unsupported fields
	expectEquals("Source_Log_File", "INVALID")
	expectEquals("Source_SSL_Allowed", "Ignored")
	expectEquals("Until_Condition", "None")
	expectEquals("SQL_Delay", "0")
	expectEquals("SQL_Remaining_Delay", "0")
	expectEquals("Seconds_Behind_Source", "0")
}
// requireRecentTimeString asserts that the specified |datetime| is a non-nil timestamp string
// (in time.UnixDate format) whose value is no more than five minutes old and not in the future.
func requireRecentTimeString(t *testing.T, datetime interface{}) {
	require.NotNil(t, datetime)
	// Use a comma-ok assertion so a non-string value fails the test with context
	// instead of panicking. (The original also reused the |datetime| parameter to
	// hold the parsed time.Time, which obscured the two distinct values.)
	datetimeString, ok := datetime.(string)
	require.True(t, ok, "timestamp value is not a string: %v", datetime)
	parsed, err := time.Parse(time.UnixDate, datetimeString)
	require.NoError(t, err)
	require.LessOrEqual(t, time.Now().Add(-5*time.Minute), parsed)
	require.GreaterOrEqual(t, time.Now(), parsed)
}
// showReplicaStatus returns a map with the results of SHOW REPLICA STATUS, keyed by the
// name of each column.
func showReplicaStatus(t *testing.T) map[string]interface{} {
	rows, err := replicaDatabase.Queryx("show replica status;")
	// Check the error before touching |rows|: on a query error |rows| is nil, and the
	// original's `defer rows.Close()` placed before this check would panic with a nil
	// pointer dereference instead of reporting the query failure.
	require.NoError(t, err)
	defer rows.Close()
	return convertByteArraysToStrings(readNextRow(t, rows))
}
// configureToxiProxy starts an in-process Toxiproxy API server on a free port, then creates
// a proxy (with a 1KB limit_data toxic) between a free local port and the running MySQL
// server, so tests can sever and restore the replication connection.
func configureToxiProxy(t *testing.T) {
	toxiproxyPort := findFreePort()

	// Start the toxiproxy API server in the background
	metrics := toxiproxy.NewMetricsContainer(prometheus.NewRegistry())
	toxiproxyServer := toxiproxy.NewServer(metrics, zerolog.Nop())
	go func() {
		toxiproxyServer.Listen("localhost", strconv.Itoa(toxiproxyPort))
	}()
	// Give the server a moment to start listening before using the client against it
	time.Sleep(500 * time.Millisecond)
	fmt.Printf("Toxiproxy server running on port %d \n", toxiproxyPort)
	toxiClient = toxiproxyclient.NewClient(fmt.Sprintf("localhost:%d", toxiproxyPort))

	// Create a proxy forwarding a free local port to the running MySQL server
	proxyPort = findFreePort()
	var err error
	mysqlProxy, err = toxiClient.CreateProxy("mysql",
		fmt.Sprintf("localhost:%d", proxyPort), // downstream
		fmt.Sprintf("localhost:%d", mySqlPort)) // upstream
	if err != nil {
		panic(fmt.Sprintf("unable to create toxiproxy: %v", err.Error()))
	}

	// Limit the data transferred so the replication connection is severed mid-stream.
	// The original discarded AddToxic's error, which would silently leave the proxy
	// without the toxic and invalidate the reconnection test.
	_, err = mysqlProxy.AddToxic("limit_data", "limit_data", "downstream", 1.0, toxiproxyclient.Attributes{
		"bytes": 1_000,
	})
	if err != nil {
		panic(fmt.Sprintf("unable to add limit_data toxic: %v", err.Error()))
	}
	fmt.Printf("Toxiproxy proxy with limit_data toxic (1KB) started on port %d \n", proxyPort)
}
// convertByteArraysToStrings converts each []byte value in the specified map |m| into a string,
// mutating |m| in place and returning it for convenience. This is necessary because MapScan
// doesn't honor (or know about) the correct underlying SQL types and returns every result
// typed as []byte. More info: https://github.com/jmoiron/sqlx/issues/225
func convertByteArraysToStrings(m map[string]interface{}) map[string]interface{} {
	for k, v := range m {
		if byteValue, isBytes := v.([]byte); isBytes {
			m[k] = string(byteValue)
		}
	}
	return m
}
@@ -0,0 +1,83 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestBinlogReplicationServerRestart tests that a replica can be configured and started, then the
// server process can be restarted and replica can be restarted without problems.
func TestBinlogReplicationServerRestart(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)
	primaryDatabase.MustExec("create table t (pk int auto_increment primary key)")

	// Launch a goroutine that inserts data for 5 seconds
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		limit := 5 * time.Second
		// time.Since is the idiomatic form of time.Now().Sub(startTime)
		for startTime := time.Now(); time.Since(startTime) <= limit; {
			primaryDatabase.MustExec("insert into t values (DEFAULT);")
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// Let the replica process a few transactions, then stop the server and pause a second
	waitForReplicaToReachGtid(t, 3)
	stopDoltSqlServer(t)
	time.Sleep(1000 * time.Millisecond)
	var err error
	doltPort, doltProcess, err = startDoltSqlServer(testDir)
	require.NoError(t, err)

	// Check replication status on the replica and assert configuration persisted
	status := showReplicaStatus(t)
	// The default Connect_Retry interval is 60s; but some tests configure a faster connection retry interval
	require.True(t, status["Connect_Retry"] == "5" || status["Connect_Retry"] == "60")
	require.Equal(t, "86400", status["Source_Retry_Count"])
	require.Equal(t, "localhost", status["Source_Host"])
	require.NotEmpty(t, status["Source_Port"])
	require.NotEmpty(t, status["Source_User"])

	// Restart replication on replica
	// TODO: For now, we have to set server_id each time we start the service.
	//       Turn this into a persistent sys var
	replicaDatabase.MustExec("set @@global.server_id=123;")
	replicaDatabase.MustExec("START REPLICA")

	// Assert that all changes have replicated from the primary
	wg.Wait()
	waitForReplicaToCatchUp(t)
	countMaxQuery := "SELECT COUNT(pk) AS count, MAX(pk) as max FROM db01.t;"
	primaryRows, err := primaryDatabase.Queryx(countMaxQuery)
	require.NoError(t, err)
	replicaRows, err := replicaDatabase.Queryx(countMaxQuery)
	require.NoError(t, err)
	primaryRow := convertByteArraysToStrings(readNextRow(t, primaryRows))
	replicaRow := convertByteArraysToStrings(readNextRow(t, replicaRows))
	require.Equal(t, primaryRow["count"], replicaRow["count"])
	require.Equal(t, primaryRow["max"], replicaRow["max"])
	// Close both result sets; the original closed replicaRows but leaked primaryRows.
	require.NoError(t, primaryRows.Close())
	require.NoError(t, replicaRows.Close())
}
@@ -0,0 +1,791 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package binlogreplication
import (
"bufio"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/user"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/jmoiron/sqlx"
_ "github.com/go-sql-driver/mysql"
"github.com/stretchr/testify/require"
)
// Ports, connections, and process handles for the two servers under test:
// a MySQL primary and a Dolt replica.
var mySqlPort, doltPort int
var primaryDatabase, replicaDatabase *sqlx.DB
var mySqlProcess, doltProcess *os.Process
// Log file paths and handles for both servers; oldDoltLogFilePath keeps the previous
// Dolt log when a test restarts the Dolt sql-server, so teardown can print both.
var doltLogFilePath, oldDoltLogFilePath, mysqlLogFilePath string
var doltLogFile, mysqlLogFile *os.File
// testDir is the per-test temp directory holding both servers' data directories.
var testDir string
// originalWorkingDir caches the working directory at test start, since
// startDoltSqlServer chdirs away from it.
var originalWorkingDir string
// teardown kills the server processes, closes log files, prints server logs to stdout
// when the test failed (so CI output includes them), removes the temp dir on clean runs,
// and deletes any toxiproxy proxies the test created.
func teardown(t *testing.T) {
	// Best-effort kills and closes: the processes/files may already be gone.
	if mySqlProcess != nil {
		mySqlProcess.Kill()
	}
	if doltProcess != nil {
		doltProcess.Kill()
	}
	if mysqlLogFile != nil {
		mysqlLogFile.Close()
	}
	if doltLogFile != nil {
		doltLogFile.Close()
	}

	// Output server logs on failure for easier debugging
	if t.Failed() {
		if oldDoltLogFilePath != "" {
			fmt.Printf("\nDolt server log from %s:\n", oldDoltLogFilePath)
			printFile(oldDoltLogFilePath)
		}
		fmt.Printf("\nDolt server log from %s:\n", doltLogFilePath)
		printFile(doltLogFilePath)
		fmt.Printf("\nMySQL server log from %s:\n", mysqlLogFilePath)
		printFile(mysqlLogFilePath)
	} else {
		// Clean up temp files on clean test runs. The original used `defer` here, which
		// was misleading: it only delayed the call to the end of teardown itself, not to
		// any enclosing scope, so a direct call is equivalent and clearer.
		os.RemoveAll(testDir)
	}

	// Delete any toxiproxy proxies created during the test (best effort)
	if toxiClient != nil {
		proxies, _ := toxiClient.Proxies()
		for _, value := range proxies {
			value.Delete()
		}
	}
}
// TestBinlogReplicationSanityCheck performs the simplest possible binlog replication test. It starts up
// a MySQL primary and a Dolt replica, and asserts that a CREATE TABLE statement properly replicates to the
// Dolt replica.
func TestBinlogReplicationSanityCheck(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)

	// Create a table on the primary and wait for the DDL to replicate
	primaryDatabase.MustExec("create table t (pk int primary key)")
	waitForReplicaToCatchUp(t)

	// The replica should report the same schema and have a repo_state.json for db01
	expected := "CREATE TABLE t ( pk int NOT NULL, PRIMARY KEY (pk)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"
	assertCreateTableStatement(t, replicaDatabase, "t", expected)
	assertRepoStateFileExists(t, "db01")
}
// TestResetReplica tests that "RESET REPLICA" and "RESET REPLICA ALL" correctly clear out
// replication configuration and metadata.
func TestResetReplica(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)
	// RESET REPLICA returns an error if replication is running
	_, err := replicaDatabase.Queryx("RESET REPLICA")
	require.Error(t, err)
	require.ErrorContains(t, err, "unable to reset replica while replication is running")
	// Calling RESET REPLICA clears out any errors
	replicaDatabase.MustExec("STOP REPLICA;")
	rows, err := replicaDatabase.Queryx("RESET REPLICA;")
	require.NoError(t, err)
	require.NoError(t, rows.Close())
	// After RESET REPLICA, all error fields in SHOW REPLICA STATUS should be cleared
	rows, err = replicaDatabase.Queryx("SHOW REPLICA STATUS;")
	require.NoError(t, err)
	status := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", status["Last_Errno"])
	require.Equal(t, "", status["Last_Error"])
	require.Equal(t, "0", status["Last_IO_Errno"])
	require.Equal(t, "", status["Last_IO_Error"])
	require.Equal(t, "", status["Last_IO_Error_Timestamp"])
	require.Equal(t, "0", status["Last_SQL_Errno"])
	require.Equal(t, "", status["Last_SQL_Error"])
	require.Equal(t, "", status["Last_SQL_Error_Timestamp"])
	require.NoError(t, rows.Close())
	// Calling RESET REPLICA ALL clears out all replica configuration:
	// SHOW REPLICA STATUS returns no rows afterwards...
	rows, err = replicaDatabase.Queryx("RESET REPLICA ALL;")
	require.NoError(t, err)
	require.NoError(t, rows.Close())
	rows, err = replicaDatabase.Queryx("SHOW REPLICA STATUS;")
	require.NoError(t, err)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
	// ...and the persisted source metadata table is emptied as well
	rows, err = replicaDatabase.Queryx("select * from mysql.slave_master_info;")
	require.NoError(t, err)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
// TestStartReplicaErrors tests that the "START REPLICA" command returns appropriate responses
// for various error conditions.
func TestStartReplicaErrors(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	// START REPLICA returns an error if server_id has not been set to a non-zero value
	_, err := replicaDatabase.Queryx("START REPLICA;")
	require.Error(t, err)
	require.ErrorContains(t, err, "invalid server ID configured")
	replicaDatabase.MustExec("SET @@GLOBAL.server_id=4321")
	// START REPLICA returns an error when no replication source is configured
	_, err = replicaDatabase.Queryx("START REPLICA;")
	require.Error(t, err)
	require.ErrorContains(t, err, ErrServerNotConfiguredAsReplica.Error())
	// For partial source configuration, START REPLICA doesn't throw an error, but an error will
	// be populated in SHOW REPLICA STATUS after START REPLICA returns.
	//START REPLICA doesn't return an error when replication source is only partially configured
	replicaDatabase.MustExec("CHANGE REPLICATION SOURCE TO SOURCE_PORT=1234, SOURCE_HOST='localhost';")
	replicaDatabase.MustExec("START REPLICA;")
	// Error 13117 surfaces asynchronously through the IO error fields of replica status
	rows, err := replicaDatabase.Queryx("SHOW REPLICA STATUS;")
	require.NoError(t, err)
	status := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "13117", status["Last_IO_Errno"])
	require.NotEmpty(t, status["Last_IO_Error"])
	require.NotEmpty(t, status["Last_IO_Error_Timestamp"])
	require.NoError(t, rows.Close())
	// START REPLICA doesn't return an error if replication is already running
	startReplication(t, mySqlPort)
	replicaDatabase.MustExec("START REPLICA;")
}
// TestDoltCommits tests that Dolt commits are created and use correct transaction boundaries.
func TestDoltCommits(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)
	// First transaction (DDL)
	primaryDatabase.MustExec("create table t1 (pk int primary key);")
	// Second transaction (DDL)
	primaryDatabase.MustExec("create table t2 (pk int primary key);")
	// Third transaction (autocommit DML)
	primaryDatabase.MustExec("insert into t2 values (0);")
	// Disable autocommit so we can manually control transactions
	primaryDatabase.MustExec("set autocommit=0;")
	// Fourth transaction (explicitly controlled transaction)
	primaryDatabase.MustExec("start transaction;")
	primaryDatabase.MustExec("insert into t1 values(1);")
	primaryDatabase.MustExec("insert into t1 values(2);")
	primaryDatabase.MustExec("insert into t1 values(3);")
	primaryDatabase.MustExec("insert into t2 values(3), (2), (1);")
	primaryDatabase.MustExec("commit;")
	// Verify Dolt commit on replica
	// NOTE(review): 5 = the four replicated transactions above plus, presumably, the
	// initial repository commit — confirm against dolt_log if this count changes.
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select count(*) as count from db01.dolt_log;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "5", row["count"])
	require.NoError(t, rows.Close())
	// Use dolt_diff so we can see what tables were edited and schema/data changes
	replicaDatabase.MustExec("use db01;")
	// Note: we don't use an order by clause, since the commits come in so quickly that they get the same timestamp
	rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;")
	require.NoError(t, err)
	// Fourth transaction: both t1 and t2 changed, so two rows share one commit hash
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	require.Equal(t, "t1", row["table_name"])
	commitId := row["commit_hash"]
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	require.Equal(t, "t2", row["table_name"])
	require.Equal(t, commitId, row["commit_hash"])
	// Third transaction: autocommit insert into t2 (data-only change)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["data_change"])
	require.Equal(t, "0", row["schema_change"])
	require.Equal(t, "t2", row["table_name"])
	// Second transaction: CREATE TABLE t2 (schema-only change)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["data_change"])
	require.Equal(t, "1", row["schema_change"])
	require.Equal(t, "t2", row["table_name"])
	// First transaction: CREATE TABLE t1 (schema-only change)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "0", row["data_change"])
	require.Equal(t, "1", row["schema_change"])
	require.Equal(t, "t1", row["table_name"])
	require.NoError(t, rows.Close())
}
// TestForeignKeyChecks tests that foreign key constraints replicate correctly when foreign key checks are
// enabled and disabled.
func TestForeignKeyChecks(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)
	// Insert a record with a foreign key check
	primaryDatabase.MustExec("CREATE TABLE colors (name varchar(100) primary key);")
	primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, color varchar(100), FOREIGN KEY (color) REFERENCES colors(name));")
	primaryDatabase.MustExec("START TRANSACTION;")
	primaryDatabase.MustExec("SET foreign_key_checks = 1;")
	primaryDatabase.MustExec("INSERT INTO colors VALUES ('green'), ('red'), ('blue');")
	primaryDatabase.MustExec("INSERT INTO t1 VALUES (1, 'red'), (2, 'green');")
	primaryDatabase.MustExec("COMMIT;")
	// Test the Insert path with foreign key checks turned off: the dangling FK value
	// must replicate without an error on the replica
	primaryDatabase.MustExec("START TRANSACTION;")
	primaryDatabase.MustExec("SET foreign_key_checks = 0;")
	primaryDatabase.MustExec("INSERT INTO t1 VALUES (3, 'not-a-color');")
	primaryDatabase.MustExec("COMMIT;")
	// Test the Update and Delete paths with foreign key checks turned off
	primaryDatabase.MustExec("START TRANSACTION;")
	primaryDatabase.MustExec("DELETE FROM colors WHERE name='red';")
	primaryDatabase.MustExec("UPDATE t1 SET color='still-not-a-color' WHERE pk=2;")
	primaryDatabase.MustExec("COMMIT;")
	// Verify the changes on the replica: t1 keeps the dangling references
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("select * from db01.t1 order by pk;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1", row["pk"])
	require.Equal(t, "red", row["color"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "2", row["pk"])
	require.Equal(t, "still-not-a-color", row["color"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "3", row["pk"])
	require.Equal(t, "not-a-color", row["color"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
	// ...and 'red' was deleted from colors despite being referenced
	rows, err = replicaDatabase.Queryx("select * from db01.colors order by name;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "blue", row["name"])
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "green", row["name"])
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
// TestCharsetsAndCollations tests that we can successfully replicate data using various charsets and collations.
func TestCharsetsAndCollations(t *testing.T) {
	defer teardown(t)
	startSqlServers(t)
	startReplication(t, mySqlPort)
	// Use non-default charset/collations to create data on the primary
	primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, c1 varchar(255) COLLATE ascii_general_ci, c2 varchar(255) COLLATE utf16_general_ci);")
	primaryDatabase.MustExec("insert into t1 values (1, \"one\", \"one\");")
	// Verify on the replica: schema keeps the collations, and the utf16 column comes
	// back with its big-endian two-byte encoding intact
	waitForReplicaToCatchUp(t)
	rows, err := replicaDatabase.Queryx("show create table db01.t1;")
	require.NoError(t, err)
	row := convertByteArraysToStrings(readNextRow(t, rows))
	require.Contains(t, row["Create Table"], "ascii_general_ci")
	require.Contains(t, row["Create Table"], "utf16_general_ci")
	require.NoError(t, rows.Close())
	rows, err = replicaDatabase.Queryx("select * from db01.t1;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "one", row["c1"])
	require.Equal(t, "\x00o\x00n\x00e", row["c2"])
	require.NoError(t, rows.Close())
	// Test that we get an error for unsupported charsets/collations: the table must
	// NOT be created on the replica, and the SQL thread must report the error
	primaryDatabase.MustExec("CREATE TABLE t2 (pk int primary key, c1 varchar(255) COLLATE utf16_german2_ci);")
	waitForReplicaToCatchUp(t)
	replicaDatabase.MustExec("use db01;")
	rows, err = replicaDatabase.Queryx("SHOW TABLES WHERE Tables_in_db01 like 't2';")
	require.NoError(t, err)
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
	rows, err = replicaDatabase.Queryx("SHOW REPLICA STATUS;")
	require.NoError(t, err)
	row = convertByteArraysToStrings(readNextRow(t, rows))
	require.Equal(t, "1105", row["Last_SQL_Errno"])
	require.NotEmpty(t, row["Last_SQL_Error_Timestamp"])
	require.Contains(t, row["Last_SQL_Error"], "The collation `utf16_german2_ci` has not yet been implemented")
	require.False(t, rows.Next())
	require.NoError(t, rows.Close())
}
//
// Test Helper Functions
//
// waitForReplicaToCatchUp waits (up to 20s) for the replica to catch up with the primary database. The
// lag is measured by checking that gtid_executed is the same on the primary and replica.
// Fails the test if they do not synchronize within the time limit.
func waitForReplicaToCatchUp(t *testing.T) {
	timeLimit := 20 * time.Second
	endTime := time.Now().Add(timeLimit)
	for time.Now().Before(endTime) {
		replicaGtid := queryGtid(t, replicaDatabase)
		primaryGtid := queryGtid(t, primaryDatabase)
		if primaryGtid == replicaGtid {
			return
		}
		// Not in sync yet: log the positions and retry shortly. (The original wrapped
		// this in an `else` after the return above — dropped per Go idiom.)
		fmt.Printf("primary and replica not in sync yet... (primary: %s, replica: %s)\n", primaryGtid, replicaGtid)
		time.Sleep(250 * time.Millisecond)
	}
	t.Fatal("primary and replica did not synchronize within " + timeLimit.String())
}
// waitForReplicaToReachGtid waits (up to 10s) for the replica's @@gtid_executed sys var to show that
// it has executed the |target| gtid transaction number, failing the test on timeout.
func waitForReplicaToReachGtid(t *testing.T, target int) {
	timeLimit := 10 * time.Second
	deadline := time.Now().Add(timeLimit)
	for time.Now().Before(deadline) {
		time.Sleep(250 * time.Millisecond)
		currentGtid := queryGtid(t, replicaDatabase)
		if currentGtid != "" {
			// gtid_executed looks like "<uuid>:<txno>" or "<uuid>:<lo>-<hi>";
			// pull out the highest executed transaction number.
			parts := strings.Split(currentGtid, ":")
			require.Equal(t, 2, len(parts))
			txPart := parts[1]
			if strings.Contains(txPart, "-") {
				bounds := strings.Split(txPart, "-")
				require.Equal(t, 2, len(bounds))
				txPart = bounds[1]
			}
			executed, err := strconv.Atoi(txPart)
			require.NoError(t, err)
			if executed >= target {
				return
			}
		}
		fmt.Printf("replica has not reached transaction %d yet; currently at: %s \n", target, currentGtid)
	}
	t.Fatal("replica did not reach target GTID within " + timeLimit.String())
}
// queryGtid returns @@global.gtid_executed from the given |database| as a string,
// failing the test if the variable has no value.
func queryGtid(t *testing.T, database *sqlx.DB) string {
	rows, err := database.Queryx("SELECT @@global.gtid_executed as gtid_executed;")
	require.NoError(t, err)
	defer rows.Close()
	row := convertByteArraysToStrings(readNextRow(t, rows))
	gtidValue := row["gtid_executed"]
	if gtidValue == nil {
		t.Fatal("no value for @@GLOBAL.gtid_executed")
	}
	return gtidValue.(string)
}
// readNextRow asserts that |rows| has another row available, scans it into a map keyed
// by column name, and returns it.
func readNextRow(t *testing.T, rows *sqlx.Rows) map[string]interface{} {
	require.True(t, rows.Next())
	result := make(map[string]interface{})
	require.NoError(t, rows.MapScan(result))
	return result
}
// readAllRows reads all remaining rows from |rows| into a slice of maps keyed by column
// name, with []byte values converted to strings. Fails the test on any scan or iteration error.
func readAllRows(t *testing.T, rows *sqlx.Rows) []map[string]interface{} {
	result := make([]map[string]interface{}, 0)
	// Idiomatic `for rows.Next()` instead of the original `if rows.Next() == false` form
	for rows.Next() {
		row := make(map[string]interface{})
		err := rows.MapScan(row)
		require.NoError(t, err)
		result = append(result, convertByteArraysToStrings(row))
	}
	// The original never checked rows.Err(), so a mid-iteration failure looked
	// identical to a clean end of the result set.
	require.NoError(t, rows.Err())
	return result
}
// startSqlServers creates a fresh per-test temp directory and launches a MySQL primary and
// a Dolt replica sql-server for a test to use. Skipped on platforms where these integration
// tests don't run.
func startSqlServers(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Skipping binlog replication integ tests on Windows OS")
	} else if runtime.GOOS == "darwin" && os.Getenv("CI") == "true" {
		t.Skip("Skipping binlog replication integ tests in CI environment on Mac OS")
	}

	// Unique temp dir per test run. The original used time.Now().Format("12345"), but
	// "12345" is interpreted as a reference-time layout (month/day/hour/minute/second),
	// not a literal suffix; a Unix timestamp is an unambiguous unique suffix.
	testDir = filepath.Join(os.TempDir(), fmt.Sprintf("%s-%d", t.Name(), time.Now().Unix()))
	err := os.MkdirAll(testDir, 0777)
	// The original overwrote this error before checking it; fail fast instead.
	require.NoError(t, err)
	cmd := exec.Command("chmod", "777", testDir)
	_, err = cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Printf("temp dir: %v \n", testDir)

	// Start up primary and replica databases
	mySqlPort, mySqlProcess, err = startMySqlServer(testDir)
	require.NoError(t, err)
	doltPort, doltProcess, err = startDoltSqlServer(testDir)
	require.NoError(t, err)
}
// stopDoltSqlServer kills the Dolt sql-server process group (the server plus any child
// processes it spawned) and waits briefly so callers can safely restart the server.
func stopDoltSqlServer(t *testing.T) {
	// Use the negative process ID so that we grab the entire process group.
	// This is necessary to kill all the processes the child spawns.
	// Note that we use os.FindProcess, instead of syscall.Kill, since syscall.Kill
	// is not available on windows.
	p, err := os.FindProcess(-doltProcess.Pid)
	require.NoError(t, err)
	err = p.Signal(syscall.SIGKILL)
	require.NoError(t, err)
	// Give the OS a moment to reap the process group before the caller proceeds
	time.Sleep(250 * time.Millisecond)
}
// startReplication points the replica at the MySQL server listening on localhost at |port|
// (root user, empty password) and starts the replica threads.
func startReplication(_ *testing.T, port int) {
	replicaDatabase.MustExec("SET @@GLOBAL.server_id=123;")
	changeSourceStmt := fmt.Sprintf(
		"change replication source to SOURCE_HOST='localhost', SOURCE_USER='root', "+
			"SOURCE_PASSWORD='', SOURCE_PORT=%v;", port)
	replicaDatabase.MustExec(changeSourceStmt)
	replicaDatabase.MustExec("start replica;")
}
// assertCreateTableStatement asserts that the CREATE TABLE statement |database| reports for
// |table| (after normalizing backquotes and whitespace) matches |expectedStatement|.
func assertCreateTableStatement(t *testing.T, database *sqlx.DB, table string, expectedStatement string) {
	rows, err := database.Queryx("show create table db01." + table + ";")
	require.NoError(t, err)
	// Close the result set when done; the original leaked it.
	defer rows.Close()
	var actualTable, actualStatement string
	require.True(t, rows.Next())
	err = rows.Scan(&actualTable, &actualStatement)
	require.NoError(t, err)
	require.Equal(t, table, actualTable)
	require.NotNil(t, actualStatement)
	actualStatement = sanitizeCreateTableString(actualStatement)
	require.Equal(t, expectedStatement, actualStatement)
}
// whitespaceRegex collapses runs of whitespace. Compiled once at package scope so
// sanitizeCreateTableString doesn't recompile it on every call.
var whitespaceRegex = regexp.MustCompile(`\s+`)

// sanitizeCreateTableString normalizes a SHOW CREATE TABLE statement for comparison by
// removing backquotes and newlines and collapsing whitespace runs to single spaces.
func sanitizeCreateTableString(statement string) string {
	statement = strings.ReplaceAll(statement, "`", "")
	statement = strings.ReplaceAll(statement, "\n", "")
	return whitespaceRegex.ReplaceAllString(statement, " ")
}
// findFreePort returns an available port that can be used for a server. If any errors are
// encountered, this function will panic and fail the current test.
func findFreePort() int {
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(fmt.Sprintf("unable to find available TCP port: %v", err.Error()))
	}
	// Renamed from mySqlPort: the original local shadowed the package-level
	// mySqlPort variable, which made the function misleading to read.
	freePort := listener.Addr().(*net.TCPAddr).Port
	err = listener.Close()
	if err != nil {
		panic(fmt.Sprintf("unable to find available TCP port: %v", err.Error()))
	}
	return freePort
}
// startMySqlServer configures a starts a fresh MySQL server instance and returns the port it is running on,
// and the os.Process handle. If unable to start up the MySQL server, an error is returned.
// Side effects: sets the package-level mySqlPort, primaryDatabase, mysqlLogFilePath, and
// mysqlLogFile, and temporarily chdirs into the MySQL data dir while launching.
func startMySqlServer(dir string) (int, *os.Process, error) {
	originalCwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	dir = dir + string(os.PathSeparator) + "mysql" + string(os.PathSeparator)
	dataDir := dir + "mysql_data"
	err = os.MkdirAll(dir, 0777)
	if err != nil {
		return -1, nil, err
	}
	cmd := exec.Command("chmod", "777", dir)
	output, err := cmd.Output()
	if err != nil {
		panic(err)
	}

	err = os.Chdir(dir)
	if err != nil {
		return -1, nil, err
	}

	mySqlPort = findFreePort()

	// MySQL will NOT start up as the root user, so if we're running as root
	// (e.g. in a CI env), use the "mysql" user instead.
	user, err := user.Current()
	if err != nil {
		panic("unable to determine current user: " + err.Error())
	}
	username := user.Username
	if username == "root" {
		fmt.Printf("overriding current user (root) to run mysql as 'mysql' user instead\n")
		username = "mysql"
	}

	// Initialize a fresh MySQL data dir for the primary. (This command was previously
	// misnamed chmodCmd, and its failure path reported cmd.String() — the chmod
	// command — instead of the mysqld initialization command that actually failed.)
	initCmd := exec.Command("mysqld",
		"--no-defaults",
		"--user="+username,
		"--initialize-insecure",
		"--datadir="+dataDir,
		"--default-authentication-plugin=mysql_native_password")
	output, err = initCmd.CombinedOutput()
	if err != nil {
		return -1, nil, fmt.Errorf("unable to execute command %v: %v %v", initCmd.String(), err.Error(), string(output))
	}

	// Start the MySQL server with GTID-based binlog replication enabled
	cmd = exec.Command("mysqld",
		"--no-defaults",
		"--user="+username,
		"--datadir="+dataDir,
		"--gtid-mode=ON",
		"--enforce-gtid-consistency=ON",
		fmt.Sprintf("--port=%v", mySqlPort),
		"--server-id=11223344",
		fmt.Sprintf("--socket=mysql-%v.sock", mySqlPort),
		"--binlog-checksum=NONE",
		"--general_log_file="+dir+"general_log",
		"--log-bin="+dir+"log_bin",
		"--slow_query_log_file="+dir+"slow_query_log",
		"--log-error="+dir+"log_error",
		fmt.Sprintf("--pid-file="+dir+"pid-%v.pid", mySqlPort))

	mysqlLogFilePath = filepath.Join(dir, fmt.Sprintf("mysql-%d.out.log", time.Now().Unix()))
	mysqlLogFile, err = os.Create(mysqlLogFilePath)
	if err != nil {
		return -1, nil, err
	}
	fmt.Printf("MySQL server logs at: %s \n", mysqlLogFilePath)
	cmd.Stdout = mysqlLogFile
	cmd.Stderr = mysqlLogFile
	err = cmd.Start()
	if err != nil {
		return -1, nil, fmt.Errorf("unable to start process %q: %v", cmd.String(), err.Error())
	}

	dsn := fmt.Sprintf("root@tcp(127.0.0.1:%v)/", mySqlPort)
	primaryDatabase = sqlx.MustOpen("mysql", dsn)
	err = waitForSqlServerToStart(primaryDatabase)
	if err != nil {
		return -1, nil, err
	}

	// Create the initial database on the MySQL server, then reconnect scoped to it
	primaryDatabase.MustExec("create database db01;")
	dsn = fmt.Sprintf("root@tcp(127.0.0.1:%v)/db01", mySqlPort)
	primaryDatabase = sqlx.MustOpen("mysql", dsn)

	// Best-effort restore of the original working directory
	os.Chdir(originalCwd)

	fmt.Printf("MySQL server started on port %v \n", mySqlPort)
	return mySqlPort, cmd.Process, nil
}
// initializeDevDoltBuild compiles a dev build of dolt (CI environments only) and returns the
// path to the built binary; returns "" outside of CI. An existing build for this process is
// reused. Panics if the build fails.
func initializeDevDoltBuild(dir string, goDirPath string) string {
	// If we're not in a CI environment, don't worry about building a dev build
	if os.Getenv("CI") != "true" {
		return ""
	}
	basedir := filepath.Dir(filepath.Dir(dir))
	fullpath := filepath.Join(basedir, fmt.Sprintf("devDolt-%d", os.Getpid()))
	// Reuse an existing build if one is already present
	if _, err := os.Stat(fullpath); err == nil {
		return fullpath
	}
	fmt.Printf("building dolt dev build at: %s \n", fullpath)
	buildCmd := exec.Command("go", "build", "-o", fullpath, "./cmd/dolt")
	buildCmd.Dir = goDirPath
	output, err := buildCmd.CombinedOutput()
	if err != nil {
		panic("unable to build dolt for binlog integration tests: " + err.Error() + "\nFull output: " + string(output) + "\n")
	}
	return fullpath
}
// startDoltSqlServer launches a Dolt sql-server (via `go run`, or a prebuilt dev binary in
// CI) rooted at |dir|/dolt, connects the package-level replicaDatabase handle to it, and
// returns the server's port and os.Process. Returns an error if the server can't start.
// Side effects: sets doltPort, replicaDatabase, doltLogFilePath/doltLogFile, caches
// originalWorkingDir, and chdirs into the go directory.
func startDoltSqlServer(dir string) (int, *os.Process, error) {
	dir = filepath.Join(dir, "dolt")
	err := os.MkdirAll(dir, 0777)
	if err != nil {
		return -1, nil, err
	}
	doltPort = findFreePort()
	fmt.Printf("Starting Dolt sql-server on port: %d, with data dir %s\n", doltPort, dir)
	// take the CWD and move up four directories to find the go directory
	if originalWorkingDir == "" {
		var err error
		originalWorkingDir, err = os.Getwd()
		if err != nil {
			panic(err)
		}
	}
	goDirPath := filepath.Join(originalWorkingDir, "..", "..", "..", "..")
	err = os.Chdir(goDirPath)
	if err != nil {
		panic(err)
	}
	socketPath := filepath.Join("/tmp", fmt.Sprintf("dolt.%v.sock", doltPort))
	args := []string{"go", "run", "./cmd/dolt",
		"sql-server",
		"-uroot",
		"--loglevel=TRACE",
		fmt.Sprintf("--data-dir=%s", dir),
		fmt.Sprintf("--port=%v", doltPort),
		fmt.Sprintf("--socket=%s", socketPath)}
	// If we're running in CI, use a precompiled dolt binary instead of go run
	devDoltPath := initializeDevDoltBuild(dir, goDirPath)
	if devDoltPath != "" {
		// Overwrite "./cmd/dolt" with the binary path, then drop the "go run" prefix,
		// leaving args as {devDoltPath, "sql-server", ...}
		args[2] = devDoltPath
		args = args[2:]
	}
	cmd := exec.Command(args[0], args[1:]...)
	// Set a unique process group ID so that we can cleanly kill this process, as well as
	// any spawned child processes later. Mac/Unix can set the "Setpgid" field directly, but
	// on windows, this field isn't present, so we need to use reflection so that this code
	// can still compile for windows, even though we don't run it there.
	procAttr := &syscall.SysProcAttr{}
	ps := reflect.ValueOf(procAttr)
	s := ps.Elem()
	f := s.FieldByName("Setpgid")
	f.SetBool(true)
	cmd.SysProcAttr = procAttr
	// Some tests restart the Dolt sql-server, so if we have a current log file, save a reference
	// to it so we can print the results later if the test fails.
	if doltLogFilePath != "" {
		oldDoltLogFilePath = doltLogFilePath
	}
	doltLogFilePath = filepath.Join(dir, fmt.Sprintf("dolt-%d.out.log", time.Now().Unix()))
	doltLogFile, err = os.Create(doltLogFilePath)
	if err != nil {
		return -1, nil, err
	}
	fmt.Printf("dolt sql-server logs at: %s \n", doltLogFilePath)
	cmd.Stdout = doltLogFile
	cmd.Stderr = doltLogFile
	err = cmd.Start()
	if err != nil {
		return -1, nil, fmt.Errorf("unable to execute command %v: %v", cmd.String(), err.Error())
	}
	fmt.Printf("Dolt CMD: %s\n", cmd.String())
	// Connect and block until the server answers pings (or times out)
	dsn := fmt.Sprintf("root@tcp(127.0.0.1:%v)/", doltPort)
	replicaDatabase = sqlx.MustOpen("mysql", dsn)
	err = waitForSqlServerToStart(replicaDatabase)
	if err != nil {
		return -1, nil, err
	}
	fmt.Printf("Dolt server started on port %v \n", doltPort)
	return doltPort, cmd.Process, nil
}
// waitForSqlServerToStart polls the specified database to wait for it to become available, pausing
// between retry attempts, and returning an error if it is not able to verify that the database is
// available.
func waitForSqlServerToStart(database *sqlx.DB) error {
	fmt.Printf("Waiting for server to start...\n")
	const maxAttempts = 20
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err := database.Ping(); err == nil {
			return nil
		}
		fmt.Printf("not up yet; waiting...\n")
		time.Sleep(500 * time.Millisecond)
	}
	// One final ping so the caller receives the underlying error on timeout
	return database.Ping()
}
// printFile opens the specified filepath |path| and outputs the contents of that file to stdout.
func printFile(path string) {
file, err := os.Open(path)
if err != nil {
fmt.Printf("Unable to open file: %s \n", err)
return
}
defer file.Close()
reader := bufio.NewReader(file)
for {
s, err := reader.ReadString(byte('\n'))
if err != nil {
if err == io.EOF {
break
} else {
panic(err)
}
}
fmt.Print(s)
}
fmt.Println()
}
// assertRepoStateFileExists asserts that the repo_state.json file is present
// for the specified database |db| under the test's Dolt data directory.
func assertRepoStateFileExists(t *testing.T, db string) {
	stateFilePath := filepath.Join(testDir, "dolt", db, ".dolt", "repo_state.json")

	_, statErr := os.Stat(stateFilePath)
	require.NoError(t, statErr)
}
+5 -60
View File
@@ -153,31 +153,31 @@ teardown() {
dolt push rem1 b1
dolt branch -d b1
run dolt sql -q "select * from dolt_branches"
run dolt sql -q "select name, latest_commit_message from dolt_branches"
[ $status -eq 0 ]
[[ "$output" =~ main.*Initialize\ data\ repository ]] || false
[[ "$output" =~ create-table-branch.*Added\ test\ table ]] || false
[[ ! "$output" =~ b1 ]] || false
run dolt sql -q "select * from dolt_remote_branches"
run dolt sql -q "select name, latest_commit_message from dolt_remote_branches"
[ $status -eq 0 ]
[[ ! "$output" =~ main.*Initialize\ data\ repository ]] || false
[[ ! "$output" =~ create-table-branch.*Added\ test\ table ]] || false
[[ "$output" =~ "remotes/rem1/b1" ]] || false
run dolt sql -q "select * from dolt_remote_branches where latest_commit_message ='Initialize data repository'"
run dolt sql -q "select name from dolt_remote_branches where latest_commit_message ='Initialize data repository'"
[ $status -eq 0 ]
[[ ! "$output" =~ "main" ]] || false
[[ ! "$output" =~ "create-table-branch" ]] || false
[[ ! "$output" =~ "remotes/rem1/b1" ]] || false
run dolt sql -q "select * from dolt_remote_branches where latest_commit_message ='Added test table'"
run dolt sql -q "select name from dolt_remote_branches where latest_commit_message ='Added test table'"
[ $status -eq 0 ]
[[ ! "$output" =~ "main" ]] || false
[[ ! "$output" =~ "create-table-branch" ]] || false
[[ "$output" =~ "remotes/rem1/b1" ]] || false
run dolt sql -q "select * from dolt_branches union select * from dolt_remote_branches"
run dolt sql -q "select name from dolt_branches union select name from dolt_remote_branches"
[[ "$output" =~ "main" ]] || false
[[ "$output" =~ "create-table-branch" ]] || false
[[ "$output" =~ "remotes/rem1/b1" ]] || false
@@ -552,61 +552,6 @@ SQL
[[ "$output" =~ "1,commit C" ]] || false
}
@test "system-tables: dolt_branches table should include remote refs as well" {
skip "This functionality needs to be implemented"
cd $BATS_TMPDIR
mkdir remotes-$$
mkdir remotes-$$/empty
echo remotesrv log available here $BATS_TMPDIR/remotes-$$/remotesrv.log
remotesrv --http-port 1234 --dir ./remotes-$$ &> ./remotes-$$/remotesrv.log 3>&- &
remotesrv_pid=$!
cd dolt-repo-$$
mkdir "dolt-repo-clones"
# Create a remote with a test branch
dolt remote add test-remote http://localhost:50051/test-org/test-repo
run dolt push test-remote main
dolt checkout -b test-branch
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL,
c1 BIGINT,
c2 BIGINT,
c3 BIGINT,
c4 BIGINT,
c5 BIGINT,
PRIMARY KEY (pk)
);
SQL
dolt add test
dolt commit -m "test commit"
dolt push test-remote test-branch
# Clone the branch
cd "dolt-repo-clones"
run dolt clone http://localhost:50051/test-org/test-repo
[ "$status" -eq 0 ]
cd test-repo
# Assert we are on main
run dolt branch
[ "$status" -eq 0 ]
[[ "$output" =~ "main" ]] || false
[[ ! "$output" =~ "test-branch" ]] || false
# Validate that the dolt_branches table has the remote test-branch (this is the failing part)
run dolt sql -q "SELECT COUNT(*) from dolt_branches"
[ "$status" -eq 0 ]
[[ "$output" =~ "2" ]] || false
run dolt sql -q "SELECT COUNT(*) from dolt_branches WHERE name='test-branch'"
[ "$status" -eq 0 ]
[[ "$output" =~ "1" ]] || false
}
@test "system-tables: dolt_branches is read-only" {
run dolt sql -q "DELETE FROM dolt_branches"
[ "$status" -ne 0 ]