diff --git a/go/cmd/dolt/commands/engine/sqlengine.go b/go/cmd/dolt/commands/engine/sqlengine.go
index 8609a54384..8f702fd98a 100644
--- a/go/cmd/dolt/commands/engine/sqlengine.go
+++ b/go/cmd/dolt/commands/engine/sqlengine.go
@@ -47,6 +47,7 @@ import (
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/statspro"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/writer"
 	"github.com/dolthub/dolt/go/libraries/utils/config"
+	"github.com/dolthub/dolt/go/libraries/utils/filesys"
 )
 
 // SqlEngine packages up the context necessary to run sql queries against dsqle.
@@ -55,6 +56,7 @@ type SqlEngine struct {
 	contextFactory contextFactory
 	dsessFactory   sessionFactory
 	engine         *gms.Engine
+	fs             filesys.Filesys
 }
 
 type sessionFactory func(mysqlSess *sql.BaseSession, pro sql.DatabaseProvider) (*dsess.DoltSession, error)
@@ -196,6 +198,7 @@ func NewSqlEngine(
 	sqlEngine.contextFactory = sqlContextFactory()
 	sqlEngine.dsessFactory = sessFactory
 	sqlEngine.engine = engine
+	sqlEngine.fs = pro.FileSystem()
 
 	// configuring stats depends on sessionBuilder
 	// sessionBuilder needs ref to statsProv
@@ -316,6 +319,10 @@ func (se *SqlEngine) GetUnderlyingEngine() *gms.Engine {
 	return se.engine
 }
 
+func (se *SqlEngine) FileSystem() filesys.Filesys {
+	return se.fs
+}
+
 func (se *SqlEngine) Close() error {
 	if se.engine != nil {
 		if se.engine.Analyzer.Catalog.BinlogReplicaController != nil {
diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go
index 1edfabf3c1..1f1dbdc0d7 100644
--- a/go/cmd/dolt/commands/sqlserver/server.go
+++ b/go/cmd/dolt/commands/sqlserver/server.go
@@ -559,21 +559,27 @@ func ConfigureServices(
 		}
 
 		listenaddr := fmt.Sprintf(":%d", port)
+		sqlContextInterceptor := sqle.SqlContextServerInterceptor{
+			Factory: sqlEngine.NewDefaultContext,
+		}
 		args := remotesrv.ServerArgs{
 			Logger:             logrus.NewEntry(lgr),
 			ReadOnly:           apiReadOnly || serverConfig.ReadOnly(),
 			HttpListenAddr:     listenaddr,
 			GrpcListenAddr:     listenaddr,
 			ConcurrencyControl: remotesapi.PushConcurrencyControl_PUSH_CONCURRENCY_CONTROL_ASSERT_WORKING_SET,
+			Options:            sqlContextInterceptor.Options(),
+			HttpInterceptor:    sqlContextInterceptor.HTTP(nil),
 		}
 		var err error
-		args.FS, args.DBCache, err = sqle.RemoteSrvFSAndDBCache(sqlEngine.NewDefaultContext, sqle.DoNotCreateUnknownDatabases)
+		args.FS = sqlEngine.FileSystem()
+		args.DBCache, err = sqle.RemoteSrvDBCache(sqle.GetInterceptorSqlContext, sqle.DoNotCreateUnknownDatabases)
 		if err != nil {
 			lgr.Errorf("error creating SQL engine context for remotesapi server: %v", err)
 			return err
 		}
 
-		authenticator := newAccessController(sqlEngine.NewDefaultContext, sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb)
+		authenticator := newAccessController(sqle.GetInterceptorSqlContext, sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb)
 		args = sqle.WithUserPasswordAuth(args, authenticator)
 
 		args.TLSConfig = serverConf.TLSConfig
@@ -621,6 +627,7 @@ func ConfigureServices(
 			lgr.Errorf("error creating SQL engine context for remotesapi server: %v", err)
 			return err
 		}
+		args.FS = sqlEngine.FileSystem()
 
 		clusterRemoteSrvTLSConfig, err := LoadClusterTLSConfig(serverConfig.ClusterConfig())
 		if err != nil {
@@ -634,7 +641,7 @@ func ConfigureServices(
 			lgr.Errorf("error creating remotesapi server on port %d: %v", *serverConfig.RemotesapiPort(), err)
 			return err
 		}
 
-		clusterController.RegisterGrpcServices(sqlEngine.NewDefaultContext, clusterRemoteSrv.srv.GrpcServer())
+		clusterController.RegisterGrpcServices(sqle.GetInterceptorSqlContext, clusterRemoteSrv.srv.GrpcServer())
 		clusterRemoteSrv.lis, err = clusterRemoteSrv.srv.Listeners()
 		if err != nil {
diff --git a/go/go.mod b/go/go.mod
index 9122e3a063..2fc96289a2 100644
--- a/go/go.mod
+++ b/go/go.mod
@@ -56,7 +56,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/creasty/defaults v1.6.0
 	github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
-	github.com/dolthub/go-mysql-server v0.19.1-0.20250128182847-3f5bb8c52cd8
+	github.com/dolthub/go-mysql-server v0.19.1-0.20250131110511-67aa2a430366
 	github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63
 	github.com/dolthub/swiss v0.1.0
 	github.com/esote/minmaxheap v1.0.0
diff --git a/go/go.sum b/go/go.sum
index 748abe7421..0af441a1c7 100644
@@ -179,8 +179,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
 github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
 github.com/dolthub/go-icu-regex v0.0.0-20241215010122-db690dd53c90 h1:Sni8jrP0sy/w9ZYXoff4g/ixe+7bFCZlfCqXKJSU+zM=
 github.com/dolthub/go-icu-regex v0.0.0-20241215010122-db690dd53c90/go.mod h1:ylU4XjUpsMcvl/BKeRRMXSH7e7WBrPXdSLvnRJYrxEA=
-github.com/dolthub/go-mysql-server v0.19.1-0.20250128182847-3f5bb8c52cd8 h1:eEGYHOC5Ft+56yPaH26gsdbonrZ2EiTwQLy8Oj3TAFE=
-github.com/dolthub/go-mysql-server v0.19.1-0.20250128182847-3f5bb8c52cd8/go.mod h1:jYEJ8tNkA7K3k39X8iMqaX3MSMmViRgh222JSLHDgVc=
+github.com/dolthub/go-mysql-server v0.19.1-0.20250131110511-67aa2a430366 h1:pJ+upgX6hrhyqgpkmk9Ye9lIPSualMHZcUMs8kWknV4=
+github.com/dolthub/go-mysql-server v0.19.1-0.20250131110511-67aa2a430366/go.mod h1:jYEJ8tNkA7K3k39X8iMqaX3MSMmViRgh222JSLHDgVc=
 github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63 h1:OAsXLAPL4du6tfbBgK0xXHZkOlos63RdKYS3Sgw/dfI=
 github.com/dolthub/gozstd v0.0.0-20240423170813-23a2903bca63/go.mod h1:lV7lUeuDhH5thVGDCKXbatwKy2KW80L4rMT46n+Y2/Q=
 github.com/dolthub/ishell v0.0.0-20240701202509-2b217167d718 h1:lT7hE5k+0nkBdj/1UOSFwjWpNxf+LCApbRHgnCA17XE=
diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_test.go
index 9b8770befa..0221ccbfe5 100644
--- a/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_test.go
+++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_test.go
@@ -39,21 +39,21 @@ var doltReplicationPrimarySystemVars = map[string]string{
 // TestBinlogPrimary_BinlogNotEnabled tests that when binary logging is NOT enabled, primary commands such as
 // SHOW BINARY LOGS still work, and that attempts to start replication fail with an error.
 func TestBinlogPrimary_BinlogNotEnabled(t *testing.T) {
-	defer teardown(t)
-	startSqlServersWithDoltSystemVars(t, map[string]string{
+	h := newHarness(t)
+	h.startSqlServersWithDoltSystemVars(map[string]string{
 		"enforce_gtid_consistency": "ON",
 		"gtid_mode":                "ON",
 	})
-	setupForDoltToMySqlReplication()
+	h.setupForDoltToMySqlReplication()
 
 	// When binary logging is NOT enabled, binary log commands such as SHOW MASTER STATUS, SHOW BINARY LOG STATUS,
 	// and SHOW BINARY LOGS should not error out.
- requirePrimaryResults(t, "SHOW MASTER STATUS", [][]any{}) - requirePrimaryResults(t, "SHOW BINARY LOG STATUS", [][]any{}) - requirePrimaryResults(t, "SHOW BINARY LOGS", [][]any{}) + h.requirePrimaryResults("SHOW MASTER STATUS", [][]any{}) + h.requirePrimaryResults("SHOW BINARY LOG STATUS", [][]any{}) + h.requirePrimaryResults("SHOW BINARY LOGS", [][]any{}) - startReplicationAndCreateTestDb(t, doltPort) - status := queryReplicaStatus(t) + h.startReplicationAndCreateTestDb(h.doltPort) + status := h.queryReplicaStatus() require.Equal(t, "13120", status["Last_IO_Errno"]) require.Contains(t, status["Last_IO_Error"], "Source command COM_REGISTER_REPLICA failed: unknown error: no binlog currently being recorded") @@ -62,17 +62,17 @@ func TestBinlogPrimary_BinlogNotEnabled(t *testing.T) { // TestBinlogPrimary_GtidModeNotEnabled asserts that when @@gtid_mode is NOT enabled, // attempting to start replication will fail with an error visible in the replica's status. func TestBinlogPrimary_GtidModeNotEnabled(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, map[string]string{"log_bin": "1"}) - setupForDoltToMySqlReplication() + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(map[string]string{"log_bin": "1"}) + h.setupForDoltToMySqlReplication() - requirePrimaryResults(t, "SHOW MASTER STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) - requirePrimaryResults(t, "SHOW BINARY LOG STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) - requirePrimaryResults(t, "SHOW BINARY LOGS", [][]any{{"binlog-main.000001", "151", "No"}}) + h.requirePrimaryResults("SHOW MASTER STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) + h.requirePrimaryResults("SHOW BINARY LOG STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) + h.requirePrimaryResults("SHOW BINARY LOGS", [][]any{{"binlog-main.000001", "151", "No"}}) - startReplication(t, doltPort) + h.startReplication(h.doltPort) time.Sleep(500 * time.Millisecond) - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() require.Equal(t, "13117", status["Last_IO_Errno"]) require.Contains(t, status["Last_IO_Error"], "The replication receiver thread cannot start because the source has GTID_MODE = OFF and this server has GTID_MODE = ON") @@ -81,17 +81,17 @@ func TestBinlogPrimary_GtidModeNotEnabled(t *testing.T) { // TestBinlogPrimary_EnforceGtidConsistencyNotEnabled asserts that when @@enforce_gtid_consistency is NOT enabled, // attempting to start replication will fail with an error visible in the replica's status. 
func TestBinlogPrimary_EnforceGtidConsistencyNotEnabled(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, map[string]string{"log_bin": "1", "gtid_mode": "ON"}) - setupForDoltToMySqlReplication() + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(map[string]string{"log_bin": "1", "gtid_mode": "ON"}) + h.setupForDoltToMySqlReplication() - requirePrimaryResults(t, "SHOW MASTER STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) - requirePrimaryResults(t, "SHOW BINARY LOG STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) - requirePrimaryResults(t, "SHOW BINARY LOGS", [][]any{{"binlog-main.000001", "151", "No"}}) + h.requirePrimaryResults("SHOW MASTER STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) + h.requirePrimaryResults("SHOW BINARY LOG STATUS", [][]any{{"binlog-main.000001", "151", "", "", ""}}) + h.requirePrimaryResults("SHOW BINARY LOGS", [][]any{{"binlog-main.000001", "151", "No"}}) - startReplication(t, doltPort) + h.startReplication(h.doltPort) time.Sleep(500 * time.Millisecond) - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() require.Equal(t, "13114", status["Last_IO_Errno"]) require.Contains(t, status["Last_IO_Error"], "@@enforce_gtid_consistency must be enabled for binlog replication") @@ -100,12 +100,12 @@ func TestBinlogPrimary_EnforceGtidConsistencyNotEnabled(t *testing.T) { // TestBinlogPrimary runs a simple sanity check that a MySQL replica can connect to a Dolt primary and receive // binlog events from a wide variety of SQL data types. func TestBinlogPrimary(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (" + + h.primaryDatabase.MustExec("create table db01.t (" + "pk int primary key, " + "c1 varchar(10), c2 int, c3 varchar(100), c4 tinyint, c5 smallint, c6 mediumint, c7 bigint, " + "uc1 tinyint unsigned, uc2 smallint unsigned, uc3 mediumint unsigned, uc4 int unsigned, uc5 bigint unsigned, " + @@ -124,10 +124,10 @@ func TestBinlogPrimary(t *testing.T) { "json6 json, json7 json" + ");") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t"}}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t"}}) - primaryDatabase.MustExec("insert into db01.t values (" + + h.primaryDatabase.MustExec("insert into db01.t values (" + "1, " + "'42', NULL, NULL, 123, 123, 123, 123, 200, 200, 200, 200, 200, " + "1.0101, 2.02030405060708, " + @@ -146,19 +146,19 @@ func TestBinlogPrimary(t *testing.T) { `POINT(1,1), 'true', '[true, false]', '[true, [true, false]]', '["foo","bar"]', '["baz", 1.0, 2.0, "bash"]', ` + `'{"foo":"bar"}', '{"foo": {"baz": "bar"}}'` + ");") - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() // Debugging output – useful to see deeper status from replica when tests are failing - outputReplicaApplierStatus(t) - outputShowReplicaStatus(t) + h.outputReplicaApplierStatus() + h.outputShowReplicaStatus() // Sanity check on SHOW REPLICA STATUS - rows, err := replicaDatabase.Queryx("show replica status;") + rows, err := h.replicaDatabase.Queryx("show replica status;") require.NoError(t, err) allRows := readAllRowsIntoMaps(t, rows) require.Equal(t, 1, len(allRows)) 
require.NoError(t, rows.Close()) - uuid := queryPrimaryServerUuid(t) + uuid := h.queryPrimaryServerUuid() require.Equal(t, uuid+":1-3", allRows[0]["Executed_Gtid_Set"]) require.Equal(t, "", allRows[0]["Last_IO_Error"]) require.Equal(t, "", allRows[0]["Last_SQL_Error"]) @@ -166,7 +166,7 @@ func TestBinlogPrimary(t *testing.T) { require.Equal(t, "Yes", allRows[0]["Replica_SQL_Running"]) // Test that the table was created and one row inserted - requireReplicaResults(t, "select * from db01.t;", [][]any{ + h.requireReplicaResults("select * from db01.t;", [][]any{ {"1", "42", nil, nil, "123", "123", "123", "123", "200", "200", "200", "200", "200", float32(1.0101), float64(2.02030405060708), "1981", "1981-02-16", "-123:45:30.123456", @@ -187,36 +187,36 @@ func TestBinlogPrimary(t *testing.T) { }, }) - requirePrimaryResults(t, "SHOW BINARY LOG STATUS", [][]any{ + h.requirePrimaryResults("SHOW BINARY LOG STATUS", [][]any{ {"binlog-main.000001", "2377", "", "", uuid + ":1-3"}}) } // TestBinlogPrimary_Rotation tests how a Dolt primary server handles rotating the binary log file when the // size threshold is reached. func TestBinlogPrimary_Rotation(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Change the binlog rotation threshold on the primary to 10KB (instead of the default 1GB) - primaryDatabase.MustExec("SET @@GLOBAL.max_binlog_size = 10240;") + h.primaryDatabase.MustExec("SET @@GLOBAL.max_binlog_size = 10240;") // Generate enough data to trigger a logfile rotation - primaryDatabase.MustExec("create table t (n int);") + h.primaryDatabase.MustExec("create table t (n int);") for i := range 100 { - primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i)) } - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "SELECT MAX(n) FROM t;", [][]any{{"99"}}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("SELECT MAX(n) FROM t;", [][]any{{"99"}}) // Check the binary log file status and ensure the file has been rotated - uuid := queryPrimaryServerUuid(t) - requirePrimaryResults(t, "show binary log status;", [][]any{ + uuid := h.queryPrimaryServerUuid() + h.requirePrimaryResults("show binary log status;", [][]any{ {"binlog-main.000003", "1027", "", "", uuid + ":1-102"}, }) - requirePrimaryResults(t, "show binary logs;", [][]any{ + h.requirePrimaryResults("show binary logs;", [][]any{ {"binlog-main.000001", "10318", "No"}, {"binlog-main.000002", "10481", "No"}, {"binlog-main.000003", "1027", "No"}, @@ -226,38 +226,38 @@ func TestBinlogPrimary_Rotation(t *testing.T) { // TestBinlogPrimary_AutoPurging tests that the primary server correctly purges binary log files older than // @@binlog_expire_logs_seconds on restart. 
func TestBinlogPrimary_AutoPurging(t *testing.T) { - defer teardown(t) + h := newHarness(t) mapCopy := copyMap(doltReplicationPrimarySystemVars) mapCopy["binlog_expire_logs_seconds"] = "1" - startSqlServersWithDoltSystemVars(t, mapCopy) - setupForDoltToMySqlReplication() + h.startSqlServersWithDoltSystemVars(mapCopy) + h.setupForDoltToMySqlReplication() // Generate binary log content - primaryDatabase.MustExec("create database db01;") - primaryDatabase.MustExec("create table db01.t (n int);") + h.primaryDatabase.MustExec("create database db01;") + h.primaryDatabase.MustExec("create table db01.t (n int);") for i := range 100 { - primaryDatabase.MustExec(fmt.Sprintf("insert into db01.t values (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into db01.t values (%d);", i)) } - requirePrimaryResults(t, "SHOW BINARY LOGS;", [][]any{ + h.requirePrimaryResults("SHOW BINARY LOGS;", [][]any{ {"binlog-main.000001", "21346", "No"}, }) // Restart and confirm the binary log has been purged - stopDoltSqlServer(t) + h.stopDoltSqlServer() time.Sleep(1 * time.Second) - mustRestartDoltPrimaryServer(t) - requirePrimaryResults(t, "SHOW BINARY LOGS;", [][]any{ + h.mustRestartDoltPrimaryServer() + h.requirePrimaryResults("SHOW BINARY LOGS;", [][]any{ {"binlog-main.000002", "191", "No"}, }) // Check the value of @@gtid_purged - requirePrimaryResults(t, "SELECT @@gtid_purged;", [][]any{ - {fmt.Sprintf("%s:1-102", queryPrimaryServerUuid(t))}, + h.requirePrimaryResults("SELECT @@gtid_purged;", [][]any{ + {fmt.Sprintf("%s:1-102", h.queryPrimaryServerUuid())}, }) // Verify the replica reports an error about the GTIDs not being available - startReplicationAndCreateTestDb(t, doltPort) - status := queryReplicaStatus(t) + h.startReplicationAndCreateTestDb(h.doltPort) + status := h.queryReplicaStatus() require.Equal(t, "13114", status["Last_IO_Errno"]) require.Contains(t, status["Last_IO_Error"], "Got fatal error 1236 from source when reading data from binary log: "+ @@ -267,134 +267,134 @@ func TestBinlogPrimary_AutoPurging(t *testing.T) { // TestBinlogPrimary_InitializeGTIDPurged asserts that @@gtid_purged is set correctly in a variety of // scenarios, such as when a fresh server starts up, or when a server is restarted multiple times. 
func TestBinlogPrimary_InitializeGTIDPurged(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() // On a fresh server, @@gtid_purged and @@gtid_executed should be empty - requirePrimaryResults(t, "SELECT @@gtid_executed;", [][]any{{""}}) - requirePrimaryResults(t, "SELECT @@gtid_purged;", [][]any{{""}}) + h.requirePrimaryResults("SELECT @@gtid_executed;", [][]any{{""}}) + h.requirePrimaryResults("SELECT @@gtid_purged;", [][]any{{""}}) // Create a GTID in the first binary log file, and restart the server to rotate to a new binary log file // After the first restart, @@gtid_purged should be empty and @@gtid_executed should be the first GTID - primaryDatabase.MustExec("CREATE DATABASE db01;") - stopDoltSqlServer(t) - mustRestartDoltPrimaryServer(t) - requirePrimaryResults(t, "SELECT @@gtid_executed;", [][]any{ - {fmt.Sprintf("%s:1", queryPrimaryServerUuid(t))}, + h.primaryDatabase.MustExec("CREATE DATABASE db01;") + h.stopDoltSqlServer() + h.mustRestartDoltPrimaryServer() + h.requirePrimaryResults("SELECT @@gtid_executed;", [][]any{ + {fmt.Sprintf("%s:1", h.queryPrimaryServerUuid())}, }) - requirePrimaryResults(t, "SELECT @@gtid_purged;", [][]any{{""}}) + h.requirePrimaryResults("SELECT @@gtid_purged;", [][]any{{""}}) // Manually remove the first binary log file, containing GTID 1 and restart the server // When no GTID is found in any available logs, @@gtid_purged should be set to @@gtid_executed - require.NoError(t, os.Remove(filepath.Join(testDir, "dolt", ".dolt", "binlog", "binlog-main.000001"))) - stopDoltSqlServer(t) - mustRestartDoltPrimaryServer(t) - requirePrimaryResults(t, "SELECT @@gtid_executed;", [][]any{ - {fmt.Sprintf("%s:1", queryPrimaryServerUuid(t))}, + require.NoError(t, os.Remove(filepath.Join(h.testDir, "dolt", ".dolt", "binlog", "binlog-main.000001"))) + h.stopDoltSqlServer() + h.mustRestartDoltPrimaryServer() + h.requirePrimaryResults("SELECT @@gtid_executed;", [][]any{ + {fmt.Sprintf("%s:1", h.queryPrimaryServerUuid())}, }) - requirePrimaryResults(t, "SELECT @@gtid_purged;", [][]any{ - {fmt.Sprintf("%s:1", queryPrimaryServerUuid(t))}, + h.requirePrimaryResults("SELECT @@gtid_purged;", [][]any{ + {fmt.Sprintf("%s:1", h.queryPrimaryServerUuid())}, }) // Create a new GTID in the current binary log file, restart, and test @@gtid_executed and @@gtid_purged - primaryDatabase.MustExec("CREATE DATABASE db02;") - stopDoltSqlServer(t) - mustRestartDoltPrimaryServer(t) - requirePrimaryResults(t, "SELECT @@gtid_executed;", [][]any{ - {fmt.Sprintf("%s:1-2", queryPrimaryServerUuid(t))}, + h.primaryDatabase.MustExec("CREATE DATABASE db02;") + h.stopDoltSqlServer() + h.mustRestartDoltPrimaryServer() + h.requirePrimaryResults("SELECT @@gtid_executed;", [][]any{ + {fmt.Sprintf("%s:1-2", h.queryPrimaryServerUuid())}, }) - requirePrimaryResults(t, "SELECT @@gtid_purged;", [][]any{ - {fmt.Sprintf("%s:1", queryPrimaryServerUuid(t))}, + h.requirePrimaryResults("SELECT @@gtid_purged;", [][]any{ + {fmt.Sprintf("%s:1", h.queryPrimaryServerUuid())}, }) } // TestBinlogPrimary_ReplicaAndPrimaryRestart tests that a replica can disconnect and reconnect to the primary to // restart the replication stream, even when the primary has been restarted and log files have rotated. 
func TestBinlogPrimary_ReplicaAndPrimaryRestart(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Change the binlog rotation threshold on the primary to 10KB (instead of the default 1GB) so // that log files will rotate more often - primaryDatabase.MustExec("SET @@GLOBAL.max_binlog_size = 10240;") + h.primaryDatabase.MustExec("SET @@GLOBAL.max_binlog_size = 10240;") // Create a table on the primary and assert that it gets replicated - primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}}) + h.primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}}) // Assert that the executed GTID position on the replica contains GTIDs 1 and 2 - serverUuid := queryPrimaryServerUuid(t) - status := queryReplicaStatus(t) + serverUuid := h.queryPrimaryServerUuid() + status := h.queryReplicaStatus() require.Equal(t, serverUuid+":1-2", status["Executed_Gtid_Set"]) // Stop the MySQL replica server and wait for a few seconds - stopMySqlServer(t) + h.stopMySqlServer() time.Sleep(2_000 * time.Millisecond) // Generate enough data to trigger a logfile rotation - primaryDatabase.MustExec("create table t (n int);") + h.primaryDatabase.MustExec("create table t (n int);") for i := range 100 { - primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i)) } // Stop the primary and restart it to test that it creates a new log file and // applies a rotate event to the last log file - stopDoltSqlServer(t) + h.stopDoltSqlServer() // Restart the Dolt primary server - mustRestartDoltPrimaryServer(t) + h.mustRestartDoltPrimaryServer() // Generate more data on the primary after restarting - primaryDatabase.MustExec("use db01;") + h.primaryDatabase.MustExec("use db01;") for i := range 100 { - primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i+100)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into t values (%d);", i+100)) } // Restart the MySQL replica and reconnect to the Dolt primary - mustRestartMySqlReplicaServer(t) - startReplicationAndCreateTestDb(t, doltPort) - waitForReplicaToCatchUp(t) + h.mustRestartMySqlReplicaServer() + h.startReplicationAndCreateTestDb(h.doltPort) + h.waitForReplicaToCatchUp() // Assert the executed GTID position now contains all GTIDs - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, serverUuid+":1-203", status["Executed_Gtid_Set"]) - requireReplicaResults(t, "SELECT MAX(n) FROM t;", [][]any{{"199"}}) + h.requireReplicaResults("SELECT MAX(n) FROM t;", [][]any{{"199"}}) } // TestBinlogPrimary_Heartbeats tests that heartbeats sent from the primary to the replica are well-formed and // don't cause the replica to close the stream. For example, if the nextLogPosition field in the heartbeat event // doesn't match up with the nextLogPosition from the previous event, then the replica will quit the connection. 
func TestBinlogPrimary_Heartbeats(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() // Start replication, with a 45s delay before any commands are sent to the primary. // This gives enough time for the first heartbeat event to be sent, before any user // initiated binlog events, so we can test that scenario. - startReplicationAndCreateTestDbWithDelay(t, doltPort, 45*time.Second) + h.startReplicationAndCreateTestDbWithDelay(h.doltPort, 45*time.Second) // Insert a row every second, for 70s, which gives the server a chance to send two heartbeats - primaryDatabase.MustExec("create table db01.heartbeatTest(pk int);") + h.primaryDatabase.MustExec("create table db01.heartbeatTest(pk int);") endTime := time.Now().Add(70 * time.Second) maxInsertValue := 0 for time.Now().Before(endTime) { maxInsertValue += 1 - primaryDatabase.MustExec(fmt.Sprintf("insert into db01.heartbeatTest values (%d);", maxInsertValue)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into db01.heartbeatTest values (%d);", maxInsertValue)) time.Sleep(1 * time.Second) } // Ensure the replica is still in sync - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select count(*) from db01.heartbeatTest;", + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select count(*) from db01.heartbeatTest;", [][]any{{fmt.Sprintf("%d", maxInsertValue)}}) // Make sure no errors have occurred - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() require.Equal(t, "", status["Last_SQL_Error"]) require.Equal(t, "", status["Last_IO_Error"]) require.Equal(t, "0", status["Last_SQL_Errno"]) @@ -404,118 +404,118 @@ func TestBinlogPrimary_Heartbeats(t *testing.T) { // TestBinlogPrimary_ReplicaRestart tests that the Dolt primary server behaves correctly when the // replica server is stopped, and then later reconnects. func TestBinlogPrimary_ReplicaRestart(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Create a table on the primary and assert that it gets replicated - primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}}) + h.primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}}) // Assert that the executed GTID position on the replica contains GTIDs 1 and 2 - serverUuid := queryPrimaryServerUuid(t) - status := queryReplicaStatus(t) + serverUuid := h.queryPrimaryServerUuid() + status := h.queryReplicaStatus() require.Equal(t, serverUuid+":1-2", status["Executed_Gtid_Set"]) // Stop the MySQL replica server and wait for a few seconds - stopMySqlServer(t) + h.stopMySqlServer() time.Sleep(4_000 * time.Millisecond) // Make a change while the replica is stopped to test that the server // doesn't error out when a registered replica is not available. 
- primaryDatabase.MustExec("insert into db01.t1 values (1, 'one');") + h.primaryDatabase.MustExec("insert into db01.t1 values (1, 'one');") // Restart the MySQL replica and reconnect to the Dolt primary - prevPrimaryDatabase := primaryDatabase + prevPrimaryDatabase := h.primaryDatabase var err error - mySqlPort, mySqlProcess, err = startMySqlServer(t, testDir) + h.mySqlPort, h.mySqlProcess, err = h.startMySqlServer() require.NoError(t, err) - replicaDatabase = primaryDatabase - primaryDatabase = prevPrimaryDatabase - startReplicationAndCreateTestDb(t, doltPort) + h.replicaDatabase = h.primaryDatabase + h.primaryDatabase = prevPrimaryDatabase + h.startReplicationAndCreateTestDb(h.doltPort) // Create another table and assert that it gets replicated - primaryDatabase.MustExec("create table db01.t2 (pk int primary key, c1 varchar(255));") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}, {"t2"}}) + h.primaryDatabase.MustExec("create table db01.t2 (pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}, {"t2"}}) // Assert the executed GTID position now contains all GTIDs - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, serverUuid+":1-4", status["Executed_Gtid_Set"]) } // TestBinlogPrimary_PrimaryRestart tests that a Dolt primary server can be restarted and that a replica // will successfully reconnect and continue replicating binlog events. func TestBinlogPrimary_PrimaryRestart(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Only one binary log file should be present on a fresh server - requirePrimaryResults(t, "show binary logs;", [][]any{ + h.requirePrimaryResults("show binary logs;", [][]any{ {"binlog-main.000001", "263", "No"}, }) // Create a table on the primary and assert that it gets replicated - primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}}) + h.primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}}) // Assert that the executed GTID position on the replica contains GTIDs 1 and 2 - serverUuid := queryPrimaryServerUuid(t) - status := queryReplicaStatus(t) + serverUuid := h.queryPrimaryServerUuid() + status := h.queryReplicaStatus() require.Equal(t, serverUuid+":1-2", status["Executed_Gtid_Set"]) // Stop the Dolt primary server - stopDoltSqlServer(t) + h.stopDoltSqlServer() time.Sleep(2_000 * time.Millisecond) // Restart the Dolt primary server - mustRestartDoltPrimaryServer(t) - waitForReplicaToReconnect(t) + h.mustRestartDoltPrimaryServer() + h.waitForReplicaToReconnect() // A new binary log file is created on each server restart - requirePrimaryResults(t, "show binary logs;", [][]any{ + h.requirePrimaryResults("show binary logs;", [][]any{ {"binlog-main.000001", "549", "No"}, {"binlog-main.000002", "191", "No"}, }) // Create another table and assert that it gets replicated - primaryDatabase.MustExec("create table db01.t2 (pk int primary key, c1 varchar(255));") - 
waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}, {"t2"}}) + h.primaryDatabase.MustExec("create table db01.t2 (pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}, {"t2"}}) // Assert the executed GTID position now contains GTID #2 and GTID #3 - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, serverUuid+":1-3", status["Executed_Gtid_Set"]) } // TestBinlogPrimary_PrimaryRestartBeforeReplicaConnects tests that a MySQL replica can connect to a Dolt primary // when the Dolt primary has multiple binlog files and the replica needs events from a non-current binlog file. func TestBinlogPrimary_PrimaryRestartBeforeReplicaConnects(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() // Create a test database to trigger the first GTID binlog event - primaryDatabase.MustExec("CREATE DATABASE db02;") + h.primaryDatabase.MustExec("CREATE DATABASE db02;") // Restart the Dolt primary server to trigger a binlog file rotation - stopDoltSqlServer(t) - mustRestartDoltPrimaryServer(t) + h.stopDoltSqlServer() + h.mustRestartDoltPrimaryServer() // Start replication and verify the replica receives the CREATE DATABASE event from the first binlog file - startReplication(t, doltPort) - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "SHOW DATABASES;", [][]any{ + h.startReplication(h.doltPort) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("SHOW DATABASES;", [][]any{ {"db02"}, {"information_schema"}, {"mysql"}, {"performance_schema"}, {"sys"}, }) // Verify that the Dolt primary server has two binary log files - requirePrimaryResults(t, "SHOW BINARY LOGS;", [][]any{ + h.requirePrimaryResults("SHOW BINARY LOGS;", [][]any{ {"binlog-main.000001", "312", "No"}, {"binlog-main.000002", "191", "No"}, }) @@ -524,275 +524,275 @@ func TestBinlogPrimary_PrimaryRestartBeforeReplicaConnects(t *testing.T) { // TestBinlogPrimary_DisallowBranchesWithSlashes asserts that trying to set @@log_bin_branch to // a branch name containing a slash results in an error. func TestBinlogPrimary_DisallowBranchesWithSlashes(t *testing.T) { - defer teardown(t) + h := newHarness(t) mapCopy := copyMap(doltReplicationPrimarySystemVars) mapCopy["log_bin_branch"] = "'branch/withslash'" - startSqlServersWithDoltSystemVars(t, mapCopy) - setupForDoltToMySqlReplication() + h.startSqlServersWithDoltSystemVars(mapCopy) + h.setupForDoltToMySqlReplication() // Because the replication branch was invalid, the binary log status should be // empty, indicating that no binary logs are being recorded. - requirePrimaryResults(t, "SHOW BINARY LOG STATUS;", [][]any{}) + h.requirePrimaryResults("SHOW BINARY LOG STATUS;", [][]any{}) } // TestBinlogPrimary_ChangeReplicationBranch asserts that the log_bin_branch system variable can // be used to control what branch is replicated. 
func TestBinlogPrimary_ChangeReplicationBranch(t *testing.T) { - defer teardown(t) + h := newHarness(t) mapCopy := copyMap(doltReplicationPrimarySystemVars) mapCopy["log_bin_branch"] = "branch1" - startSqlServersWithDoltSystemVars(t, mapCopy) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h.startSqlServersWithDoltSystemVars(mapCopy) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // No events should be generated when we're not updating the configured replication branch - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{}) // Create the branch1 branch and make sure it gets replicated - primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") - primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t"}}) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) + h.primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") + h.primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t"}}) + h.requireReplicaResults("select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) // Insert another row on main and make sure it doesn't get replicated - primaryDatabase.MustExec("call dolt_checkout('main');") - primaryDatabase.MustExec("insert into db01.t values('two hundred', 200, 2000);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) + h.primaryDatabase.MustExec("call dolt_checkout('main');") + h.primaryDatabase.MustExec("insert into db01.t values('two hundred', 200, 2000);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) // Assert that changing log_bin_branch while the server is running has no effect - primaryDatabase.MustExec("SET @@GLOBAL.log_bin_branch='main';") - primaryDatabase.MustExec("call dolt_checkout('main');") - primaryDatabase.MustExec("insert into db01.t values('three hundred', 300, 2023);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) + h.primaryDatabase.MustExec("SET @@GLOBAL.log_bin_branch='main';") + h.primaryDatabase.MustExec("call dolt_checkout('main');") + h.primaryDatabase.MustExec("insert into db01.t values('three hundred', 300, 2023);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"hundred", "100", "2000"}}) } // TestBinlogPrimary_SimpleSchemaChangesWithAutocommit tests that we can make simple schema changes (e.g. create table, // alter table, drop table) and replicate the DDL statements correctly. 
func TestBinlogPrimary_SimpleSchemaChangesWithAutocommit(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Create a table - primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255) NOT NULL comment 'foo bar baz');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}}) - requireReplicaResults(t, "show create table db01.t1;", [][]any{{"t1", + h.primaryDatabase.MustExec("create table db01.t1 (pk int primary key, c1 varchar(255) NOT NULL comment 'foo bar baz');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}}) + h.requireReplicaResults("show create table db01.t1;", [][]any{{"t1", "CREATE TABLE `t1` (\n `pk` int NOT NULL,\n `c1` varchar(255) COLLATE utf8mb4_0900_bin NOT NULL COMMENT 'foo bar baz',\n" + " PRIMARY KEY (`pk`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}}) // Insert some data - primaryDatabase.MustExec("insert into db01.t1 (pk, c1) values (1, 'foo');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t1;", [][]any{{"1", "foo"}}) + h.primaryDatabase.MustExec("insert into db01.t1 (pk, c1) values (1, 'foo');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t1;", [][]any{{"1", "foo"}}) // Modify the table - primaryDatabase.MustExec("alter table db01.t1 rename column c1 to z1;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show create table db01.t1;", [][]any{{"t1", + h.primaryDatabase.MustExec("alter table db01.t1 rename column c1 to z1;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show create table db01.t1;", [][]any{{"t1", "CREATE TABLE `t1` (\n `pk` int NOT NULL,\n `z1` varchar(255) COLLATE utf8mb4_0900_bin NOT NULL COMMENT 'foo bar baz',\n" + " PRIMARY KEY (`pk`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}}) // Insert some data - primaryDatabase.MustExec("insert into db01.t1 (pk, z1) values (2, 'bar');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t1;", [][]any{{"1", "foo"}, {"2", "bar"}}) + h.primaryDatabase.MustExec("insert into db01.t1 (pk, z1) values (2, 'bar');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t1;", [][]any{{"1", "foo"}, {"2", "bar"}}) // Drop the table - primaryDatabase.MustExec("drop table db01.t1;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{}) + h.primaryDatabase.MustExec("drop table db01.t1;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{}) // Rename a table - primaryDatabase.MustExec("create table originalName(pk1 int, pk2 int, c1 varchar(200), c2 varchar(200), primary key (pk1, pk2));") - primaryDatabase.MustExec("insert into originalName values (1, 2, 'one', 'two');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"originalName"}}) - requireReplicaResults(t, "select * from originalName;", [][]any{{"1", "2", "one", "two"}}) - primaryDatabase.MustExec("rename table originalName to newName;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"newName"}}) - requireReplicaResults(t, "select * from newName;", 
[][]any{{"1", "2", "one", "two"}}) + h.primaryDatabase.MustExec("create table originalName(pk1 int, pk2 int, c1 varchar(200), c2 varchar(200), primary key (pk1, pk2));") + h.primaryDatabase.MustExec("insert into originalName values (1, 2, 'one', 'two');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"originalName"}}) + h.requireReplicaResults("select * from originalName;", [][]any{{"1", "2", "one", "two"}}) + h.primaryDatabase.MustExec("rename table originalName to newName;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"newName"}}) + h.requireReplicaResults("select * from newName;", [][]any{{"1", "2", "one", "two"}}) } // TestBinlogPrimary_SchemaChangesWithManualCommit tests that manually managed transactions, which // contain a mix of schema and data changes, can be correctly replicated. func TestBinlogPrimary_SchemaChangesWithManualCommit(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Create table - primaryDatabase.MustExec("set @@autocommit=0;") - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("create table t (pk int primary key, c1 varchar(100), c2 int);") - primaryDatabase.MustExec("insert into t values (1, 'one', 1);") - primaryDatabase.MustExec("commit;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show create table t;", [][]any{{"t", "CREATE TABLE `t` (\n " + + h.primaryDatabase.MustExec("set @@autocommit=0;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("create table t (pk int primary key, c1 varchar(100), c2 int);") + h.primaryDatabase.MustExec("insert into t values (1, 'one', 1);") + h.primaryDatabase.MustExec("commit;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show create table t;", [][]any{{"t", "CREATE TABLE `t` (\n " + "`pk` int NOT NULL,\n `c1` varchar(100) COLLATE utf8mb4_0900_bin DEFAULT NULL,\n " + "`c2` int DEFAULT NULL,\n PRIMARY KEY (`pk`)\n) " + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}}) - requireReplicaResults(t, "select * from t;", [][]any{{"1", "one", "1"}}) + h.requireReplicaResults("select * from t;", [][]any{{"1", "one", "1"}}) // Alter column and update - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("alter table t modify column c2 varchar(100);") - primaryDatabase.MustExec("update t set c2='foo';") - primaryDatabase.MustExec("commit;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show create table t;", [][]any{{"t", "CREATE TABLE `t` (\n " + + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("alter table t modify column c2 varchar(100);") + h.primaryDatabase.MustExec("update t set c2='foo';") + h.primaryDatabase.MustExec("commit;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show create table t;", [][]any{{"t", "CREATE TABLE `t` (\n " + "`pk` int NOT NULL,\n `c1` varchar(100) COLLATE utf8mb4_0900_bin DEFAULT NULL,\n " + "`c2` varchar(100) COLLATE utf8mb4_0900_bin DEFAULT NULL,\n PRIMARY KEY (`pk`)\n) " + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}}) - requireReplicaResults(t, "select * from t;", [][]any{{"1", "one", "foo"}}) + 
h.requireReplicaResults("select * from t;", [][]any{{"1", "one", "foo"}}) } // TestBinlogPrimary_Rollback asserts that rolled back transactions are not sent to replicas. func TestBinlogPrimary_Rollback(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Create a test table - primaryDatabase.MustExec("set @@autocommit=0;") - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("create table t1 (pk int primary key, c1 varchar(100), c2 int);") - primaryDatabase.MustExec("commit;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}}) - requireReplicaResults(t, "select * from t1;", [][]any{}) + h.primaryDatabase.MustExec("set @@autocommit=0;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("create table t1 (pk int primary key, c1 varchar(100), c2 int);") + h.primaryDatabase.MustExec("commit;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}}) + h.requireReplicaResults("select * from t1;", [][]any{}) // Insert data, but roll back the transaction - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("insert into t1 values (1, 'two', 3);") - primaryDatabase.MustExec("rollback;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from t1;", [][]any{}) + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("insert into t1 values (1, 'two', 3);") + h.primaryDatabase.MustExec("rollback;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from t1;", [][]any{}) } // TestBinlogPrimary_MultipleTablesManualCommit tests that binlog events are generated correctly // when multiple tables are changed in a single SQL commit. 
func TestBinlogPrimary_MultipleTablesManualCommit(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Insert to multiple tables in a single SQL transaction - primaryDatabase.MustExec("set @@autocommit=0;") - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("create table t1 (pk int primary key, c1 varchar(100), c2 int);") - primaryDatabase.MustExec("insert into t1 values (1, 'one', 1);") - primaryDatabase.MustExec("create table t2 (pk int primary key, c1 varchar(100), c2 int);") - primaryDatabase.MustExec("insert into t2 values (1, 'eins', 1);") - primaryDatabase.MustExec("commit;") + h.primaryDatabase.MustExec("set @@autocommit=0;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("create table t1 (pk int primary key, c1 varchar(100), c2 int);") + h.primaryDatabase.MustExec("insert into t1 values (1, 'one', 1);") + h.primaryDatabase.MustExec("create table t2 (pk int primary key, c1 varchar(100), c2 int);") + h.primaryDatabase.MustExec("insert into t2 values (1, 'eins', 1);") + h.primaryDatabase.MustExec("commit;") // Verify the results on the replica - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show tables;", [][]any{{"t1"}, {"t2"}}) - requireReplicaResults(t, "select * from t1;", [][]any{{"1", "one", "1"}}) - requireReplicaResults(t, "select * from t2;", [][]any{{"1", "eins", "1"}}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show tables;", [][]any{{"t1"}, {"t2"}}) + h.requireReplicaResults("select * from t1;", [][]any{{"1", "one", "1"}}) + h.requireReplicaResults("select * from t2;", [][]any{{"1", "eins", "1"}}) // Update to multiple tables in a single SQL transaction - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("update t1 set c2=1111;") - primaryDatabase.MustExec("update t2 set c2=2222;") - primaryDatabase.MustExec("commit;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("update t1 set c2=1111;") + h.primaryDatabase.MustExec("update t2 set c2=2222;") + h.primaryDatabase.MustExec("commit;") // Verify the results on the replica - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from t1;", [][]any{{"1", "one", "1111"}}) - requireReplicaResults(t, "select * from t2;", [][]any{{"1", "eins", "2222"}}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from t1;", [][]any{{"1", "one", "1111"}}) + h.requireReplicaResults("select * from t2;", [][]any{{"1", "eins", "2222"}}) // Delete from multiple tables in a single SQL transaction - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("delete from t1 where c2=1111;") - primaryDatabase.MustExec("delete from t2 where c2=2222;") - primaryDatabase.MustExec("commit;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("delete from t1 where c2=1111;") + h.primaryDatabase.MustExec("delete from t2 where c2=2222;") + h.primaryDatabase.MustExec("commit;") // Verify the results on the replica - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from t1;", [][]any{}) - requireReplicaResults(t, "select * from t2;", [][]any{}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from t1;", 
[][]any{}) + h.requireReplicaResults("select * from t2;", [][]any{}) } // TestBinlogPrimary_ReplicateCreateDropDatabase tests that Dolt can correctly replicate statements to create, // drop, and undrop databases. func TestBinlogPrimary_ReplicateCreateDropDatabase(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) // Test CREATE DATABASE - primaryDatabase.MustExec("create database foobar1;") - primaryDatabase.MustExec("create table foobar1.table1 (c1 enum('red', 'green', 'blue'));") - primaryDatabase.MustExec("insert into foobar1.table1 values ('blue');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show databases;", [][]any{ + h.primaryDatabase.MustExec("create database foobar1;") + h.primaryDatabase.MustExec("create table foobar1.table1 (c1 enum('red', 'green', 'blue'));") + h.primaryDatabase.MustExec("insert into foobar1.table1 values ('blue');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show databases;", [][]any{ {"db01"}, {"foobar1"}, {"information_schema"}, {"mysql"}, {"performance_schema"}, {"sys"}}) - requireReplicaResults(t, "select * from foobar1.table1;", [][]any{{"blue"}}) + h.requireReplicaResults("select * from foobar1.table1;", [][]any{{"blue"}}) // Test DROP DATABASE - primaryDatabase.MustExec("drop database foobar1;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show databases;", [][]any{ + h.primaryDatabase.MustExec("drop database foobar1;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show databases;", [][]any{ {"db01"}, {"information_schema"}, {"mysql"}, {"performance_schema"}, {"sys"}}) // Test DOLT_UNDROP() - primaryDatabase.MustExec("call dolt_undrop('foobar1');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "show databases;", [][]any{ + h.primaryDatabase.MustExec("call dolt_undrop('foobar1');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("show databases;", [][]any{ {"db01"}, {"foobar1"}, {"information_schema"}, {"mysql"}, {"performance_schema"}, {"sys"}}) - requireReplicaResults(t, "select * from foobar1.table1;", [][]any{{"blue"}}) + h.requireReplicaResults("select * from foobar1.table1;", [][]any{{"blue"}}) } // TestBinlogPrimary_InsertUpdateDelete tests that insert, update, and delete statements can be executed correctly // in autocommit transactions, and also when they mixed together in the same explicit SQL transaction. 
func TestBinlogPrimary_InsertUpdateDelete(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") // Insert multiple rows - primaryDatabase.MustExec("insert into db01.t values ('1', 1, 1981), ('2', 2, 1982), ('3', 3, 1983), ('4', 4, 1984);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by pk;", [][]any{ + h.primaryDatabase.MustExec("insert into db01.t values ('1', 1, 1981), ('2', 2, 1982), ('3', 3, 1983), ('4', 4, 1984);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by pk;", [][]any{ {"1", "1", "1981"}, {"2", "2", "1982"}, {"3", "3", "1983"}, {"4", "4", "1984"}}) // Delete multiple rows - primaryDatabase.MustExec("delete from db01.t where pk in ('1', '3');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by pk;", [][]any{ + h.primaryDatabase.MustExec("delete from db01.t where pk in ('1', '3');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by pk;", [][]any{ {"2", "2", "1982"}, {"4", "4", "1984"}}) // Update multiple rows - primaryDatabase.MustExec("update db01.t set c2 = 1942;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by pk;", [][]any{ + h.primaryDatabase.MustExec("update db01.t set c2 = 1942;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by pk;", [][]any{ {"2", "2", "1942"}, {"4", "4", "1942"}}) // Turn off @@autocommit and mix inserts/updates/deletes in the same transaction - primaryDatabase.MustExec("SET @@autocommit=0;") - primaryDatabase.MustExec("insert into db01.t values ('10', 10, 2020), ('11', 11, 2021), ('12', 12, 2022), ('13', 13, 2023);") - primaryDatabase.MustExec("delete from db01.t where pk in ('11', '13');") - primaryDatabase.MustExec("update db01.t set c2 = 2042 where c2 > 2000;") - primaryDatabase.MustExec("COMMIT;") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by pk;", [][]any{ + h.primaryDatabase.MustExec("SET @@autocommit=0;") + h.primaryDatabase.MustExec("insert into db01.t values ('10', 10, 2020), ('11', 11, 2021), ('12', 12, 2022), ('13', 13, 2023);") + h.primaryDatabase.MustExec("delete from db01.t where pk in ('11', '13');") + h.primaryDatabase.MustExec("update db01.t set c2 = 2042 where c2 > 2000;") + h.primaryDatabase.MustExec("COMMIT;") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by pk;", [][]any{ {"10", "10", "2042"}, {"12", "12", "2042"}, {"2", "2", "1942"}, {"4", "4", "1942"}, }) @@ -800,259 +800,259 @@ func TestBinlogPrimary_InsertUpdateDelete(t *testing.T) { // TestBinlogPrimary_OnlyReplicateMainBranch tests that binlog events are only generated for the main branch of a Dolt repository. 
func TestBinlogPrimary_OnlyReplicateMainBranch(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // No events should be generated when we're not updating the main branch - primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") - primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") + h.primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Insert another row on branch1 and make sure it doesn't get replicated - primaryDatabase.MustExec("insert into db01.t values('two hundred', 200, 2000);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("insert into db01.t values('two hundred', 200, 2000);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Events should be generated from the main branch - primaryDatabase.MustExec("call dolt_checkout('main');") - primaryDatabase.MustExec("insert into db01.t values('42', 42, 2042);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"42", "42", "2042"}}) + h.primaryDatabase.MustExec("call dolt_checkout('main');") + h.primaryDatabase.MustExec("insert into db01.t values('42', 42, 2042);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"42", "42", "2042"}}) } // TestBinlogPrimary_KeylessTables tests that Dolt can replicate changes to keyless tables. 
func TestBinlogPrimary_KeylessTables(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (c1 varchar(100), c2 int, c3 int unsigned);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (c1 varchar(100), c2 int, c3 int unsigned);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Test inserts - primaryDatabase.MustExec("insert into db01.t values('one', 1, 11), ('two', 2, 22);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by c2;", [][]any{{"one", "1", "11"}, {"two", "2", "22"}}) + h.primaryDatabase.MustExec("insert into db01.t values('one', 1, 11), ('two', 2, 22);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by c2;", [][]any{{"one", "1", "11"}, {"two", "2", "22"}}) // Test inserting duplicate rows - primaryDatabase.MustExec("insert into db01.t values('one', 1, 11), ('one', 1, 11);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by c2;", [][]any{ + h.primaryDatabase.MustExec("insert into db01.t values('one', 1, 11), ('one', 1, 11);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by c2;", [][]any{ {"one", "1", "11"}, {"one", "1", "11"}, {"one", "1", "11"}, {"two", "2", "22"}}) // Test updating multiple rows - primaryDatabase.MustExec("update db01.t set c1='uno' where c1='one';") - primaryDatabase.MustExec("update db01.t set c1='zwei' where c1='two';") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by c2;", [][]any{ + h.primaryDatabase.MustExec("update db01.t set c1='uno' where c1='one';") + h.primaryDatabase.MustExec("update db01.t set c1='zwei' where c1='two';") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by c2;", [][]any{ {"uno", "1", "11"}, {"uno", "1", "11"}, {"uno", "1", "11"}, {"zwei", "2", "22"}}) // Test deleting multiple rows - primaryDatabase.MustExec("delete from db01.t where c1='uno';") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t order by c2;", [][]any{{"zwei", "2", "22"}}) + h.primaryDatabase.MustExec("delete from db01.t where c1='uno';") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t order by c2;", [][]any{{"zwei", "2", "22"}}) } // TestBinlogPrimary_Merge tests that the binlog is updated when data is merged in from another branch. 
func TestBinlogPrimary_Merge(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int, c2 year);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // No events should be generated when we're not updating the main branch - primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") - primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000), ('two-hundred', 200, 2001);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'inserting rows 100 and 200 on branch1');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") + h.primaryDatabase.MustExec("insert into db01.t values('hundred', 100, 2000), ('two-hundred', 200, 2001);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'inserting rows 100 and 200 on branch1');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Make a commit on main, so that we don't get a fast-forward merge later - primaryDatabase.MustExec("call dolt_checkout('main');") - primaryDatabase.MustExec("insert into db01.t values('42', 42, 2042);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'inserting row 42 on main');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"42", "42", "2042"}}) + h.primaryDatabase.MustExec("call dolt_checkout('main');") + h.primaryDatabase.MustExec("insert into db01.t values('42', 42, 2042);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'inserting row 42 on main');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"42", "42", "2042"}}) // Merge branch1 into main - primaryDatabase.MustExec("call dolt_merge('branch1');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{ + h.primaryDatabase.MustExec("call dolt_merge('branch1');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{ {"42", "42", "2042"}, {"hundred", "100", "2000"}, {"two-hundred", "200", "2001"}}) } // TestBinlogPrimary_Cherrypick tests binlog replication when dolt_cherry_pick() is used to cherry-pick commits. 
func TestBinlogPrimary_Cherrypick(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Make a couple of commits on branch1 so that we can cherry-pick them - primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") - primaryDatabase.MustExec("insert into db01.t values('01', 1);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") - primaryDatabase.MustExec("SET @RowOneCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('02', 2);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") - primaryDatabase.MustExec("SET @RowTwoCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('03', 3);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") - primaryDatabase.MustExec("SET @RowThreeCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("call dolt_checkout('-b', 'branch1');") + h.primaryDatabase.MustExec("insert into db01.t values('01', 1);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") + h.primaryDatabase.MustExec("SET @RowOneCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('02', 2);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") + h.primaryDatabase.MustExec("SET @RowTwoCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('03', 3);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") + h.primaryDatabase.MustExec("SET @RowThreeCommit=dolt_hashof('HEAD');") // Cherry-pick a commit from branch1 onto main - primaryDatabase.MustExec("call dolt_checkout('main');") - primaryDatabase.MustExec("call dolt_cherry_pick(@RowTwoCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"02", "2"}}) + h.primaryDatabase.MustExec("call dolt_checkout('main');") + h.primaryDatabase.MustExec("call dolt_cherry_pick(@RowTwoCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"02", "2"}}) // Cherry-pick another commit from branch1 onto main - primaryDatabase.MustExec("call dolt_cherry_pick(@RowThreeCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"02", "2"}, {"03", "3"}}) + h.primaryDatabase.MustExec("call dolt_cherry_pick(@RowThreeCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"02", "2"}, {"03", "3"}}) } // TestBinlogPrimary_Revert tests binlog replication when dolt_revert() is used to 
revert commits. func TestBinlogPrimary_Revert(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Make a couple of commits on main so that we can revert one - primaryDatabase.MustExec("insert into db01.t values('01', 1);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") - primaryDatabase.MustExec("SET @RowOneCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('02', 2);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") - primaryDatabase.MustExec("SET @RowTwoCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('03', 3);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") - primaryDatabase.MustExec("SET @RowThreeCommit=dolt_hashof('HEAD');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) + h.primaryDatabase.MustExec("insert into db01.t values('01', 1);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") + h.primaryDatabase.MustExec("SET @RowOneCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('02', 2);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") + h.primaryDatabase.MustExec("SET @RowTwoCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('03', 3);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") + h.primaryDatabase.MustExec("SET @RowThreeCommit=dolt_hashof('HEAD');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) // Revert a commit - primaryDatabase.MustExec("call dolt_revert(@RowTwoCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}, {"03", "3"}}) + h.primaryDatabase.MustExec("call dolt_revert(@RowTwoCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}, {"03", "3"}}) // Revert another commit - primaryDatabase.MustExec("call dolt_revert(@RowOneCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"03", "3"}}) + h.primaryDatabase.MustExec("call dolt_revert(@RowOneCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"03", "3"}}) } // TestBinlogPrimary_Reset tests that the binlog is updated when a branch head is reset to a different commit. 
func TestBinlogPrimary_Reset(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicationPrimarySystemVars) - setupForDoltToMySqlReplication() - startReplicationAndCreateTestDb(t, doltPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicationPrimarySystemVars) + h.setupForDoltToMySqlReplication() + h.startReplicationAndCreateTestDb(h.doltPort) - primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") - primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") - primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("create table db01.t (pk varchar(100) primary key, c1 int);") + h.primaryDatabase.MustExec("call dolt_commit('-Am', 'creating table t');") + h.primaryDatabase.MustExec("SET @EmptyTableCommit=dolt_hashof('HEAD');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Make a couple of commits on main so that we can test resetting to them - primaryDatabase.MustExec("insert into db01.t values('01', 1);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") - primaryDatabase.MustExec("SET @OneRowCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('02', 2);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") - primaryDatabase.MustExec("SET @TwoRowsCommit=dolt_hashof('HEAD');") - primaryDatabase.MustExec("insert into db01.t values('03', 3);") - primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") - primaryDatabase.MustExec("SET @ThreeRowsCommit=dolt_hashof('HEAD');") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) + h.primaryDatabase.MustExec("insert into db01.t values('01', 1);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 01');") + h.primaryDatabase.MustExec("SET @OneRowCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('02', 2);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 02');") + h.primaryDatabase.MustExec("SET @TwoRowsCommit=dolt_hashof('HEAD');") + h.primaryDatabase.MustExec("insert into db01.t values('03', 3);") + h.primaryDatabase.MustExec("call dolt_commit('-am', 'inserting 03');") + h.primaryDatabase.MustExec("SET @ThreeRowsCommit=dolt_hashof('HEAD');") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) // Reset back to the first commit when no rows are present - primaryDatabase.MustExec("call dolt_reset('--hard', @EmptyTableCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{}) + h.primaryDatabase.MustExec("call dolt_reset('--hard', @EmptyTableCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{}) // Reset back to the second commit when only one row is present - primaryDatabase.MustExec("call dolt_reset('--hard', @OneRowCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}}) + h.primaryDatabase.MustExec("call dolt_reset('--hard', @OneRowCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}}) // Reset to the third commit, when two rows are present -
primaryDatabase.MustExec("call dolt_reset('--hard', @TwoRowsCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}}) + h.primaryDatabase.MustExec("call dolt_reset('--hard', @TwoRowsCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}}) // Reset to the fourth commit, when all three rows are present - primaryDatabase.MustExec("call dolt_reset('--hard', @ThreeRowsCommit);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) + h.primaryDatabase.MustExec("call dolt_reset('--hard', @ThreeRowsCommit);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.t;", [][]any{{"01", "1"}, {"02", "2"}, {"03", "3"}}) } -func setupForDoltToMySqlReplication() { +func (h *harness) setupForDoltToMySqlReplication() { // Swap the replica and primary databases, since we're // replicating in the other direction in this test. - var tempDatabase = primaryDatabase - primaryDatabase = replicaDatabase - replicaDatabase = tempDatabase + var tempDatabase = h.primaryDatabase + h.primaryDatabase = h.replicaDatabase + h.replicaDatabase = tempDatabase // Set the session's timezone to UTC, to avoid TIMESTAMP test values changing // when they are converted to UTC for storage. - replicaDatabase.MustExec("SET @@time_zone = '+0:00';") + h.replicaDatabase.MustExec("SET @@time_zone = '+0:00';") // Reset binary logs and gtids on the replica, so that @@gtid_executed doesn't contain any // executed GTIDs from the replica server. - replicaDatabase.MustExec("reset master;") + h.replicaDatabase.MustExec("reset master;") } // outputReplicaApplierStatus prints out the replica applier status information from the // performance_schema replication_applier_status_by_worker table. This is useful for debugging // replication from a Dolt primary to a MySQL replica, since this often contains more detailed // information about why MySQL failed to apply a binlog event. -func outputReplicaApplierStatus(t *testing.T) { - newRows, err := replicaDatabase.Queryx("select * from performance_schema.replication_applier_status_by_worker;") - require.NoError(t, err) - allNewRows := readAllRowsIntoMaps(t, newRows) - t.Logf("\n\nreplication_applier_status_by_worker: %v\n", allNewRows) +func (h *harness) outputReplicaApplierStatus() { + newRows, err := h.replicaDatabase.Queryx("select * from performance_schema.replication_applier_status_by_worker;") + require.NoError(h.t, err) + allNewRows := readAllRowsIntoMaps(h.t, newRows) + h.t.Logf("\n\nreplication_applier_status_by_worker: %v\n", allNewRows) } // outputShowReplicaStatus prints out replica status information. This is useful for debugging // replication failures in tests since status will show whether the replica is successfully connected, // any recent errors, and what GTIDs have been executed. -func outputShowReplicaStatus(t *testing.T) { - newRows, err := replicaDatabase.Queryx("show replica status;") - require.NoError(t, err) - allNewRows := readAllRowsIntoMaps(t, newRows) - t.Logf("\n\nSHOW REPLICA STATUS: %v\n", allNewRows) +func (h *harness) outputShowReplicaStatus() { + newRows, err := h.replicaDatabase.Queryx("show replica status;") + require.NoError(h.t, err) + allNewRows := readAllRowsIntoMaps(h.t, newRows) + h.t.Logf("\n\nSHOW REPLICA STATUS: %v\n", allNewRows) } // copyMap returns a copy of the specified map |m|.
@@ -1066,51 +1066,51 @@ func copyMap(m map[string]string) map[string]string { // queryPrimaryServerUuid queries the primary server for its server UUID. If any errors are encountered, // this function will fail the current test. -func queryPrimaryServerUuid(t *testing.T) string { - rows, err := primaryDatabase.Queryx("SELECT @@server_uuid;") - require.NoError(t, err) - serverUuid := convertMapScanResultToStrings(readNextRow(t, rows)) - require.NoError(t, rows.Close()) +func (h *harness) queryPrimaryServerUuid() string { + rows, err := h.primaryDatabase.Queryx("SELECT @@server_uuid;") + require.NoError(h.t, err) + serverUuid := convertMapScanResultToStrings(readNextRow(h.t, rows)) + require.NoError(h.t, rows.Close()) return serverUuid["@@server_uuid"].(string) } // waitForReplicaToReconnect will poll the status of the replica and return when the status indicates // the replica has reconnected to the primary. If after 60s the replica hasn't reconnected, this // function will fail the current test. -func waitForReplicaToReconnect(t *testing.T) { +func (h *harness) waitForReplicaToReconnect() { startTime := time.Now() for { time.Sleep(500 * time.Millisecond) - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() if status["Replica_IO_Running"] == "Yes" { break } if startTime.Add(time.Second * 60).Before(time.Now()) { - t.Fatalf("Unable to detect replica reconnect after 60s") + h.t.Fatalf("Unable to detect replica reconnect after 60s") } } } // mustRestartDoltPrimaryServer starts up the Dolt sql-server, after it has already been stopped before this function // is called, and configures it as the primary database. -func mustRestartDoltPrimaryServer(t *testing.T) { +func (h *harness) mustRestartDoltPrimaryServer() { var err error - prevReplicaDatabase := replicaDatabase - doltPort, doltProcess, err = startDoltSqlServer(t, testDir, nil) - require.NoError(t, err) - primaryDatabase = replicaDatabase - replicaDatabase = prevReplicaDatabase + prevReplicaDatabase := h.replicaDatabase + h.doltPort, h.doltProcess, err = h.startDoltSqlServer(nil) + require.NoError(h.t, err) + h.primaryDatabase = h.replicaDatabase + h.replicaDatabase = prevReplicaDatabase } // mustRestartMySqlReplicaServer starts up the MySQL server, after it has already been stopped before this function // is called, and configures it as the replica database. -func mustRestartMySqlReplicaServer(t *testing.T) { +func (h *harness) mustRestartMySqlReplicaServer() { var err error - prevPrimaryDatabase := primaryDatabase - mySqlPort, mySqlProcess, err = startMySqlServer(t, testDir) - require.NoError(t, err) - replicaDatabase = primaryDatabase - primaryDatabase = prevPrimaryDatabase + prevPrimaryDatabase := h.primaryDatabase + h.mySqlPort, h.mySqlProcess, err = h.startMySqlServer() + require.NoError(h.t, err) + h.replicaDatabase = h.primaryDatabase + h.primaryDatabase = prevPrimaryDatabase } diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_alltypes_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_alltypes_test.go index c62c240f10..facc2d9dcc 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_alltypes_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_alltypes_test.go @@ -27,27 +27,27 @@ import ( // TestBinlogReplicationForAllTypes tests that operations (inserts, updates, and deletes) on all SQL // data types can be successfully replicated. 
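waitForReplicaToReconnect above polls every 500ms and gives up after 60 seconds using startTime.Add(time.Second * 60).Before(time.Now()). A sketch of the same loop with a slightly more direct timeout check via time.Since; the change itself keeps the original form:

func (h *harness) waitForReplicaToReconnect() {
	startTime := time.Now()
	for {
		time.Sleep(500 * time.Millisecond)
		status := h.queryReplicaStatus()
		if status["Replica_IO_Running"] == "Yes" {
			return
		}
		// time.Since reads the elapsed time directly instead of comparing shifted timestamps.
		if time.Since(startTime) > 60*time.Second {
			h.t.Fatalf("Unable to detect replica reconnect after 60s")
		}
	}
}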
func TestBinlogReplicationForAllTypes(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Set the session's timezone to UTC, to avoid TIMESTAMP test values changing // when they are converted to UTC for storage. - primaryDatabase.MustExec("SET @@time_zone = '+0:00';") + h.primaryDatabase.MustExec("SET @@time_zone = '+0:00';") // Create the test table tableName := "alltypes" createTableStatement := generateCreateTableStatement(tableName) - primaryDatabase.MustExec(createTableStatement) + h.primaryDatabase.MustExec(createTableStatement) // Make inserts on the primary – small, large, and null values - primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 0)) - primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 1)) - primaryDatabase.MustExec(generateInsertNullValuesStatement(tableName)) + h.primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 0)) + h.primaryDatabase.MustExec(generateInsertValuesStatement(tableName, 1)) + h.primaryDatabase.MustExec(generateInsertNullValuesStatement(tableName)) // Verify inserts on replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "1", row["pk"]) @@ -62,14 +62,14 @@ func TestBinlogReplicationForAllTypes(t *testing.T) { require.NoError(t, rows.Close()) // Make updates on the primary - primaryDatabase.MustExec(generateUpdateToNullValuesStatement(tableName, 1)) - primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 2, 0)) - primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 3, 1)) + h.primaryDatabase.MustExec(generateUpdateToNullValuesStatement(tableName, 1)) + h.primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 2, 0)) + h.primaryDatabase.MustExec(generateUpdateValuesStatement(tableName, 3, 1)) // Verify updates on the replica - waitForReplicaToCatchUp(t) - replicaDatabase.MustExec("use db01;") - rows, err = replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") + h.waitForReplicaToCatchUp() + h.replicaDatabase.MustExec("use db01;") + rows, err = h.replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "1", row["pk"]) @@ -84,13 +84,13 @@ func TestBinlogReplicationForAllTypes(t *testing.T) { require.NoError(t, rows.Close()) // Make deletes on the primary - primaryDatabase.MustExec("delete from alltypes where pk=1;") - primaryDatabase.MustExec("delete from alltypes where pk=2;") - primaryDatabase.MustExec("delete from alltypes where pk=3;") + h.primaryDatabase.MustExec("delete from alltypes where pk=1;") + h.primaryDatabase.MustExec("delete from alltypes where pk=2;") + h.primaryDatabase.MustExec("delete from alltypes where pk=3;") // Verify deletes on the replica - waitForReplicaToCatchUp(t) - rows, err = replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") + h.waitForReplicaToCatchUp() + rows, err = h.replicaDatabase.Queryx("select * from db01.alltypes order by pk asc;") require.NoError(t, err) require.False(t, rows.Next()) 
require.NoError(t, rows.Close()) diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_filters_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_filters_test.go index 4d6adc0061..2ddfcd8481 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_filters_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_filters_test.go @@ -24,37 +24,37 @@ import ( // TestBinlogReplicationFilters_ignoreTablesOnly tests that the ignoreTables replication // filtering option is correctly applied and honored. func TestBinlogReplicationFilters_ignoreTablesOnly(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Ignore replication events for db01.t2. Also tests that the first filter setting is overwritten by // the second, and that db and table names are case-insensitive. - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t1);") - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(DB01.T2);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t1);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(DB01.T2);") // Assert that status shows replication filters - status := showReplicaStatus(t) + status := h.showReplicaStatus() require.Equal(t, "db01.t2", status["Replicate_Ignore_Table"]) require.Equal(t, "", status["Replicate_Do_Table"]) // Make changes on the primary - primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") - primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") for i := 1; i < 12; i++ { - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) } - primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") - primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") - primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") - primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") + h.primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") + h.primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") + h.primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") + h.primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") // Pause to let the replica catch up - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() // Verify that all changes from t1 were applied on the replica - rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") + rows, err := h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "10", row["count"]) @@ -63,7 +63,7 @@ func TestBinlogReplicationFilters_ignoreTablesOnly(t *testing.T) { require.NoError(t, rows.Close()) // Verify that no changes from t2 were applied on the replica
- rows, err = replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") + rows, err = h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "0", row["count"]) @@ -75,37 +75,37 @@ func TestBinlogReplicationFilters_ignoreTablesOnly(t *testing.T) { // TestBinlogReplicationFilters_doTablesOnly tests that the doTables replication // filtering option is correctly applied and honored. func TestBinlogReplicationFilters_doTablesOnly(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Do replication events for db01.t1. Also tests that the first filter setting is overwritten by // the second and that db and that db and table names are case-insensitive. - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t2);") - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(DB01.T1);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t2);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(DB01.T1);") // Assert that status shows replication filters - status := showReplicaStatus(t) + status := h.showReplicaStatus() require.Equal(t, "db01.t1", status["Replicate_Do_Table"]) require.Equal(t, "", status["Replicate_Ignore_Table"]) // Make changes on the primary - primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") - primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") for i := 1; i < 12; i++ { - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) } - primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") - primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") - primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") - primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") + h.primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") + h.primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") + h.primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") + h.primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") // Pause to let the replica catch up - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() // Verify that all changes from t1 were applied on the replica - rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") + rows, err := h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "10", row["count"]) @@ -114,7 +114,7 @@ func TestBinlogReplicationFilters_doTablesOnly(t *testing.T) { require.NoError(t, rows.Close()) // Verify that no changes from t2 were applied on the replica - rows, err = replicaDatabase.Queryx("SELECT 
COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") + rows, err = h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "0", row["count"]) @@ -126,38 +126,38 @@ func TestBinlogReplicationFilters_doTablesOnly(t *testing.T) { // TestBinlogReplicationFilters_doTablesAndIgnoreTables tests that the doTables and ignoreTables // replication filtering options are correctly applied and honored when used together. func TestBinlogReplicationFilters_doTablesAndIgnoreTables(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Do replication events for db01.t1, and db01.t2 - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t1, db01.t2);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(db01.t1, db01.t2);") // Ignore replication events for db01.t2 - replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t2);") + h.replicaDatabase.MustExec("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(db01.t2);") // Assert that replica status shows replication filters - status := showReplicaStatus(t) + status := h.showReplicaStatus() require.True(t, status["Replicate_Do_Table"] == "db01.t1,db01.t2" || status["Replicate_Do_Table"] == "db01.t2,db01.t1") require.Equal(t, "db01.t2", status["Replicate_Ignore_Table"]) // Make changes on the primary - primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") - primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t1 (pk INT PRIMARY KEY);") + h.primaryDatabase.MustExec("CREATE TABLE db01.t2 (pk INT PRIMARY KEY);") for i := 1; i < 12; i++ { - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) - primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t1 VALUES (%d);", i)) + h.primaryDatabase.MustExec(fmt.Sprintf("INSERT INTO db01.t2 VALUES (%d);", i)) } - primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") - primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") - primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") - primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") + h.primaryDatabase.MustExec("UPDATE db01.t1 set pk = pk-1;") + h.primaryDatabase.MustExec("UPDATE db01.t2 set pk = pk-1;") + h.primaryDatabase.MustExec("DELETE FROM db01.t1 WHERE pk = 10;") + h.primaryDatabase.MustExec("DELETE FROM db01.t2 WHERE pk = 10;") // Pause to let the replica catch up - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() // Verify that all changes from t1 were applied on the replica - rows, err := replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") + rows, err := h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t1;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "10", row["count"]) @@ -166,7 +166,7 @@ func TestBinlogReplicationFilters_doTablesAndIgnoreTables(t *testing.T) { require.NoError(t, rows.Close()) // Verify that no changes from t2 were applied on the replica - 
rows, err = replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") + rows, err = h.replicaDatabase.Queryx("SELECT COUNT(pk) as count, MIN(pk) as min, MAX(pk) as max from db01.t2;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "0", row["count"]) @@ -177,15 +177,15 @@ func TestBinlogReplicationFilters_doTablesAndIgnoreTables(t *testing.T) { // TestBinlogReplicationFilters_errorCases test returned errors for various error cases. func TestBinlogReplicationFilters_errorCases(t *testing.T) { - defer teardown(t) - startSqlServers(t) + h := newHarness(t) + h.startSqlServers() // All tables must be qualified with a database - _, err := replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(t1);") + _, err := h.replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_DO_TABLE=(t1);") require.Error(t, err) require.ErrorContains(t, err, "no database specified for table") - _, err = replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(t1);") + _, err = h.replicaDatabase.Queryx("CHANGE REPLICATION FILTER REPLICATE_IGNORE_TABLE=(t1);") require.Error(t, err) require.ErrorContains(t, err, "no database specified for table") } diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_multidb_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_multidb_test.go index 61436050d8..2f531adc72 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_multidb_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_multidb_test.go @@ -23,30 +23,30 @@ import ( // TestBinlogReplicationMultiDb tests that binlog events spanning multiple databases are correctly // applied by a replica. 
func TestBinlogReplicationMultiDb(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Make changes on the primary to db01 and db02 - primaryDatabase.MustExec("create database db02;") - primaryDatabase.MustExec("use db01;") - primaryDatabase.MustExec("create table t01 (pk int primary key, c1 int default (0))") - primaryDatabase.MustExec("use db02;") - primaryDatabase.MustExec("create table t02 (pk int primary key, c1 int default (0))") - primaryDatabase.MustExec("use db01;") - primaryDatabase.MustExec("insert into t01 (pk) values (1), (3), (5), (8), (9);") - primaryDatabase.MustExec("use db02;") - primaryDatabase.MustExec("insert into t02 (pk) values (2), (4), (6), (7), (10);") - primaryDatabase.MustExec("use db01;") - primaryDatabase.MustExec("delete from t01 where pk=9;") - primaryDatabase.MustExec("delete from db02.t02 where pk=10;") - primaryDatabase.MustExec("use db02;") - primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;") - primaryDatabase.MustExec("update t02 set pk=8 where pk=7;") + h.primaryDatabase.MustExec("create database db02;") + h.primaryDatabase.MustExec("use db01;") + h.primaryDatabase.MustExec("create table t01 (pk int primary key, c1 int default (0))") + h.primaryDatabase.MustExec("use db02;") + h.primaryDatabase.MustExec("create table t02 (pk int primary key, c1 int default (0))") + h.primaryDatabase.MustExec("use db01;") + h.primaryDatabase.MustExec("insert into t01 (pk) values (1), (3), (5), (8), (9);") + h.primaryDatabase.MustExec("use db02;") + h.primaryDatabase.MustExec("insert into t02 (pk) values (2), (4), (6), (7), (10);") + h.primaryDatabase.MustExec("use db01;") + h.primaryDatabase.MustExec("delete from t01 where pk=9;") + h.primaryDatabase.MustExec("delete from db02.t02 where pk=10;") + h.primaryDatabase.MustExec("use db02;") + h.primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;") + h.primaryDatabase.MustExec("update t02 set pk=8 where pk=7;") // Verify the changes in db01 on the replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("select * from db01.t01 order by pk asc;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("select * from db01.t01 order by pk asc;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "1", row["pk"]) @@ -61,8 +61,8 @@ func TestBinlogReplicationMultiDb(t *testing.T) { require.NoError(t, rows.Close()) // Verify db01.dolt_diff - replicaDatabase.MustExec("use db01;") - rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;") + h.replicaDatabase.MustExec("use db01;") + rows, err = h.replicaDatabase.Queryx("select * from db01.dolt_diff;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "t01", row["table_name"]) @@ -85,8 +85,8 @@ func TestBinlogReplicationMultiDb(t *testing.T) { require.NoError(t, rows.Close()) // Verify the changes in db02 on the replica - replicaDatabase.MustExec("use db02;") - rows, err = replicaDatabase.Queryx("select * from db02.t02 order by pk asc;") + h.replicaDatabase.MustExec("use db02;") + rows, err = h.replicaDatabase.Queryx("select * from db02.t02 order by pk asc;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "2", row["pk"]) @@ -100,7 +100,7 @@ func 
TestBinlogReplicationMultiDb(t *testing.T) { require.NoError(t, rows.Close()) // Verify db02.dolt_diff - rows, err = replicaDatabase.Queryx("select * from db02.dolt_diff;") + rows, err = h.replicaDatabase.Queryx("select * from db02.dolt_diff;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "t02", row["table_name"]) @@ -125,28 +125,28 @@ func TestBinlogReplicationMultiDb(t *testing.T) { // TestBinlogReplicationMultiDbTransactions tests that binlog events for transactions that span // multiple DBs are applied correctly to a replica. func TestBinlogReplicationMultiDbTransactions(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Make changes on the primary to db01 and db02 - primaryDatabase.MustExec("create database db02;") - primaryDatabase.MustExec("create table db01.t01 (pk int primary key, c1 int default (0))") - primaryDatabase.MustExec("create table db02.t02 (pk int primary key, c1 int default (0))") - primaryDatabase.MustExec("set @autocommit = 0;") + h.primaryDatabase.MustExec("create database db02;") + h.primaryDatabase.MustExec("create table db01.t01 (pk int primary key, c1 int default (0))") + h.primaryDatabase.MustExec("create table db02.t02 (pk int primary key, c1 int default (0))") + h.primaryDatabase.MustExec("set @autocommit = 0;") - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("insert into db01.t01 (pk) values (1), (3), (5), (8), (9);") - primaryDatabase.MustExec("insert into db02.t02 (pk) values (2), (4), (6), (7), (10);") - primaryDatabase.MustExec("delete from db01.t01 where pk=9;") - primaryDatabase.MustExec("delete from db02.t02 where pk=10;") - primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;") - primaryDatabase.MustExec("update db02.t02 set pk=8 where pk=7;") - primaryDatabase.MustExec("commit;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("insert into db01.t01 (pk) values (1), (3), (5), (8), (9);") + h.primaryDatabase.MustExec("insert into db02.t02 (pk) values (2), (4), (6), (7), (10);") + h.primaryDatabase.MustExec("delete from db01.t01 where pk=9;") + h.primaryDatabase.MustExec("delete from db02.t02 where pk=10;") + h.primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;") + h.primaryDatabase.MustExec("update db02.t02 set pk=8 where pk=7;") + h.primaryDatabase.MustExec("commit;") // Verify the changes in db01 on the replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("select * from db01.t01 order by pk asc;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("select * from db01.t01 order by pk asc;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "1", row["pk"]) @@ -160,8 +160,8 @@ func TestBinlogReplicationMultiDbTransactions(t *testing.T) { require.NoError(t, rows.Close()) // Verify db01.dolt_diff - replicaDatabase.MustExec("use db01;") - rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;") + h.replicaDatabase.MustExec("use db01;") + rows, err = h.replicaDatabase.Queryx("select * from db01.dolt_diff;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "t01", row["table_name"]) @@ -175,9 +175,9 @@ func TestBinlogReplicationMultiDbTransactions(t 
*testing.T) { require.NoError(t, rows.Close()) // Verify the changes in db02 on the replica - waitForReplicaToCatchUp(t) - replicaDatabase.MustExec("use db02;") - rows, err = replicaDatabase.Queryx("select * from db02.t02 order by pk asc;") + h.waitForReplicaToCatchUp() + h.replicaDatabase.MustExec("use db02;") + rows, err = h.replicaDatabase.Queryx("select * from db02.t02 order by pk asc;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "2", row["pk"]) @@ -191,7 +191,7 @@ func TestBinlogReplicationMultiDbTransactions(t *testing.T) { require.NoError(t, rows.Close()) // Verify db02.dolt_diff - rows, err = replicaDatabase.Queryx("select * from db02.dolt_diff;") + rows, err = h.replicaDatabase.Queryx("select * from db02.dolt_diff;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "t02", row["table_name"]) diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_reconnect_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_reconnect_test.go index 30e44e3308..9e26df9b74 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_reconnect_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_reconnect_test.go @@ -28,38 +28,34 @@ import ( "github.com/stretchr/testify/require" ) -var toxiClient *toxiproxyclient.Client -var mysqlProxy *toxiproxyclient.Proxy -var proxyPort int - // TestBinlogReplicationAutoReconnect tests that the replica's connection to the primary is correctly // reestablished if it drops. func TestBinlogReplicationAutoReconnect(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - configureToxiProxy(t) - configureFastConnectionRetry(t) - startReplicationAndCreateTestDb(t, proxyPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.configureToxiProxy() + h.configureFastConnectionRetry() + h.startReplicationAndCreateTestDb(h.proxyPort) // Get the replica started up and ensure it's in sync with the primary before turning on the limit_data toxic - testInitialReplicaStatus(t) - primaryDatabase.MustExec("create table reconnect_test(pk int primary key, c1 varchar(255));") - waitForReplicaToCatchUp(t) - turnOnLimitDataToxic(t) + h.testInitialReplicaStatus() + h.primaryDatabase.MustExec("create table reconnect_test(pk int primary key, c1 varchar(255));") + h.waitForReplicaToCatchUp() + h.turnOnLimitDataToxic() for i := 0; i < 1000; i++ { value := "foobarbazbashfoobarbazbashfoobarbazbashfoobarbazbashfoobarbazbash" - primaryDatabase.MustExec(fmt.Sprintf("insert into reconnect_test values (%v, %q)", i, value)) + h.primaryDatabase.MustExec(fmt.Sprintf("insert into reconnect_test values (%v, %q)", i, value)) } // Remove the limit_data toxic so that a connection can be reestablished - err := mysqlProxy.RemoveToxic("limit_data") + err := h.mysqlProxy.RemoveToxic("limit_data") require.NoError(t, err) t.Logf("Toxiproxy proxy limit_data toxic removed") // Assert that all records get written to the table - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() - rows, err := replicaDatabase.Queryx("select min(pk) as min, max(pk) as max, count(pk) as count from db01.reconnect_test;") + rows, err := h.replicaDatabase.Queryx("select min(pk) as min, max(pk) as max, count(pk) as count from db01.reconnect_test;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) @@ -69,7 +65,7 @@ func 
TestBinlogReplicationAutoReconnect(t *testing.T) { require.NoError(t, rows.Close()) // Assert that show replica status show reconnection IO error - status := showReplicaStatus(t) + status := h.showReplicaStatus() require.Equal(t, "1158", status["Last_IO_Errno"]) require.True(t, strings.Contains(status["Last_IO_Error"].(string), "EOF")) requireRecentTimeString(t, status["Last_IO_Error_Timestamp"]) @@ -77,54 +73,54 @@ func TestBinlogReplicationAutoReconnect(t *testing.T) { // configureFastConnectionRetry configures the replica to retry a failed connection after 5s, instead of the default 60s // connection retry interval. This is used for testing connection retry logic without waiting the full default period. -func configureFastConnectionRetry(_ *testing.T) { - replicaDatabase.MustExec( +func (h *harness) configureFastConnectionRetry() { + h.replicaDatabase.MustExec( "change replication source to SOURCE_CONNECT_RETRY=5;") } // testInitialReplicaStatus tests the data returned by SHOW REPLICA STATUS and errors // out if any values are not what is expected for a replica that has just connected // to a MySQL primary. -func testInitialReplicaStatus(t *testing.T) { - status := showReplicaStatus(t) +func (h *harness) testInitialReplicaStatus() { + status := h.showReplicaStatus() // Positioning settings - require.Equal(t, "1", status["Auto_Position"]) + require.Equal(h.t, "1", status["Auto_Position"]) // Connection settings - require.Equal(t, "5", status["Connect_Retry"]) - require.Equal(t, "86400", status["Source_Retry_Count"]) - require.Equal(t, "localhost", status["Source_Host"]) - require.NotEmpty(t, status["Source_Port"]) - require.NotEmpty(t, status["Source_User"]) + require.Equal(h.t, "5", status["Connect_Retry"]) + require.Equal(h.t, "86400", status["Source_Retry_Count"]) + require.Equal(h.t, "localhost", status["Source_Host"]) + require.NotEmpty(h.t, status["Source_Port"]) + require.NotEmpty(h.t, status["Source_User"]) // Error status - require.Equal(t, "0", status["Last_Errno"]) - require.Equal(t, "", status["Last_Error"]) - require.Equal(t, "0", status["Last_IO_Errno"]) - require.Equal(t, "", status["Last_IO_Error"]) - require.Equal(t, "", status["Last_IO_Error_Timestamp"]) - require.Equal(t, "0", status["Last_SQL_Errno"]) - require.Equal(t, "", status["Last_SQL_Error"]) - require.Equal(t, "", status["Last_SQL_Error_Timestamp"]) + require.Equal(h.t, "0", status["Last_Errno"]) + require.Equal(h.t, "", status["Last_Error"]) + require.Equal(h.t, "0", status["Last_IO_Errno"]) + require.Equal(h.t, "", status["Last_IO_Error"]) + require.Equal(h.t, "", status["Last_IO_Error_Timestamp"]) + require.Equal(h.t, "0", status["Last_SQL_Errno"]) + require.Equal(h.t, "", status["Last_SQL_Error"]) + require.Equal(h.t, "", status["Last_SQL_Error_Timestamp"]) // Empty filter configuration - require.Equal(t, "", status["Replicate_Do_Table"]) - require.Equal(t, "", status["Replicate_Ignore_Table"]) + require.Equal(h.t, "", status["Replicate_Do_Table"]) + require.Equal(h.t, "", status["Replicate_Ignore_Table"]) // Thread status - require.True(t, + require.True(h.t, status["Replica_IO_Running"] == "Yes" || status["Replica_IO_Running"] == "Connecting") - require.Equal(t, "Yes", status["Replica_SQL_Running"]) + require.Equal(h.t, "Yes", status["Replica_SQL_Running"]) // Unsupported fields - require.Equal(t, "INVALID", status["Source_Log_File"]) - require.Equal(t, "Ignored", status["Source_SSL_Allowed"]) - require.Equal(t, "None", status["Until_Condition"]) - require.Equal(t, "0", status["SQL_Delay"]) - 
require.Equal(t, "0", status["SQL_Remaining_Delay"]) - require.Equal(t, "0", status["Seconds_Behind_Source"]) + require.Equal(h.t, "INVALID", status["Source_Log_File"]) + require.Equal(h.t, "Ignored", status["Source_SSL_Allowed"]) + require.Equal(h.t, "None", status["Until_Condition"]) + require.Equal(h.t, "0", status["SQL_Delay"]) + require.Equal(h.t, "0", status["SQL_Remaining_Delay"]) + require.Equal(h.t, "0", status["Seconds_Behind_Source"]) } // requireRecentTimeString asserts that the specified |datetime| is a non-nil timestamp string @@ -141,14 +137,14 @@ func requireRecentTimeString(t *testing.T, datetime interface{}) { // showReplicaStatus returns a map with the results of SHOW REPLICA STATUS, keyed by the // name of each column. -func showReplicaStatus(t *testing.T) map[string]interface{} { - rows, err := replicaDatabase.Queryx("show replica status;") - require.NoError(t, err) +func (h *harness) showReplicaStatus() map[string]interface{} { + rows, err := h.replicaDatabase.Queryx("show replica status;") + require.NoError(h.t, err) defer rows.Close() - return convertMapScanResultToStrings(readNextRow(t, rows)) + return convertMapScanResultToStrings(readNextRow(h.t, rows)) } -func configureToxiProxy(t *testing.T) { +func (h *harness) configureToxiProxy() { toxiproxyPort := findFreePort() metrics := toxiproxy.NewMetricsContainer(prometheus.NewRegistry()) @@ -157,31 +153,31 @@ func configureToxiProxy(t *testing.T) { toxiproxyServer.Listen("localhost", strconv.Itoa(toxiproxyPort)) }() time.Sleep(500 * time.Millisecond) - t.Logf("Toxiproxy control plane running on port %d", toxiproxyPort) + h.t.Logf("Toxiproxy control plane running on port %d", toxiproxyPort) - toxiClient = toxiproxyclient.NewClient(fmt.Sprintf("localhost:%d", toxiproxyPort)) + h.toxiClient = toxiproxyclient.NewClient(fmt.Sprintf("localhost:%d", toxiproxyPort)) - proxyPort = findFreePort() + h.proxyPort = findFreePort() var err error - mysqlProxy, err = toxiClient.CreateProxy("mysql", - fmt.Sprintf("localhost:%d", proxyPort), // downstream - fmt.Sprintf("localhost:%d", mySqlPort)) // upstream + h.mysqlProxy, err = h.toxiClient.CreateProxy("mysql", + fmt.Sprintf("localhost:%d", h.proxyPort), // downstream + fmt.Sprintf("localhost:%d", h.mySqlPort)) // upstream if err != nil { panic(fmt.Sprintf("unable to create toxiproxy: %v", err.Error())) } - t.Logf("Toxiproxy proxy started on port %d", proxyPort) + h.t.Logf("Toxiproxy proxy started on port %d", h.proxyPort) } // turnOnLimitDataToxic adds a limit_data toxic to the active Toxiproxy, which prevents more than 1KB of data // from being sent from the primary through the proxy to the replica. Callers MUST call configureToxiProxy // before calling this function. -func turnOnLimitDataToxic(t *testing.T) { - require.NotNil(t, mysqlProxy) - _, err := mysqlProxy.AddToxic("limit_data", "limit_data", "downstream", 1.0, toxiproxyclient.Attributes{ +func (h *harness) turnOnLimitDataToxic() { + require.NotNil(h.t, h.mysqlProxy) + _, err := h.mysqlProxy.AddToxic("limit_data", "limit_data", "downstream", 1.0, toxiproxyclient.Attributes{ "bytes": 1_000, }) - require.NoError(t, err) - t.Logf("Toxiproxy proxy with limit_data toxic (1KB) started on port %d", proxyPort) + require.NoError(h.t, err) + h.t.Logf("Toxiproxy proxy with limit_data toxic (1KB) started on port %d", h.proxyPort) } // convertMapScanResultToStrings converts each value in the specified map |m| into a string. 
diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_restart_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_restart_test.go index 1ebb0a046c..dcfca9b54a 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_restart_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_restart_test.go @@ -25,11 +25,11 @@ import ( // TestBinlogReplicationServerRestart tests that a replica can be configured and started, then the // server process can be restarted and replica can be restarted without problems. func TestBinlogReplicationServerRestart(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) - primaryDatabase.MustExec("create table t (pk int auto_increment primary key)") + h.primaryDatabase.MustExec("create table t (pk int auto_increment primary key)") // Launch a goroutine that inserts data for 5 seconds var wg sync.WaitGroup @@ -38,22 +38,22 @@ func TestBinlogReplicationServerRestart(t *testing.T) { defer wg.Done() limit := 5 * time.Second for startTime := time.Now(); time.Now().Sub(startTime) <= limit; { - primaryDatabase.MustExec("insert into t values (DEFAULT);") + h.primaryDatabase.MustExec("insert into t values (DEFAULT);") time.Sleep(100 * time.Millisecond) } }() // Let the replica process a few transactions, then stop the server and pause a second - waitForReplicaToReachGtid(t, 3) - stopDoltSqlServer(t) + h.waitForReplicaToReachGtid(3) + h.stopDoltSqlServer() time.Sleep(1000 * time.Millisecond) var err error - doltPort, doltProcess, err = startDoltSqlServer(t, testDir, nil) + h.doltPort, h.doltProcess, err = h.startDoltSqlServer(nil) require.NoError(t, err) // Check replication status on the replica and assert configuration persisted - status := showReplicaStatus(t) + status := h.showReplicaStatus() // The default Connect_Retry interval is 60s; but some tests configure a faster connection retry interval require.True(t, status["Connect_Retry"] == "5" || status["Connect_Retry"] == "60") require.Equal(t, "86400", status["Source_Retry_Count"]) @@ -64,16 +64,16 @@ func TestBinlogReplicationServerRestart(t *testing.T) { // Restart replication on replica // TODO: For now, we have to set server_id each time we start the service. 
// Turn this into a persistent sys var - replicaDatabase.MustExec("set @@global.server_id=123;") - replicaDatabase.MustExec("START REPLICA") + h.replicaDatabase.MustExec("set @@global.server_id=123;") + h.replicaDatabase.MustExec("START REPLICA") // Assert that all changes have replicated from the primary wg.Wait() - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() countMaxQuery := "SELECT COUNT(pk) AS count, MAX(pk) as max FROM db01.t;" - primaryRows, err := primaryDatabase.Queryx(countMaxQuery) + primaryRows, err := h.primaryDatabase.Queryx(countMaxQuery) require.NoError(t, err) - replicaRows, err := replicaDatabase.Queryx(countMaxQuery) + replicaRows, err := h.replicaDatabase.Queryx(countMaxQuery) require.NoError(t, err) primaryRow := convertMapScanResultToStrings(readNextRow(t, primaryRows)) replicaRow := convertMapScanResultToStrings(readNextRow(t, replicaRows)) diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_test.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_test.go index 9834da63b7..8839a44d3d 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_test.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replication_test.go @@ -16,6 +16,7 @@ package binlogreplication import ( "bufio" + "context" "fmt" "io" "net" @@ -23,17 +24,16 @@ import ( "os/exec" "os/user" "path/filepath" - "reflect" "regexp" "runtime" "slices" "strconv" "strings" "sync" - "syscall" "testing" "time" + toxiproxyclient "github.com/Shopify/toxiproxy/v2/client" _ "github.com/go-sql-driver/mysql" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" @@ -42,12 +42,38 @@ import ( "github.com/dolthub/go-mysql-server/sql/binlogreplication" ) -var mySqlPort, doltPort int -var primaryDatabase, replicaDatabase *sqlx.DB -var mySqlProcess, doltProcess *os.Process -var doltLogFilePath, oldDoltLogFilePath, mysqlLogFilePath string -var doltLogFile, mysqlLogFile *os.File -var testDir string +type harness struct { + t *testing.T + mySqlPort int + doltPort int + primaryDatabase *sqlx.DB + replicaDatabase *sqlx.DB + mySqlProcess *os.Process + doltProcess *os.Process + doltLogFilePath string + oldDoltLogFilePath string + mysqlLogFilePath string + doltLogFile *os.File + mysqlLogFile *os.File + testDir string + toxiClient *toxiproxyclient.Client + mysqlProxy *toxiproxyclient.Proxy + proxyPort int +} + +var commandCtx context.Context +var commandCtxCancel func() + +func init() { + commandCtx, commandCtxCancel = context.WithCancel(context.Background()) +} + +func newHarness(t *testing.T) *harness { + ret := &harness{t: t} + t.Cleanup(ret.teardown) + t.Parallel() + return ret +} // doltReplicaSystemVars are the common system variables that need // to be set on a Dolt replica before replication is turned on. 
@@ -56,6 +82,7 @@ var doltReplicaSystemVars = map[string]string{ } func TestMain(m *testing.M) { + InstallSignalHandlers() res := func() int { defer func() { cachedDoltDevBuildPathOnce.Do(func() {}) @@ -86,7 +113,7 @@ func DoltDevBuildPath() string { goDirPath := filepath.Join(originalWorkingDir, "..", "..", "..", "..") - cmd := exec.Command("go", "build", "-o", fullpath, "./cmd/dolt") + cmd := exec.CommandContext(commandCtx, "go", "build", "-o", fullpath, "./cmd/dolt") cmd.Dir = goDirPath output, err := cmd.CombinedOutput() if err != nil { @@ -97,44 +124,57 @@ func DoltDevBuildPath() string { return cachedDoltDevBuildPath } -func teardown(t *testing.T) { - if mySqlProcess != nil { - stopMySqlServer(t) +func (h *harness) teardown() { + // Some of this work can take a bit of time. Do some of it in parallel. + var wg sync.WaitGroup + if h.mySqlProcess != nil { + wg.Add(1) + go func() { + defer wg.Done() + h.stopMySqlServer() + }() } - if doltProcess != nil { - stopDoltSqlServer(t) + if h.doltProcess != nil { + wg.Add(1) + go func() { + defer wg.Done() + h.stopDoltSqlServer() + }() } - if mysqlLogFile != nil { - mysqlLogFile.Close() + if h.mysqlLogFile != nil { + h.mysqlLogFile.Close() } - if doltLogFile != nil { - doltLogFile.Close() + if h.doltLogFile != nil { + h.doltLogFile.Close() + } + wg.Wait() + + if h.toxiClient != nil { + proxies, err := h.toxiClient.Proxies() + if err != nil { + for _, value := range proxies { + value.Delete() + } + } } // Output server logs on failure for easier debugging - if t.Failed() { - if oldDoltLogFilePath != "" { - t.Logf("\nDolt server log from %s:\n", oldDoltLogFilePath) - printFile(t, oldDoltLogFilePath) + if h.t.Failed() { + if h.oldDoltLogFilePath != "" { + h.t.Logf("\nDolt server log from %s:\n", h.oldDoltLogFilePath) + printFile(h.t, h.oldDoltLogFilePath) } - t.Logf("\nDolt server log from %s:\n", doltLogFilePath) - printFile(t, doltLogFilePath) - t.Logf("\nMySQL server log from %s:\n", mysqlLogFilePath) - printFile(t, mysqlLogFilePath) - mysqlErrorLogFilePath := filepath.Join(filepath.Dir(mysqlLogFilePath), "error_log.err") - t.Logf("\nMySQL server error log from %s:\n", mysqlErrorLogFilePath) - printFile(t, mysqlErrorLogFilePath) + h.t.Logf("\nDolt server log from %s:\n", h.doltLogFilePath) + printFile(h.t, h.doltLogFilePath) + h.t.Logf("\nMySQL server log from %s:\n", h.mysqlLogFilePath) + printFile(h.t, h.mysqlLogFilePath) + mysqlErrorLogFilePath := filepath.Join(filepath.Dir(h.mysqlLogFilePath), "error_log.err") + h.t.Logf("\nMySQL server error log from %s:\n", mysqlErrorLogFilePath) + printFile(h.t, mysqlErrorLogFilePath) } else { // clean up temp files on clean test runs - defer os.RemoveAll(testDir) - } - - if toxiClient != nil { - proxies, _ := toxiClient.Proxies() - for _, value := range proxies { - value.Delete() - } + os.RemoveAll(h.testDir) } } @@ -142,67 +182,67 @@ func teardown(t *testing.T) { // a MySQL primary and a Dolt replica, and asserts that a CREATE TABLE statement properly replicates to the // Dolt replica, along with simple insert, update, and delete statements. 
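The struct-based harness introduced above replaces the package-level globals the old helpers shared, which is what makes t.Parallel() safe and lets t.Cleanup guarantee teardown even when a test fails early. A condensed, illustrative sketch of the pattern (the field set and teardown are trimmed down from the real harness):

package example

import (
	"os"
	"testing"

	"github.com/jmoiron/sqlx"
)

// harness bundles what used to be package-level state, so each test owns its
// own servers, ports, and connections.
type harness struct {
	t               *testing.T
	mySqlPort       int
	doltPort        int
	primaryDatabase *sqlx.DB
	replicaDatabase *sqlx.DB
	testDir         string
}

// newHarness registers teardown with t.Cleanup, which runs even if the test
// fails or forgets a defer, and opts the test into parallel execution.
func newHarness(t *testing.T) *harness {
	h := &harness{t: t}
	t.Cleanup(h.teardown)
	t.Parallel()
	return h
}

func (h *harness) teardown() {
	if h.primaryDatabase != nil {
		h.primaryDatabase.Close()
	}
	if h.replicaDatabase != nil {
		h.replicaDatabase.Close()
	}
	// Keep the temp dir around on failure so logs can be inspected.
	if !h.t.Failed() {
		os.RemoveAll(h.testDir)
	}
}

func TestHarnessPattern(t *testing.T) {
	h := newHarness(t)
	_ = h // start servers, run assertions, rely on Cleanup for teardown
}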
func TestBinlogReplicationSanityCheck(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Create a table on the primary and verify on the replica - primaryDatabase.MustExec("create table tableT (pk int primary key)") - waitForReplicaToCatchUp(t) - assertCreateTableStatement(t, replicaDatabase, "tableT", + h.primaryDatabase.MustExec("create table tableT (pk int primary key)") + h.waitForReplicaToCatchUp() + assertCreateTableStatement(h.t, h.replicaDatabase, "tableT", "CREATE TABLE tableT ( pk int NOT NULL, PRIMARY KEY (pk)) "+ "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin") - assertRepoStateFileExists(t, "db01") + h.assertRepoStateFileExists("db01") // Insert/Update/Delete on the primary - primaryDatabase.MustExec("insert into tableT values(100), (200)") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.tableT", [][]any{{"100"}, {"200"}}) - primaryDatabase.MustExec("delete from tableT where pk = 100") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.tableT", [][]any{{"200"}}) - primaryDatabase.MustExec("update tableT set pk = 300") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.tableT", [][]any{{"300"}}) + h.primaryDatabase.MustExec("insert into tableT values(100), (200)") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.tableT", [][]any{{"100"}, {"200"}}) + h.primaryDatabase.MustExec("delete from tableT where pk = 100") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.tableT", [][]any{{"200"}}) + h.primaryDatabase.MustExec("update tableT set pk = 300") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.tableT", [][]any{{"300"}}) } // TestBinlogReplicationWithHundredsOfDatabases asserts that we can efficiently replicate the creation of hundreds of databases. 
func TestBinlogReplicationWithHundredsOfDatabases(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Create a table on the primary and verify on the replica - primaryDatabase.MustExec("create table tableT (pk int primary key)") - waitForReplicaToCatchUp(t) - assertCreateTableStatement(t, replicaDatabase, "tableT", + h.primaryDatabase.MustExec("create table tableT (pk int primary key)") + h.waitForReplicaToCatchUp() + assertCreateTableStatement(h.t, h.replicaDatabase, "tableT", "CREATE TABLE tableT ( pk int NOT NULL, PRIMARY KEY (pk)) "+ "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin") - assertRepoStateFileExists(t, "db01") + h.assertRepoStateFileExists("db01") // Create a few hundred databases on the primary and let them replicate to the replica dbCount := 300 startTime := time.Now() for i := range dbCount { dbName := fmt.Sprintf("db%03d", i) - primaryDatabase.MustExec(fmt.Sprintf("create database %s", dbName)) + h.primaryDatabase.MustExec(fmt.Sprintf("create database %s", dbName)) } - waitForReplicaToCatchUp(t) + h.waitForReplicaToCatchUp() endTime := time.Now() logrus.Infof("Time to replicate %d databases: %v", dbCount, endTime.Sub(startTime)) // Spot check the presence of a database on the replica - assertRepoStateFileExists(t, "db042") + h.assertRepoStateFileExists("db042") // Insert some data in one database startTime = time.Now() - primaryDatabase.MustExec("use db042;") - primaryDatabase.MustExec("create table t (pk int primary key);") - primaryDatabase.MustExec("insert into t values (100), (101), (102);") + h.primaryDatabase.MustExec("use db042;") + h.primaryDatabase.MustExec("create table t (pk int primary key);") + h.primaryDatabase.MustExec("insert into t values (100), (101), (102);") // Verify the results on the replica - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db042.t;", [][]any{{"100"}, {"101"}, {"102"}}) + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db042.t;", [][]any{{"100"}, {"101"}, {"102"}}) endTime = time.Now() logrus.Infof("Time to replicate inserts to 1 database (out of %d): %v", endTime.Sub(startTime), dbCount) } @@ -210,11 +250,11 @@ func TestBinlogReplicationWithHundredsOfDatabases(t *testing.T) { // TestAutoRestartReplica tests that a Dolt replica automatically starts up replication if // replication was running when the replica was shut down. 
func TestAutoRestartReplica(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) // Assert that replication is not running yet - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() require.Equal(t, "0", status["Last_IO_Errno"]) require.Equal(t, "", status["Last_IO_Error"]) require.Equal(t, "0", status["Last_SQL_Errno"]) @@ -223,30 +263,30 @@ func TestAutoRestartReplica(t *testing.T) { require.Equal(t, "No", status["Replica_SQL_Running"]) // Start up replication and replicate some test data - startReplicationAndCreateTestDb(t, mySqlPort) - primaryDatabase.MustExec("create table db01.autoRestartTest(pk int primary key);") - waitForReplicaToCatchUp(t) - primaryDatabase.MustExec("insert into db01.autoRestartTest values (100);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.autoRestartTest;", [][]any{{"100"}}) + h.startReplicationAndCreateTestDb(h.mySqlPort) + h.primaryDatabase.MustExec("create table db01.autoRestartTest(pk int primary key);") + h.waitForReplicaToCatchUp() + h.primaryDatabase.MustExec("insert into db01.autoRestartTest values (100);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.autoRestartTest;", [][]any{{"100"}}) // Test for the presence of the replica-running state file - require.True(t, fileExists(filepath.Join(testDir, "dolt", ".doltcfg", "replica-running"))) + require.True(t, fileExists(filepath.Join(h.testDir, "dolt", ".doltcfg", "replica-running"))) // Restart the Dolt replica - stopDoltSqlServer(t) + h.stopDoltSqlServer() var err error - doltPort, doltProcess, err = startDoltSqlServer(t, testDir, nil) + h.doltPort, h.doltProcess, err = h.startDoltSqlServer(nil) require.NoError(t, err) // Assert that some test data replicates correctly - primaryDatabase.MustExec("insert into db01.autoRestartTest values (200);") - waitForReplicaToCatchUp(t) - requireReplicaResults(t, "select * from db01.autoRestartTest;", + h.primaryDatabase.MustExec("insert into db01.autoRestartTest values (200);") + h.waitForReplicaToCatchUp() + h.requireReplicaResults("select * from db01.autoRestartTest;", [][]any{{"100"}, {"200"}}) // SHOW REPLICA STATUS should show that replication is running, with no errors - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, "0", status["Last_IO_Errno"]) require.Equal(t, "", status["Last_IO_Error"]) require.Equal(t, "0", status["Last_SQL_Errno"]) @@ -255,16 +295,16 @@ func TestAutoRestartReplica(t *testing.T) { require.Equal(t, "Yes", status["Replica_SQL_Running"]) // Stop replication and assert the replica-running marker file is removed - replicaDatabase.MustExec("stop replica") - require.False(t, fileExists(filepath.Join(testDir, "dolt", ".doltcfg", "replica-running"))) + h.replicaDatabase.MustExec("stop replica") + require.False(t, fileExists(filepath.Join(h.testDir, "dolt", ".doltcfg", "replica-running"))) // Restart the Dolt replica - stopDoltSqlServer(t) - doltPort, doltProcess, err = startDoltSqlServer(t, testDir, nil) + h.stopDoltSqlServer() + h.doltPort, h.doltProcess, err = h.startDoltSqlServer(nil) require.NoError(t, err) // SHOW REPLICA STATUS should show that replication is NOT running, with no errors - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, "0", status["Last_IO_Errno"]) require.Equal(t, "", status["Last_IO_Error"]) require.Equal(t, "0", status["Last_SQL_Errno"]) @@ -275,10 
+315,10 @@ func TestAutoRestartReplica(t *testing.T) { // TestBinlogSystemUserIsLocked tests that the binlog applier user is locked and cannot be used to connect to the server. func TestBinlogSystemUserIsLocked(t *testing.T) { - defer teardown(t) - startSqlServers(t) + h := newHarness(t) + h.startSqlServers() - dsn := fmt.Sprintf("%s@tcp(127.0.0.1:%v)/", binlogApplierUser, doltPort) + dsn := fmt.Sprintf("%s@tcp(127.0.0.1:%v)/", binlogApplierUser, h.doltPort) db, err := sqlx.Open("mysql", dsn) require.NoError(t, err) @@ -288,7 +328,7 @@ func TestBinlogSystemUserIsLocked(t *testing.T) { require.ErrorContains(t, err, "No authentication") // After starting replication, the system account is locked - startReplicationAndCreateTestDb(t, mySqlPort) + h.startReplicationAndCreateTestDb(h.mySqlPort) err = db.Ping() require.Error(t, err) require.ErrorContains(t, err, "Access denied for user") @@ -298,23 +338,23 @@ func TestBinlogSystemUserIsLocked(t *testing.T) { // including sending new Rotate and FormatDescription events to the replica. This is a simple sanity tests that we can // process the events without errors. func TestFlushLogs(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Make changes on the primary and verify on the replica - primaryDatabase.MustExec("create table t (pk int primary key)") - waitForReplicaToCatchUp(t) + h.primaryDatabase.MustExec("create table t (pk int primary key)") + h.waitForReplicaToCatchUp() expectedStatement := "CREATE TABLE t ( pk int NOT NULL, PRIMARY KEY (pk)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin" - assertCreateTableStatement(t, replicaDatabase, "t", expectedStatement) + assertCreateTableStatement(t, h.replicaDatabase, "t", expectedStatement) - primaryDatabase.MustExec("flush binary logs;") - waitForReplicaToCatchUp(t) + h.primaryDatabase.MustExec("flush binary logs;") + h.waitForReplicaToCatchUp() - primaryDatabase.MustExec("insert into t values (1), (2), (3);") - waitForReplicaToCatchUp(t) + h.primaryDatabase.MustExec("insert into t values (1), (2), (3);") + h.waitForReplicaToCatchUp() - requireReplicaResults(t, "select * from db01.t;", [][]any{ + h.requireReplicaResults("select * from db01.t;", [][]any{ {"1"}, {"2"}, {"3"}, }) } @@ -322,22 +362,22 @@ func TestFlushLogs(t *testing.T) { // TestResetReplica tests that "RESET REPLICA" and "RESET REPLICA ALL" correctly clear out // replication configuration and metadata. 
func TestResetReplica(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // RESET REPLICA returns an error if replication is running - _, err := replicaDatabase.Queryx("RESET REPLICA") + _, err := h.replicaDatabase.Queryx("RESET REPLICA") require.Error(t, err) require.ErrorContains(t, err, "unable to reset replica while replication is running") // Calling RESET REPLICA clears out any errors - replicaDatabase.MustExec("STOP REPLICA;") - rows, err := replicaDatabase.Queryx("RESET REPLICA;") + h.replicaDatabase.MustExec("STOP REPLICA;") + rows, err := h.replicaDatabase.Queryx("RESET REPLICA;") require.NoError(t, err) require.NoError(t, rows.Close()) - status := queryReplicaStatus(t) + status := h.queryReplicaStatus() require.Equal(t, "0", status["Last_Errno"]) require.Equal(t, "", status["Last_Error"]) require.Equal(t, "0", status["Last_IO_Errno"]) @@ -348,10 +388,10 @@ func TestResetReplica(t *testing.T) { require.Equal(t, "", status["Last_SQL_Error_Timestamp"]) // Calling RESET REPLICA ALL clears out all replica configuration - rows, err = replicaDatabase.Queryx("RESET REPLICA ALL;") + rows, err = h.replicaDatabase.Queryx("RESET REPLICA ALL;") require.NoError(t, err) require.NoError(t, rows.Close()) - status = queryReplicaStatus(t) + status = h.queryReplicaStatus() require.Equal(t, "", status["Source_Host"]) require.Equal(t, "", status["Source_User"]) require.Equal(t, "No", status["Replica_IO_Running"]) @@ -359,20 +399,20 @@ func TestResetReplica(t *testing.T) { // Now try querying the status using the older, deprecated 'show slave status' statement // and spot check that the data is the same, but the column names have changed - status = querySlaveStatus(t) + status = h.querySlaveStatus() require.Equal(t, "", status["Master_Host"]) require.Equal(t, "", status["Master_User"]) require.Equal(t, "No", status["Slave_IO_Running"]) require.Equal(t, "No", status["Slave_SQL_Running"]) - rows, err = replicaDatabase.Queryx("select * from mysql.slave_master_info;") + rows, err = h.replicaDatabase.Queryx("select * from mysql.slave_master_info;") require.NoError(t, err) require.False(t, rows.Next()) require.NoError(t, rows.Close()) // Start replication again and verify that we can still query replica status - startReplicationAndCreateTestDb(t, mySqlPort) - replicaStatus := showReplicaStatus(t) + h.startReplicationAndCreateTestDb(h.mySqlPort) + replicaStatus := h.showReplicaStatus() require.Equal(t, "0", replicaStatus["Last_Errno"]) require.Equal(t, "", replicaStatus["Last_Error"]) require.True(t, replicaStatus["Replica_IO_Running"] == binlogreplication.ReplicaIoRunning || @@ -382,126 +422,126 @@ func TestResetReplica(t *testing.T) { // TestStartReplicaErrors tests that the "START REPLICA" command returns appropriate responses // for various error conditions. 
func TestStartReplicaErrors(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) // START REPLICA returns an error when no replication source is configured - _, err := replicaDatabase.Queryx("START REPLICA;") + _, err := h.replicaDatabase.Queryx("START REPLICA;") require.Error(t, err) require.ErrorContains(t, err, ErrServerNotConfiguredAsReplica.Error()) // For an incomplete source configuration, throw an error as early as possible to make sure the user notices it. - replicaDatabase.MustExec("CHANGE REPLICATION SOURCE TO SOURCE_PORT=1234, SOURCE_HOST='localhost';") - rows, err := replicaDatabase.Queryx("START REPLICA;") + h.replicaDatabase.MustExec("CHANGE REPLICATION SOURCE TO SOURCE_PORT=1234, SOURCE_HOST='localhost';") + rows, err := h.replicaDatabase.Queryx("START REPLICA;") require.Error(t, err) require.ErrorContains(t, err, "Invalid (empty) username") require.Nil(t, rows) // SOURCE_AUTO_POSITION cannot be disabled – we only support GTID positioning - rows, err = replicaDatabase.Queryx("CHANGE REPLICATION SOURCE TO SOURCE_PORT=1234, " + + rows, err = h.replicaDatabase.Queryx("CHANGE REPLICATION SOURCE TO SOURCE_PORT=1234, " + "SOURCE_HOST='localhost', SOURCE_USER='replicator', SOURCE_AUTO_POSITION=0;") require.Error(t, err) require.ErrorContains(t, err, "Error 1105 (HY000): SOURCE_AUTO_POSITION cannot be disabled") require.Nil(t, rows) // START REPLICA logs a warning if replication is already running - startReplicationAndCreateTestDb(t, mySqlPort) - replicaDatabase.MustExec("START REPLICA;") - assertWarning(t, replicaDatabase, 3083, "Replication thread(s) for channel '' are already running.") + h.startReplicationAndCreateTestDb(h.mySqlPort) + h.replicaDatabase.MustExec("START REPLICA;") + assertWarning(t, h.replicaDatabase, 3083, "Replication thread(s) for channel '' are already running.") } // TestShowReplicaStatus tests various cases "SHOW REPLICA STATUS" that aren't covered by other tests. func TestShowReplicaStatus(t *testing.T) { - defer teardown(t) - startSqlServers(t) + h := newHarness(t) + h.startSqlServers() // Assert that very long hostnames are handled correctly longHostname := "really.really.really.really.long.host.name.012345678901234567890123456789012345678901234567890123456789.com" - replicaDatabase.MustExec(fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s';", longHostname)) - status := showReplicaStatus(t) + h.replicaDatabase.MustExec(fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s';", longHostname)) + status := h.showReplicaStatus() require.Equal(t, longHostname, status["Source_Host"]) } // TestStopReplica tests that STOP REPLICA correctly stops the replication process, and that // warnings are logged when STOP REPLICA is invoked when replication is not running. 
func TestStopReplica(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) // STOP REPLICA logs a warning if replication is not running - replicaDatabase.MustExec("STOP REPLICA;") - assertWarning(t, replicaDatabase, 3084, "Replication thread(s) for channel '' are already stopped.") + h.replicaDatabase.MustExec("STOP REPLICA;") + assertWarning(t, h.replicaDatabase, 3084, "Replication thread(s) for channel '' are already stopped.") // Start replication with bad connection params - replicaDatabase.MustExec("CHANGE REPLICATION SOURCE TO SOURCE_HOST='doesnotexist', SOURCE_PORT=111, SOURCE_USER='nobody';") - replicaDatabase.MustExec("START REPLICA;") + h.replicaDatabase.MustExec("CHANGE REPLICATION SOURCE TO SOURCE_HOST='doesnotexist', SOURCE_PORT=111, SOURCE_USER='nobody';") + h.replicaDatabase.MustExec("START REPLICA;") time.Sleep(200 * time.Millisecond) - status := showReplicaStatus(t) + status := h.showReplicaStatus() require.Equal(t, "Connecting", status["Replica_IO_Running"]) require.Equal(t, "Yes", status["Replica_SQL_Running"]) // STOP REPLICA works when replication cannot establish a connection - replicaDatabase.MustExec("STOP REPLICA;") - status = showReplicaStatus(t) + h.replicaDatabase.MustExec("STOP REPLICA;") + status = h.showReplicaStatus() require.Equal(t, "No", status["Replica_IO_Running"]) require.Equal(t, "No", status["Replica_SQL_Running"]) // START REPLICA and verify status - startReplicationAndCreateTestDb(t, mySqlPort) + h.startReplicationAndCreateTestDb(h.mySqlPort) time.Sleep(100 * time.Millisecond) - status = showReplicaStatus(t) + status = h.showReplicaStatus() require.True(t, status["Replica_IO_Running"] == "Connecting" || status["Replica_IO_Running"] == "Yes") require.Equal(t, "Yes", status["Replica_SQL_Running"]) // STOP REPLICA stops replication when it is running and connected to the source - replicaDatabase.MustExec("STOP REPLICA;") - status = showReplicaStatus(t) + h.replicaDatabase.MustExec("STOP REPLICA;") + status = h.showReplicaStatus() require.Equal(t, "No", status["Replica_IO_Running"]) require.Equal(t, "No", status["Replica_SQL_Running"]) // STOP REPLICA logs a warning if replication is not running - replicaDatabase.MustExec("STOP REPLICA;") - assertWarning(t, replicaDatabase, 3084, "Replication thread(s) for channel '' are already stopped.") + h.replicaDatabase.MustExec("STOP REPLICA;") + assertWarning(t, h.replicaDatabase, 3084, "Replication thread(s) for channel '' are already stopped.") } // TestDoltCommits tests that Dolt commits are created and use correct transaction boundaries. 
func TestDoltCommits(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // First transaction (DDL) - primaryDatabase.MustExec("create table t1 (pk int primary key);") + h.primaryDatabase.MustExec("create table t1 (pk int primary key);") // Second transaction (DDL) - primaryDatabase.MustExec("create table t2 (pk int primary key);") + h.primaryDatabase.MustExec("create table t2 (pk int primary key);") // Third transaction (autocommit DML) - primaryDatabase.MustExec("insert into t2 values (0);") + h.primaryDatabase.MustExec("insert into t2 values (0);") // Disable autocommit so we can manually control transactions - primaryDatabase.MustExec("set autocommit=0;") + h.primaryDatabase.MustExec("set autocommit=0;") // Fourth transaction (explicitly controlled transaction) - primaryDatabase.MustExec("start transaction;") - primaryDatabase.MustExec("insert into t1 values(1);") - primaryDatabase.MustExec("insert into t1 values(2);") - primaryDatabase.MustExec("insert into t1 values(3);") - primaryDatabase.MustExec("insert into t2 values(3), (2), (1);") - primaryDatabase.MustExec("commit;") + h.primaryDatabase.MustExec("start transaction;") + h.primaryDatabase.MustExec("insert into t1 values(1);") + h.primaryDatabase.MustExec("insert into t1 values(2);") + h.primaryDatabase.MustExec("insert into t1 values(3);") + h.primaryDatabase.MustExec("insert into t2 values(3), (2), (1);") + h.primaryDatabase.MustExec("commit;") // Verify Dolt commit on replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("select count(*) as count from db01.dolt_log;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("select count(*) as count from db01.dolt_log;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "5", row["count"]) require.NoError(t, rows.Close()) // Use dolt_diff so we can see what tables were edited and schema/data changes - replicaDatabase.MustExec("use db01;") + h.replicaDatabase.MustExec("use db01;") // Note: we don't use an order by clause, since the commits come in so quickly that they get the same timestamp - rows, err = replicaDatabase.Queryx("select * from db01.dolt_diff;") + rows, err = h.replicaDatabase.Queryx("select * from db01.dolt_diff;") require.NoError(t, err) // Fourth transaction @@ -537,7 +577,7 @@ func TestDoltCommits(t *testing.T) { require.NoError(t, rows.Close()) // Verify that commit timestamps are unique - rows, err = replicaDatabase.Queryx("select distinct date from db01.dolt_log;") + rows, err = h.replicaDatabase.Queryx("select distinct date from db01.dolt_log;") require.NoError(t, err) allRows := readAllRowsIntoMaps(t, rows) require.Equal(t, 5, len(allRows)) // 4 transactions + 1 initial commit @@ -546,38 +586,38 @@ func TestDoltCommits(t *testing.T) { // TestForeignKeyChecks tests that foreign key constraints replicate correctly when foreign key checks are // enabled and disabled. 
func TestForeignKeyChecks(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Test that we can execute statement-based replication that requires foreign_key_checks // being turned off (referenced table doesn't exist yet). - primaryDatabase.MustExec("SET foreign_key_checks = 0;") - primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, color varchar(100), FOREIGN KEY (color) REFERENCES colors(name));") - primaryDatabase.MustExec("CREATE TABLE colors (name varchar(100) primary key);") - primaryDatabase.MustExec("SET foreign_key_checks = 1;") + h.primaryDatabase.MustExec("SET foreign_key_checks = 0;") + h.primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, color varchar(100), FOREIGN KEY (color) REFERENCES colors(name));") + h.primaryDatabase.MustExec("CREATE TABLE colors (name varchar(100) primary key);") + h.primaryDatabase.MustExec("SET foreign_key_checks = 1;") // Insert a record with foreign key checks enabled - primaryDatabase.MustExec("START TRANSACTION;") - primaryDatabase.MustExec("INSERT INTO colors VALUES ('green'), ('red'), ('blue');") - primaryDatabase.MustExec("INSERT INTO t1 VALUES (1, 'red'), (2, 'green');") - primaryDatabase.MustExec("COMMIT;") + h.primaryDatabase.MustExec("START TRANSACTION;") + h.primaryDatabase.MustExec("INSERT INTO colors VALUES ('green'), ('red'), ('blue');") + h.primaryDatabase.MustExec("INSERT INTO t1 VALUES (1, 'red'), (2, 'green');") + h.primaryDatabase.MustExec("COMMIT;") // Test the Insert path with foreign key checks turned off - primaryDatabase.MustExec("START TRANSACTION;") - primaryDatabase.MustExec("SET foreign_key_checks = 0;") - primaryDatabase.MustExec("INSERT INTO t1 VALUES (3, 'not-a-color');") - primaryDatabase.MustExec("COMMIT;") + h.primaryDatabase.MustExec("START TRANSACTION;") + h.primaryDatabase.MustExec("SET foreign_key_checks = 0;") + h.primaryDatabase.MustExec("INSERT INTO t1 VALUES (3, 'not-a-color');") + h.primaryDatabase.MustExec("COMMIT;") // Test the Update and Delete paths with foreign key checks turned off - primaryDatabase.MustExec("START TRANSACTION;") - primaryDatabase.MustExec("DELETE FROM colors WHERE name='red';") - primaryDatabase.MustExec("UPDATE t1 SET color='still-not-a-color' WHERE pk=2;") - primaryDatabase.MustExec("COMMIT;") + h.primaryDatabase.MustExec("START TRANSACTION;") + h.primaryDatabase.MustExec("DELETE FROM colors WHERE name='red';") + h.primaryDatabase.MustExec("UPDATE t1 SET color='still-not-a-color' WHERE pk=2;") + h.primaryDatabase.MustExec("COMMIT;") // Verify the changes on the replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("select * from db01.t1 order by pk;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("select * from db01.t1 order by pk;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "1", row["pk"]) @@ -591,7 +631,7 @@ func TestForeignKeyChecks(t *testing.T) { require.False(t, rows.Next()) require.NoError(t, rows.Close()) - rows, err = replicaDatabase.Queryx("select * from db01.colors order by name;") + rows, err = h.replicaDatabase.Queryx("select * from db01.colors order by name;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "blue", row["name"]) @@ -603,24 +643,24 @@ func 
TestForeignKeyChecks(t *testing.T) { // TestCharsetsAndCollations tests that we can successfully replicate data using various charsets and collations. func TestCharsetsAndCollations(t *testing.T) { - defer teardown(t) - startSqlServersWithDoltSystemVars(t, doltReplicaSystemVars) - startReplicationAndCreateTestDb(t, mySqlPort) + h := newHarness(t) + h.startSqlServersWithDoltSystemVars(doltReplicaSystemVars) + h.startReplicationAndCreateTestDb(h.mySqlPort) // Use non-default charset/collations to create data on the primary - primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, c1 varchar(255) COLLATE ascii_general_ci, c2 varchar(255) COLLATE utf16_general_ci);") - primaryDatabase.MustExec("insert into t1 values (1, \"one\", \"one\");") + h.primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, c1 varchar(255) COLLATE ascii_general_ci, c2 varchar(255) COLLATE utf16_general_ci);") + h.primaryDatabase.MustExec("insert into t1 values (1, \"one\", \"one\");") // Verify on the replica - waitForReplicaToCatchUp(t) - rows, err := replicaDatabase.Queryx("show create table db01.t1;") + h.waitForReplicaToCatchUp() + rows, err := h.replicaDatabase.Queryx("show create table db01.t1;") require.NoError(t, err) row := convertMapScanResultToStrings(readNextRow(t, rows)) require.Contains(t, row["Create Table"], "ascii_general_ci") require.Contains(t, row["Create Table"], "utf16_general_ci") require.NoError(t, rows.Close()) - rows, err = replicaDatabase.Queryx("select * from db01.t1;") + rows, err = h.replicaDatabase.Queryx("select * from db01.t1;") require.NoError(t, err) row = convertMapScanResultToStrings(readNextRow(t, rows)) require.Equal(t, "one", row["c1"]) @@ -635,14 +675,14 @@ func TestCharsetsAndCollations(t *testing.T) { // waitForReplicaToCatchUp waits for the replica to catch up with the primary database. The // lag is measured by checking that gtid_executed is the same on the primary and replica. If // no progress is made in 30 seconds, this function will fail the test. -func waitForReplicaToCatchUp(t *testing.T) { +func (h *harness) waitForReplicaToCatchUp() { timeLimit := 30 * time.Second lastReplicaGtid := "" endTime := time.Now().Add(timeLimit) for time.Now().Before(endTime) { - replicaGtid := queryGtid(t, replicaDatabase) - primaryGtid := queryGtid(t, primaryDatabase) + replicaGtid := queryGtid(h.t, h.replicaDatabase) + primaryGtid := queryGtid(h.t, h.primaryDatabase) if primaryGtid == replicaGtid { return @@ -650,46 +690,46 @@ func waitForReplicaToCatchUp(t *testing.T) { lastReplicaGtid = replicaGtid endTime = time.Now().Add(timeLimit) } else { - t.Logf("primary and replica not in sync yet... (primary: %s, replica: %s)\n", primaryGtid, replicaGtid) + h.t.Logf("primary and replica not in sync yet... (primary: %s, replica: %s)\n", primaryGtid, replicaGtid) time.Sleep(250 * time.Millisecond) } } // Log some status of the replica, before failing the test - outputShowReplicaStatus(t) - t.Fatal("primary and replica did not synchronize within " + timeLimit.String()) + h.outputShowReplicaStatus() + h.t.Fatal("primary and replica did not synchronize within " + timeLimit.String()) } // waitForReplicaToReachGtid waits (up to 10s) for the replica's @@gtid_executed sys var to show that // it has executed the |target| gtid transaction number. 
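waitForReplicaToReachGtid, shown next, extracts the highest executed transaction number from the replica's @@gtid_executed value. A minimal sketch of that parsing in isolation, assuming a single source UUID and a single interval as these tests do:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// maxExecutedTxNum extracts the highest transaction number from a single-source
// GTID set such as "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-17". It assumes one
// source UUID and one interval, which holds for these tests but not in general.
func maxExecutedTxNum(gtidSet string) (int, error) {
	parts := strings.Split(gtidSet, ":")
	if len(parts) != 2 {
		return 0, fmt.Errorf("unexpected GTID set: %q", gtidSet)
	}
	interval := parts[1]
	if idx := strings.LastIndex(interval, "-"); idx >= 0 {
		interval = interval[idx+1:]
	}
	return strconv.Atoi(interval)
}

func main() {
	n, err := maxExecutedTxNum("3E11FA47-71CA-11E1-9E33-C80AA9429562:1-17")
	fmt.Println(n, err) // 17 <nil>
}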
-func waitForReplicaToReachGtid(t *testing.T, target int) { +func (h *harness) waitForReplicaToReachGtid(target int) { timeLimit := 10 * time.Second endTime := time.Now().Add(timeLimit) for time.Now().Before(endTime) { time.Sleep(250 * time.Millisecond) - replicaGtid := queryGtid(t, replicaDatabase) + replicaGtid := queryGtid(h.t, h.replicaDatabase) if replicaGtid != "" { components := strings.Split(replicaGtid, ":") - require.Equal(t, 2, len(components)) + require.Equal(h.t, 2, len(components)) sourceGtid := components[1] if strings.Contains(sourceGtid, "-") { gtidRange := strings.Split(sourceGtid, "-") - require.Equal(t, 2, len(gtidRange)) + require.Equal(h.t, 2, len(gtidRange)) sourceGtid = gtidRange[1] } i, err := strconv.Atoi(sourceGtid) - require.NoError(t, err) + require.NoError(h.t, err) if i >= target { return } } - t.Logf("replica has not reached transaction %d yet; currently at: %s \n", target, replicaGtid) + h.t.Logf("replica has not reached transaction %d yet; currently at: %s \n", target, replicaGtid) } - t.Fatal("replica did not reach target GTID within " + timeLimit.String()) + h.t.Fatal("replica did not reach target GTID within " + timeLimit.String()) } // assertWarning asserts that the specified |database| has a warning with |code| and |message|, @@ -755,108 +795,89 @@ func readAllRowsIntoSlices(t *testing.T, rows *sqlx.Rows) [][]any { } // startSqlServers starts a MySQL server and a Dolt sql-server for use in tests. -func startSqlServers(t *testing.T) { - startSqlServersWithDoltSystemVars(t, nil) +func (h *harness) startSqlServers() { + h.startSqlServersWithDoltSystemVars(nil) } // startSqlServersWithDoltSystemVars starts a MySQL server and a Dolt sql-server for use in tests. Before the // Dolt sql-server is started, the specified |doltPersistentSystemVars| are persisted in the Dolt sql-server's // local configuration. These are useful when you need to set system variables that must be available when the // sql-server starts up, such as replication system variables. -func startSqlServersWithDoltSystemVars(t *testing.T, doltPersistentSystemVars map[string]string) { +func (h *harness) startSqlServersWithDoltSystemVars(doltPersistentSystemVars map[string]string) { if runtime.GOOS == "windows" { - t.Skip("Skipping binlog replication integ tests on Windows OS") + h.t.Skip("Skipping binlog replication integ tests on Windows OS") } else if runtime.GOOS == "darwin" && os.Getenv("CI") == "true" { - t.Skip("Skipping binlog replication integ tests in CI environment on Mac OS") + h.t.Skip("Skipping binlog replication integ tests in CI environment on Mac OS") } - testDir = filepath.Join(os.TempDir(), fmt.Sprintf("%s-%v", t.Name(), time.Now().Unix())) - err := os.MkdirAll(testDir, 0777) - require.NoError(t, err) - t.Logf("temp dir: %v \n", testDir) + h.testDir = filepath.Join(os.TempDir(), fmt.Sprintf("%s-%v", h.t.Name(), time.Now().Unix())) + err := os.MkdirAll(h.testDir, 0777) + require.NoError(h.t, err) + h.t.Logf("temp dir: %v \n", h.testDir) // Start up primary and replica databases - mySqlPort, mySqlProcess, err = startMySqlServer(t, testDir) - require.NoError(t, err) - doltPort, doltProcess, err = startDoltSqlServer(t, testDir, doltPersistentSystemVars) - require.NoError(t, err) + h.mySqlPort, h.mySqlProcess, err = h.startMySqlServer() + require.NoError(h.t, err) + h.doltPort, h.doltProcess, err = h.startDoltSqlServer(doltPersistentSystemVars) + require.NoError(h.t, err) } // stopMySqlServer stops the running MySQL server. 
If any errors are encountered while stopping // the MySQL server, this function will fail the current test. -func stopMySqlServer(t *testing.T) { - require.NoError(t, mySqlProcess.Kill()) +func (h *harness) stopMySqlServer() { + require.NoError(h.t, StopProcess(h.mySqlProcess)) } // stopDoltSqlServer stops the running Dolt sql-server. If any errors are encountered while // stopping the Dolt sql-server, this function will fail the current test. -func stopDoltSqlServer(t *testing.T) { - // Use the negative process ID so that we grab the entire process group. - // This is necessary to kill all the processes the child spawns. - // Note that we use os.FindProcess, instead of syscall.Kill, since syscall.Kill - // is not available on windows. - p, err := os.FindProcess(-doltProcess.Pid) - require.NoError(t, err) - - err = p.Signal(syscall.SIGKILL) - require.NoError(t, err) - time.Sleep(250 * time.Millisecond) +func (h *harness) stopDoltSqlServer() { + require.NoError(h.t, StopProcess(h.doltProcess)) // Remove the sql-server lock file so that we can restart cleanly - lockFilepath := filepath.Join(testDir, "dolt", "db01", ".dolt", "sql-server.lock") - stat, _ := os.Stat(lockFilepath) - if stat != nil { - err = os.Remove(lockFilepath) - require.NoError(t, err) - } + lockFilepath := filepath.Join(h.testDir, "dolt", "db01", ".dolt", "sql-server.lock") + _, err := os.Stat(lockFilepath) + require.ErrorIs(h.t, err, os.ErrNotExist) + // Remove the global sql-server lock file as well - lockFilepath = filepath.Join(testDir, "dolt", ".dolt", "sql-server.lock") - stat, _ = os.Stat(lockFilepath) - if stat != nil { - err = os.Remove(lockFilepath) - require.NoError(t, err) - } + lockFilepath = filepath.Join(h.testDir, "dolt", ".dolt", "sql-server.lock") + _, err = os.Stat(lockFilepath) + require.ErrorIs(h.t, err, os.ErrNotExist) } // startReplication configures the replication source on the replica and runs the START REPLICA statement. -func startReplication(t *testing.T, port int) { - defer func() { - if r := recover(); r != nil { - t.Fatalf("failed to start replication and caught a panic: %v", r) - } - }() - replicaDatabase.MustExec( +func (h *harness) startReplication(port int) { + h.replicaDatabase.MustExec( fmt.Sprintf("change replication source to SOURCE_HOST='localhost', "+ "SOURCE_USER='replicator', SOURCE_PASSWORD='Zqr8_blrGm1!', "+ "SOURCE_PORT=%v, SOURCE_AUTO_POSITION=1, SOURCE_CONNECT_RETRY=5;", port)) - replicaDatabase.MustExec("start replica;") + h.replicaDatabase.MustExec("start replica;") } // startReplicationAndCreateTestDb starts up replication on the replica, connecting to |port| on the primary, // creates the test database, db01, on the primary, and ensures it gets replicated to the replica. -func startReplicationAndCreateTestDb(t *testing.T, port int) { - startReplicationAndCreateTestDbWithDelay(t, port, 100*time.Millisecond) +func (h *harness) startReplicationAndCreateTestDb(port int) { + h.startReplicationAndCreateTestDbWithDelay(port, 100*time.Millisecond) } // startReplicationAndCreateTestDbWithDelay starts up replication on the replica, connecting to |port| on the primary, // pauses for |delay| before creating the test database, db01, on the primary, and ensures it // gets replicated to the replica. 
-func startReplicationAndCreateTestDbWithDelay(t *testing.T, port int, delay time.Duration) { - startReplication(t, port) +func (h *harness) startReplicationAndCreateTestDbWithDelay(port int, delay time.Duration) { + h.startReplication(port) time.Sleep(delay) // Look to see if the test database, db01, has been created yet. If not, create it and wait for it to // replicate to the replica. Note that when re-starting replication in certain tests, we can't rely on // the replica to contain all GTIDs (i.e. Dolt -> MySQL replication when restarting the replica, since // Dolt doesn't yet resend events that occurred while the replica wasn't connected). - dbNames := mustListDatabases(t, primaryDatabase) + dbNames := mustListDatabases(h.t, h.primaryDatabase) if !slices.Contains(dbNames, "db01") { - primaryDatabase.MustExec("create database db01;") - waitForReplicaToCatchUp(t) + h.primaryDatabase.MustExec("create database db01;") + h.waitForReplicaToCatchUp() } - primaryDatabase.MustExec("use db01;") - _, _ = replicaDatabase.Exec("use db01;") + h.primaryDatabase.MustExec("use db01;") + _, _ = h.replicaDatabase.Exec("use db01;") } func assertCreateTableStatement(t *testing.T, database *sqlx.DB, table string, expectedStatement string) { @@ -901,14 +922,15 @@ func findFreePort() int { // startMySqlServer configures a starts a fresh MySQL server instance and returns the port it is running on, // and the os.Process handle. If unable to start up the MySQL server, an error is returned. -func startMySqlServer(t *testing.T, dir string) (int, *os.Process, error) { +func (h *harness) startMySqlServer() (int, *os.Process, error) { + dir := h.testDir dir = filepath.Join(dir, "mysql") err := os.MkdirAll(dir, 0777) if err != nil { return -1, nil, err } - mySqlPort = findFreePort() + h.mySqlPort = findFreePort() // MySQL will NOT start up as the root user, so if we're running as root // (e.g. in a CI env), use the "mysql" user instead. 
@@ -918,7 +940,7 @@ func startMySqlServer(t *testing.T, dir string) (int, *os.Process, error) { } username := user.Username if username == "root" { - t.Logf("overriding current user (root) to run mysql as 'mysql' user instead\n") + h.t.Logf("overriding current user (root) to run mysql as 'mysql' user instead\n") username = "mysql" } @@ -929,52 +951,54 @@ func startMySqlServer(t *testing.T, dir string) (int, *os.Process, error) { initialized := directoryExists(filepath.Join(dataDir, "mysql")) if !initialized { // Create a fresh MySQL server for the primary - initCmd := exec.Command("mysqld", + initCmd := exec.CommandContext(commandCtx, "mysqld", "--no-defaults", "--user="+username, "--initialize-insecure", "--datadir="+dataDir, "--default-authentication-plugin=mysql_native_password") initCmd.Dir = dir + ApplyCmdAttributes(initCmd) output, err := initCmd.CombinedOutput() if err != nil { return -1, nil, fmt.Errorf("unable to execute command %v: %v – %v", initCmd.String(), err.Error(), string(output)) } } - cmd := exec.Command("mysqld", + cmd := exec.CommandContext(commandCtx, "mysqld", "--no-defaults", "--user="+username, "--datadir="+dataDir, "--gtid-mode=ON", "--skip-replica-start=ON", "--enforce-gtid-consistency=ON", - fmt.Sprintf("--port=%v", mySqlPort), + fmt.Sprintf("--port=%v", h.mySqlPort), "--server-id=11223344", - fmt.Sprintf("--socket=mysql-%v.sock", mySqlPort), + fmt.Sprintf("--socket=mysql-%v.sock", h.mySqlPort), "--general_log_file="+filepath.Join(dir, "general_log"), "--slow_query_log_file="+filepath.Join(dir, "slow_query_log"), "--log-error="+dir+"error_log", - fmt.Sprintf("--pid-file="+filepath.Join(dir, "pid-%v.pid"), mySqlPort)) + fmt.Sprintf("--pid-file="+filepath.Join(dir, "pid-%v.pid"), h.mySqlPort)) cmd.Dir = dir + ApplyCmdAttributes(cmd) - mysqlLogFilePath = filepath.Join(dir, fmt.Sprintf("mysql-%d.out.log", time.Now().Unix())) - mysqlLogFile, err = os.Create(mysqlLogFilePath) + h.mysqlLogFilePath = filepath.Join(dir, fmt.Sprintf("mysql-%d.out.log", time.Now().Unix())) + h.mysqlLogFile, err = os.Create(h.mysqlLogFilePath) if err != nil { return -1, nil, err } - t.Logf("MySQL server logs at: %s \n", mysqlLogFilePath) - cmd.Stdout = mysqlLogFile - cmd.Stderr = mysqlLogFile + h.t.Logf("MySQL server logs at: %s \n", h.mysqlLogFilePath) + cmd.Stdout = h.mysqlLogFile + cmd.Stderr = h.mysqlLogFile err = cmd.Start() if err != nil { return -1, nil, fmt.Errorf("unable to start process %q: %v", cmd.String(), err.Error()) } - dsn := fmt.Sprintf("root@tcp(127.0.0.1:%v)/", mySqlPort) - primaryDatabase = sqlx.MustOpen("mysql", dsn) + dsn := fmt.Sprintf("root@tcp(127.0.0.1:%v)/", h.mySqlPort) + h.primaryDatabase = sqlx.MustOpen("mysql", dsn) - err = waitForSqlServerToStart(t, primaryDatabase) + err = waitForSqlServerToStart(h.t, h.primaryDatabase) if err != nil { return -1, nil, err } @@ -982,15 +1006,15 @@ func startMySqlServer(t *testing.T, dir string) (int, *os.Process, error) { // Ensure the replication user exists with the right grants when we initialize // the MySQL server for the first time if !initialized { - mustCreateReplicatorUser(primaryDatabase) + mustCreateReplicatorUser(h.primaryDatabase) } - dsn = fmt.Sprintf("root@tcp(127.0.0.1:%v)/", mySqlPort) - primaryDatabase = sqlx.MustOpen("mysql", dsn) + dsn = fmt.Sprintf("root@tcp(127.0.0.1:%v)/", h.mySqlPort) + h.primaryDatabase = sqlx.MustOpen("mysql", dsn) - t.Logf("MySQL server started on port %v \n", mySqlPort) + h.t.Logf("MySQL server started on port %v \n", h.mySqlPort) - return mySqlPort, cmd.Process, nil + return 
h.mySqlPort, cmd.Process, nil } // directoryExists returns true if the specified |path| is to a directory that exists, otherwise, @@ -1006,7 +1030,8 @@ func directoryExists(path string) bool { // startDoltSqlServer starts a Dolt sql-server on a free port from the specified directory |dir|. If // |doltPeristentSystemVars| is populated, then those system variables will be set, persistently, for // the Dolt database, before the Dolt sql-server is started. -func startDoltSqlServer(t *testing.T, dir string, doltPersistentSystemVars map[string]string) (int, *os.Process, error) { +func (h *harness) startDoltSqlServer(doltPersistentSystemVars map[string]string) (int, *os.Process, error) { + dir := h.testDir dir = filepath.Join(dir, "dolt") err := os.MkdirAll(dir, 0777) if err != nil { @@ -1015,24 +1040,24 @@ func startDoltSqlServer(t *testing.T, dir string, doltPersistentSystemVars map[s // If we already assigned a port, re-use it. This is useful when testing restarting a primary, since // we want the primary to come back up on the same port, so the replica can reconnect. - if doltPort < 1 { - doltPort = findFreePort() + if h.doltPort < 1 { + h.doltPort = findFreePort() } - t.Logf("Starting Dolt sql-server on port: %d, with data dir %s\n", doltPort, dir) + h.t.Logf("Starting Dolt sql-server on port: %d, with data dir %s\n", h.doltPort, dir) // use an admin user NOT named "root" to test that we don't require the "root" account adminUser := "admin" if doltPersistentSystemVars != nil && len(doltPersistentSystemVars) > 0 { // Initialize the dolt directory first - err = runDoltCommand(t, dir, "init", "--name=binlog-test", "--email=binlog@test") + err = runDoltCommand(h.t, dir, "init", "--name=binlog-test", "--email=binlog@test") if err != nil { return -1, nil, err } for systemVar, value := range doltPersistentSystemVars { query := fmt.Sprintf("SET @@PERSIST.%s=%s;", systemVar, value) - err = runDoltCommand(t, dir, "sql", fmt.Sprintf("-q=%s", query)) + err = runDoltCommand(h.t, dir, "sql", fmt.Sprintf("-q=%s", query)) if err != nil { return -1, nil, err } @@ -1043,55 +1068,46 @@ func startDoltSqlServer(t *testing.T, dir string, doltPersistentSystemVars map[s "sql-server", fmt.Sprintf("-u%s", adminUser), "--loglevel=TRACE", + "--socket=/dev/null", fmt.Sprintf("--data-dir=%s", dir), - fmt.Sprintf("--port=%v", doltPort)} + fmt.Sprintf("--port=%v", h.doltPort)} - cmd := exec.Command(args[0], args[1:]...) - - // Set a unique process group ID so that we can cleanly kill this process, as well as - // any spawned child processes later. Mac/Unix can set the "Setpgid" field directly, but - // on windows, this field isn't present, so we need to use reflection so that this code - // can still compile for windows, even though we don't run it there. - procAttr := &syscall.SysProcAttr{} - ps := reflect.ValueOf(procAttr) - s := ps.Elem() - f := s.FieldByName("Setpgid") - f.SetBool(true) - cmd.SysProcAttr = procAttr + cmd := exec.CommandContext(commandCtx, args[0], args[1:]...) + ApplyCmdAttributes(cmd) // Some tests restart the Dolt sql-server, so if we have a current log file, save a reference // to it so we can print the results later if the test fails. 
- if doltLogFilePath != "" { - oldDoltLogFilePath = doltLogFilePath + if h.doltLogFilePath != "" { + h.oldDoltLogFilePath = h.doltLogFilePath } - doltLogFilePath = filepath.Join(dir, fmt.Sprintf("dolt-%d.out.log", time.Now().Unix())) - doltLogFile, err = os.Create(doltLogFilePath) + h.doltLogFilePath = filepath.Join(dir, fmt.Sprintf("dolt-%d.out.log", time.Now().Unix())) + h.doltLogFile, err = os.Create(h.doltLogFilePath) if err != nil { return -1, nil, err } - t.Logf("dolt sql-server logs at: %s \n", doltLogFilePath) - cmd.Stdout = doltLogFile - cmd.Stderr = doltLogFile + h.t.Logf("dolt sql-server logs at: %s \n", h.doltLogFilePath) + cmd.Stdout = h.doltLogFile + cmd.Stderr = h.doltLogFile err = cmd.Start() if err != nil { return -1, nil, fmt.Errorf("unable to execute command %v: %v", cmd.String(), err.Error()) } - t.Logf("Dolt CMD: %s\n", cmd.String()) + h.t.Logf("Dolt CMD: %s\n", cmd.String()) - dsn := fmt.Sprintf("%s@tcp(127.0.0.1:%v)/", adminUser, doltPort) - replicaDatabase = sqlx.MustOpen("mysql", dsn) + dsn := fmt.Sprintf("%s@tcp(127.0.0.1:%v)/", adminUser, h.doltPort) + h.replicaDatabase = sqlx.MustOpen("mysql", dsn) - err = waitForSqlServerToStart(t, replicaDatabase) + err = waitForSqlServerToStart(h.t, h.replicaDatabase) if err != nil { return -1, nil, err } - mustCreateReplicatorUser(replicaDatabase) - t.Logf("Dolt server started on port %v \n", doltPort) + mustCreateReplicatorUser(h.replicaDatabase) + h.t.Logf("Dolt server started on port %v \n", h.doltPort) - return doltPort, cmd.Process, nil + return h.doltPort, cmd.Process, nil } // mustCreateReplicatorUser creates the replicator user on the specified |db| and grants them replication slave privs. @@ -1108,8 +1124,9 @@ func runDoltCommand(t *testing.T, doltDataDir string, doltArgs ...string) error args := append([]string{DoltDevBuildPath(), fmt.Sprintf("--data-dir=%s", doltDataDir)}, doltArgs...) - cmd := exec.Command(args[0], args[1:]...) + cmd := exec.CommandContext(commandCtx, args[0], args[1:]...) t.Logf("Running Dolt CMD: %s\n", cmd.String()) + ApplyCmdAttributes(cmd) output, err := cmd.CombinedOutput() t.Logf("Dolt CMD output: %s\n", string(output)) if err != nil { @@ -1160,27 +1177,27 @@ func printFile(t *testing.T, path string) { // assertRepoStateFileExists asserts that the repo_state.json file is present for the specified // database |db|. -func assertRepoStateFileExists(t *testing.T, db string) { - repoStateFile := filepath.Join(testDir, "dolt", db, ".dolt", "repo_state.json") +func (h *harness) assertRepoStateFileExists(db string) { + repoStateFile := filepath.Join(h.testDir, "dolt", db, ".dolt", "repo_state.json") _, err := os.Stat(repoStateFile) - require.NoError(t, err) + require.NoError(h.t, err) } // requireReplicaResults runs the specified |query| on the replica database and asserts that the results match // |expectedResults|. Note that the actual results are converted to string values in almost all cases, due to // limitations in the SQL library we use to query the replica database, so |expectedResults| should generally // be expressed in strings. -func requireReplicaResults(t *testing.T, query string, expectedResults [][]any) { - requireResults(t, replicaDatabase, query, expectedResults) +func (h *harness) requireReplicaResults(query string, expectedResults [][]any) { + requireResults(h.t, h.replicaDatabase, query, expectedResults) } // requireReplicaResults runs the specified |query| on the primary database and asserts that the results match // |expectedResults|. 
Note that the actual results are converted to string values in almost all cases, due to // limitations in the SQL library we use to query the replica database, so |expectedResults| should generally // be expressed in strings. -func requirePrimaryResults(t *testing.T, query string, expectedResults [][]any) { - requireResults(t, primaryDatabase, query, expectedResults) +func (h *harness) requirePrimaryResults(query string, expectedResults [][]any) { + requireResults(h.t, h.primaryDatabase, query, expectedResults) } func requireResults(t *testing.T, db *sqlx.DB, query string, expectedResults [][]any) { @@ -1196,11 +1213,11 @@ func requireResults(t *testing.T, db *sqlx.DB, query string, expectedResults [][ // queryReplicaStatus returns the results of `SHOW REPLICA STATUS` as a map, for the replica // database. If any errors are encountered, this function will fail the current test. -func queryReplicaStatus(t *testing.T) map[string]any { - rows, err := replicaDatabase.Queryx("SHOW REPLICA STATUS;") - require.NoError(t, err) - status := convertMapScanResultToStrings(readNextRow(t, rows)) - require.NoError(t, rows.Close()) +func (h *harness) queryReplicaStatus() map[string]any { + rows, err := h.replicaDatabase.Queryx("SHOW REPLICA STATUS;") + require.NoError(h.t, err) + status := convertMapScanResultToStrings(readNextRow(h.t, rows)) + require.NoError(h.t, rows.Close()) return status } @@ -1209,11 +1226,11 @@ func queryReplicaStatus(t *testing.T) map[string]any { // The queryReplicaStatus() function should generally be favored over this function for // getting the status of a replica. This function exists only to help test that the // deprecated 'show slave status' statement works. -func querySlaveStatus(t *testing.T) map[string]any { - rows, err := replicaDatabase.Queryx("SHOW SLAVE STATUS;") - require.NoError(t, err) - status := convertMapScanResultToStrings(readNextRow(t, rows)) - require.NoError(t, rows.Close()) +func (h *harness) querySlaveStatus() map[string]any { + rows, err := h.replicaDatabase.Queryx("SHOW SLAVE STATUS;") + require.NoError(h.t, err) + status := convertMapScanResultToStrings(readNextRow(h.t, rows)) + require.NoError(h.t, rows.Close()) return status } diff --git a/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_unix_test.go b/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_unix_test.go new file mode 100644 index 0000000000..f0284111a4 --- /dev/null +++ b/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_unix_test.go @@ -0,0 +1,72 @@ +// Copyright 2025 Dolthub, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd + +package binlogreplication + +import ( + "os" + "os/exec" + "os/signal" + "syscall" + "time" +) + +func ApplyCmdAttributes(cmd *exec.Cmd) { + // Nothing... 
+} + +func StopProcess(proc *os.Process) error { + err := proc.Signal(syscall.SIGTERM) + if err != nil { + return err + } + _, err = proc.Wait() + return err +} + +// These tests spawn child processes for go compiling, dolt sql-server, +// and for mysqld. We would like to clean up these child processes +// when the program exits. In general, we use *testing.T.Cleanup to +// terminate any running processes associated with the test. +// +// On a shell, when a user runs 'go test .', and then they deliver +// an interrupt, '^C', the shell delivers a SIGINT to the process +// group of the foreground process. In our case, `dolt`, `go`, and +// the default signal handler for the golang runtime (this test +// program) will all terminate on delivery of a SIGINT. +// `mysqld`, however, does not terminate on receiving SIGINT. Thus, +// we install a handler here, and we translate the interrupt into +// a SIGTERM against the process group. That will get `mysqld` to +// shut down as well. +func InstallSignalHandlers() { + interrupts := make(chan os.Signal, 1) + signal.Notify(interrupts, os.Interrupt) + go func() { + <-interrupts + // |mysqld| will exit on SIGTERM + syscall.Kill(-os.Getpid(), syscall.SIGTERM) + time.Sleep(1 * time.Second) + // Canceling this context will cause os.Process.Kill + // to get called on any still-running processes. + commandCtxCancel() + time.Sleep(1 * time.Second) + // Redeliver SIGINT to ourselves with the default + // signal handler restored. + signal.Reset(os.Interrupt) + syscall.Kill(-os.Getpid(), syscall.SIGINT) + }() +} diff --git a/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_windows_test.go b/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_windows_test.go new file mode 100644 index 0000000000..871c8252fc --- /dev/null +++ b/go/libraries/doltcore/sqle/binlogreplication/cmd_attributes_windows_test.go @@ -0,0 +1,50 @@ +// Copyright 2025 Dolthub, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package binlogreplication + +import ( + "os" + "os/exec" + "syscall" + + "golang.org/x/sys/windows" +) + +func ApplyCmdAttributes(cmd *exec.Cmd) { + // Creating a new process group for the process will allow StopProcess to send the break signal to that process + // without also killing the parent process + cmd.SysProcAttr = &syscall.SysProcAttr{ + CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, + } +} + +func StopProcess(proc *os.Process) error { + err := windows.GenerateConsoleCtrlEvent(windows.CTRL_BREAK_EVENT, uint32(proc.Pid)) + if err != nil { + return err + } + _, err = proc.Wait() + return err +} + +// I don't know if there is any magic necessary here, but regardless, +// we don't run these tests on Windows, so there are never child +// mysqld processes to worry about.
+func InstallSignalHandlers() { +} diff --git a/go/libraries/doltcore/sqle/cluster/controller.go b/go/libraries/doltcore/sqle/cluster/controller.go index 3845a1e9fa..4be3f36b34 100644 --- a/go/libraries/doltcore/sqle/cluster/controller.go +++ b/go/libraries/doltcore/sqle/cluster/controller.go @@ -688,9 +688,14 @@ func (c *Controller) RemoteSrvServerArgs(ctxFactory func(context.Context) (*sql. listenaddr := c.RemoteSrvListenAddr() args.HttpListenAddr = listenaddr args.GrpcListenAddr = listenaddr - args.Options = c.ServerOptions() + ctxInterceptor := sqle.SqlContextServerInterceptor{ + Factory: ctxFactory, + } + args.Options = append(args.Options, ctxInterceptor.Options()...) + args.Options = append(args.Options, c.ServerOptions()...) + args.HttpInterceptor = ctxInterceptor.HTTP(args.HttpInterceptor) var err error - args.FS, args.DBCache, err = sqle.RemoteSrvFSAndDBCache(ctxFactory, sqle.CreateUnknownDatabases) + args.DBCache, err = sqle.RemoteSrvDBCache(sqle.GetInterceptorSqlContext, sqle.CreateUnknownDatabases) if err != nil { return remotesrv.ServerArgs{}, err } @@ -699,7 +704,7 @@ func (c *Controller) RemoteSrvServerArgs(ctxFactory func(context.Context) (*sql. keyID := creds.PubKeyToKID(c.pub) keyIDStr := creds.B32CredsEncoding.EncodeToString(keyID) - args.HttpInterceptor = JWKSHandlerInterceptor(keyIDStr, c.pub) + args.HttpInterceptor = JWKSHandlerInterceptor(args.HttpInterceptor, keyIDStr, c.pub) return args, nil } diff --git a/go/libraries/doltcore/sqle/cluster/jwks.go b/go/libraries/doltcore/sqle/cluster/jwks.go index 1e3c357c1c..36511ae62b 100644 --- a/go/libraries/doltcore/sqle/cluster/jwks.go +++ b/go/libraries/doltcore/sqle/cluster/jwks.go @@ -46,16 +46,21 @@ func (h JWKSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Write(b) } -func JWKSHandlerInterceptor(keyID string, pub ed25519.PublicKey) func(http.Handler) http.Handler { +func JWKSHandlerInterceptor(existing func(http.Handler) http.Handler, keyID string, pub ed25519.PublicKey) func(http.Handler) http.Handler { jh := JWKSHandler{KeyID: keyID, PublicKey: pub} return func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + this := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.EscapedPath() == "/.well-known/jwks.json" { jh.ServeHTTP(w, r) return } h.ServeHTTP(w, r) }) + if existing != nil { + return existing(this) + } else { + return this + } } } diff --git a/go/libraries/doltcore/sqle/database_provider.go b/go/libraries/doltcore/sqle/database_provider.go index 56e0eee55e..3b130add6b 100644 --- a/go/libraries/doltcore/sqle/database_provider.go +++ b/go/libraries/doltcore/sqle/database_provider.go @@ -688,7 +688,8 @@ func (p *DoltDatabaseProvider) CloneDatabaseFromRemote( if exists { deleteErr := p.fs.Delete(dbName, true) if deleteErr != nil { - err = fmt.Errorf("%s: unable to clean up failed clone in directory '%s'", err.Error(), dbName) + err = fmt.Errorf("%s: unable to clean up failed clone in directory '%s': %s", + err.Error(), dbName, deleteErr.Error()) } } return err diff --git a/go/libraries/doltcore/sqle/remotesrv.go b/go/libraries/doltcore/sqle/remotesrv.go index 87e6164764..3d63cb68e7 100644 --- a/go/libraries/doltcore/sqle/remotesrv.go +++ b/go/libraries/doltcore/sqle/remotesrv.go @@ -16,13 +16,15 @@ package sqle import ( "context" + "errors" + "net/http" "github.com/dolthub/go-mysql-server/sql" + "google.golang.org/grpc" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" 
"github.com/dolthub/dolt/go/libraries/doltcore/remotesrv" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" - "github.com/dolthub/dolt/go/libraries/utils/filesys" "github.com/dolthub/dolt/go/store/datas" ) @@ -81,17 +83,12 @@ type CreateUnknownDatabasesSetting bool const CreateUnknownDatabases CreateUnknownDatabasesSetting = true const DoNotCreateUnknownDatabases CreateUnknownDatabasesSetting = false -// Considers |args| and returns a new |remotesrv.ServerArgs| instance which -// will serve databases accessible through |ctxFactory|. -func RemoteSrvFSAndDBCache(ctxFactory func(context.Context) (*sql.Context, error), createSetting CreateUnknownDatabasesSetting) (filesys.Filesys, remotesrv.DBCache, error) { - sqlCtx, err := ctxFactory(context.Background()) - if err != nil { - return nil, nil, err - } - sess := dsess.DSessFromSess(sqlCtx.Session) - fs := sess.Provider().FileSystem() +// Returns a remotesrv.DBCache instance which will use the *sql.Context +// returned from |ctxFactory| to access a database in the session +// DatabaseProvider. +func RemoteSrvDBCache(ctxFactory func(context.Context) (*sql.Context, error), createSetting CreateUnknownDatabasesSetting) (remotesrv.DBCache, error) { dbcache := remotesrvStore{ctxFactory, bool(createSetting)} - return fs, dbcache, nil + return dbcache, nil } func WithUserPasswordAuth(args remotesrv.ServerArgs, authnz remotesrv.AccessControl) remotesrv.ServerArgs { @@ -102,3 +99,88 @@ func WithUserPasswordAuth(args remotesrv.ServerArgs, authnz remotesrv.AccessCont args.Options = append(args.Options, si.Options()...) return args } + +type SqlContextServerInterceptor struct { + Factory func(context.Context) (*sql.Context, error) +} + +type serverStreamWrapper struct { + grpc.ServerStream + ctx context.Context +} + +func (s serverStreamWrapper) Context() context.Context { + return s.ctx +} + +type sqlContextInterceptorKey struct{} + +func GetInterceptorSqlContext(ctx context.Context) (*sql.Context, error) { + if v := ctx.Value(sqlContextInterceptorKey{}); v != nil { + return v.(*sql.Context), nil + } + return nil, errors.New("misconfiguration; a sql.Context should always be available from the interceptor chain.") +} + +func (si SqlContextServerInterceptor) Stream() grpc.StreamServerInterceptor { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + sqlCtx, err := si.Factory(ss.Context()) + if err != nil { + return err + } + sql.SessionCommandBegin(sqlCtx.Session) + defer sql.SessionCommandEnd(sqlCtx.Session) + defer sql.SessionEnd(sqlCtx.Session) + newCtx := context.WithValue(ss.Context(), sqlContextInterceptorKey{}, sqlCtx) + newSs := serverStreamWrapper{ + ServerStream: ss, + ctx: newCtx, + } + return handler(srv, newSs) + } +} + +func (si SqlContextServerInterceptor) Unary() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + sqlCtx, err := si.Factory(ctx) + if err != nil { + return nil, err + } + sql.SessionCommandBegin(sqlCtx.Session) + defer sql.SessionCommandEnd(sqlCtx.Session) + defer sql.SessionEnd(sqlCtx.Session) + newCtx := context.WithValue(ctx, sqlContextInterceptorKey{}, sqlCtx) + return handler(newCtx, req) + } +} + +func (si SqlContextServerInterceptor) HTTP(existing func(http.Handler) http.Handler) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + this := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := 
r.Context() + sqlCtx, err := si.Factory(ctx) + if err != nil { + http.Error(w, "could not initialize sql.Context", http.StatusInternalServerError) + return + } + sql.SessionCommandBegin(sqlCtx.Session) + defer sql.SessionCommandEnd(sqlCtx.Session) + defer sql.SessionEnd(sqlCtx.Session) + newCtx := context.WithValue(ctx, sqlContextInterceptorKey{}, sqlCtx) + newReq := r.WithContext(newCtx) + h.ServeHTTP(w, newReq) + }) + if existing != nil { + return existing(this) + } else { + return this + } + } +} + +func (si SqlContextServerInterceptor) Options() []grpc.ServerOption { + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor(si.Unary()), + grpc.ChainStreamInterceptor(si.Stream()), + } +} diff --git a/go/store/types/serial_message.go b/go/store/types/serial_message.go index 50ee98c326..e5c4060f14 100644 --- a/go/store/types/serial_message.go +++ b/go/store/types/serial_message.go @@ -769,10 +769,10 @@ func (sm SerialMessage) WalkAddrs(nbf *NomsBinFormat, cb func(addr hash.Hash) er return err } } - case serial.TableSchemaFileID, serial.ForeignKeyCollectionFileID: + case serial.TableSchemaFileID, serial.ForeignKeyCollectionFileID, serial.TupleFileID: // no further references from these file types return nil - case serial.ProllyTreeNodeFileID, serial.AddressMapFileID, serial.MergeArtifactsFileID, serial.BlobFileID, serial.CommitClosureFileID: + case serial.ProllyTreeNodeFileID, serial.AddressMapFileID, serial.MergeArtifactsFileID, serial.BlobFileID, serial.CommitClosureFileID, serial.VectorIndexNodeFileID: return message.WalkAddresses(context.TODO(), serial.Message(sm), func(ctx context.Context, addr hash.Hash) error { return cb(addr) }) diff --git a/integration-tests/bats/vector-index.bats b/integration-tests/bats/vector-index.bats index 5693b7d862..8d233f6088 100644 --- a/integration-tests/bats/vector-index.bats +++ b/integration-tests/bats/vector-index.bats @@ -430,3 +430,14 @@ SQL [[ "$output" =~ "pk1" ]] || false [[ "${#lines[@]}" = "1" ]] || false } + +@test "vector-index: can GC" { + dolt sql <
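(A usage sketch, not part of this patch: the constructor, handler, and request/response types below are hypothetical, and only SqlContextServerInterceptor, its Options(), and GetInterceptorSqlContext come from the change above. It shows how a remotesapi-style gRPC server is expected to be wired and how a handler recovers the per-request *sql.Context.)

package example

import (
	"context"

	"github.com/dolthub/go-mysql-server/sql"
	"google.golang.org/grpc"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
)

// newGrpcServer builds a gRPC server whose unary and stream interceptors
// install a fresh *sql.Context (from factory) into every request context
// before the service handler runs.
func newGrpcServer(factory func(context.Context) (*sql.Context, error)) *grpc.Server {
	interceptor := sqle.SqlContextServerInterceptor{Factory: factory}
	return grpc.NewServer(interceptor.Options()...)
}

// pingReq and pingResp are illustrative payload types only.
type pingReq struct{}
type pingResp struct{ database string }

// handlePing shows the retrieval side: any handler running behind the
// interceptor can recover the per-request *sql.Context. The interceptor has
// already called sql.SessionCommandBegin for this request and ends the
// command and session when the handler returns, so sqlCtx must not be
// retained past this call.
func handlePing(ctx context.Context, _ *pingReq) (*pingResp, error) {
	sqlCtx, err := sqle.GetInterceptorSqlContext(ctx)
	if err != nil {
		// Only possible if the server was built without interceptor.Options().
		return nil, err
	}
	return &pingResp{database: sqlCtx.GetCurrentDatabase()}, nil
}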
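(Likewise a hypothetical sketch: the patch adds ApplyCmdAttributes, StopProcess, and InstallSignalHandlers but does not show the TestMain or start helper that call them, so the wiring below is an assumption about intended usage, not harness code from this change.)

package binlogreplication

import (
	"os"
	"os/exec"
	"testing"
)

// TestMain installs the SIGINT-to-SIGTERM translation before any tests spawn
// dolt or mysqld child processes. (Assumed wiring, not from the patch.)
func TestMain(m *testing.M) {
	InstallSignalHandlers()
	os.Exit(m.Run())
}

// startHelper pairs ApplyCmdAttributes at start time with StopProcess at
// cleanup time, the pattern the platform-specific helpers are designed for.
func startHelper(t *testing.T, name string, args ...string) *exec.Cmd {
	cmd := exec.Command(name, args...)
	ApplyCmdAttributes(cmd) // new process group on Windows; no-op elsewhere
	if err := cmd.Start(); err != nil {
		t.Fatalf("unable to start %s: %v", name, err)
	}
	t.Cleanup(func() {
		if err := StopProcess(cmd.Process); err != nil {
			t.Logf("failed to stop %s: %v", name, err)
		}
	})
	return cmd
}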