integration-tests/go-sql-server-driver: Convert some more sql-server-cluster.bats tests.

This commit is contained in:
Aaron Son
2022-10-03 17:23:53 -07:00
parent 0f39022a7a
commit fa7db054ad
4 changed files with 327 additions and 608 deletions
@@ -55,6 +55,13 @@ type Connection struct {
On string `yaml:"on"`
Queries []Query `yaml:"queries"`
RestartServer *RestartArgs `yaml:"restart_server"`
// Rarely needed, allows the entire connection assertion to be retried
// on an assertion failure. Use this only for idempotent connection
// interactions and only if the sql-server is prone to tear down the
// connection based on things that are happening, such as cluster role
// transitions.
RetryAttempts int `yaml:"retry_attempts"`
}
// |RestartArgs| are possible arguments to change the arguments which are
@@ -296,19 +303,35 @@ func (test Test) Run(t *testing.T) {
for i, c := range test.Conns {
server := servers[c.On]
require.NotNilf(t, server, "error in test spec: could not find server %s for connection %d", c.On, i)
func() {
db, err := server.DB()
require.NoError(t, err)
defer db.Close()
if c.RetryAttempts > 1 {
RetryTestRun(t, c.RetryAttempts, func(t require.TestingT) {
db, err := server.DB()
require.NoError(t, err)
defer db.Close()
conn, err := db.Conn(context.Background())
require.NoError(t, err)
defer conn.Close()
conn, err := db.Conn(context.Background())
require.NoError(t, err)
defer conn.Close()
for _, q := range c.Queries {
RunQuery(t, conn, q)
}
}()
for _, q := range c.Queries {
RunQueryAttempt(t, conn, q)
}
})
} else {
func() {
db, err := server.DB()
require.NoError(t, err)
defer db.Close()
conn, err := db.Conn(context.Background())
require.NoError(t, err)
defer conn.Close()
for _, q := range c.Queries {
RunQuery(t, conn, q)
}
}()
}
if c.RestartServer != nil {
err := server.Restart(c.RestartServer.Args)
require.NoError(t, err)
@@ -343,17 +366,14 @@ func (r *retryTestingT) FailNow() {
panic(r)
}
func RetryTestRun(t *testing.T, attempts int, test func(require.TestingT)) {
if attempts == 0 {
attempts = 1
}
var rtt *retryTestingT
func (r *retryTestingT) try(attempts int, test func(require.TestingT)) {
for i := 0; i < attempts; i++ {
r.errorfStrings = nil
r.errorfArgs = nil
r.failNow = false
if i != 0 {
time.Sleep(RetrySleepDuration)
}
rtt = new(retryTestingT)
rtt.T = t
func() {
defer func() {
if r := recover(); r != nil {
@@ -363,20 +383,28 @@ func RetryTestRun(t *testing.T, attempts int, test func(require.TestingT)) {
}
}
}()
test(rtt)
test(r)
}()
if !rtt.failNow && len(rtt.errorfStrings) == 0 {
if !r.failNow && len(r.errorfStrings) == 0 {
return
}
}
for i := range rtt.errorfStrings {
t.Errorf(rtt.errorfStrings[i], rtt.errorfArgs[i]...)
for i := range r.errorfStrings {
r.T.Errorf(r.errorfStrings[i], r.errorfArgs[i]...)
}
if rtt.failNow {
t.FailNow()
if r.failNow {
r.T.FailNow()
}
}
// RetryTestRun runs |test| up to |attempts| times against a retrying
// TestingT wrapper; an |attempts| value of 0 is treated as 1 so callers
// can leave the field unset. Failures are only surfaced on |t| once the
// final attempt has failed (see retryTestingT.try).
func RetryTestRun(t *testing.T, attempts int, test func(require.TestingT)) {
if attempts == 0 {
attempts = 1
}
rtt := &retryTestingT{T: t}
rtt.try(attempts, test)
}
func RunQuery(t *testing.T, conn *sql.Conn, q Query) {
RetryTestRun(t, q.RetryAttempts, func(t require.TestingT) {
RunQueryAttempt(t, conn, q)
@@ -119,10 +119,6 @@ tests:
result:
columns: ["status"]
rows: [["0"]]
- query: "select 2 from dual"
result:
columns: ["2"]
rows: [["2"]]
- query: "select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch"
result:
columns: ["@@GLOBAL.dolt_cluster_role","@@GLOBAL.dolt_cluster_role_epoch"]
@@ -133,7 +129,7 @@ tests:
rows: [["0"]]
# Connection should be broken now.
- query: "select 2 from dual"
error_match: "no longer be used"
error_match: "this connection no longer be used"
- on: server1
queries:
- query: "select @@GLOBAL.dolt_cluster_role, @@GLOBAL.dolt_cluster_role_epoch"
@@ -300,3 +296,277 @@ tests:
result:
columns: ["count(*)"]
rows: [["5"]]
- name: booted standby server is read only
multi_repos:
- name: server1
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50052/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50052/repo2
with_files:
- name: standby_server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50052/{database}
bootstrap_role: standby
bootstrap_epoch: 10
remotesapi:
port: 50051
server:
args: ["--config", "standby_server.yaml"]
port: 3309
connections:
- on: server1
queries:
- exec: "use repo1"
- exec: "create table vals (i int primary key)"
error_match: "repo1 is read-only"
- name: booted primary server is read write
multi_repos:
- name: server1
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50052/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50052/repo2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 10
remotesapi:
port: 50051
server:
args: ["--config", "server.yaml"]
port: 3309
connections:
- on: server1
queries:
- exec: "use repo1"
- exec: "create table vals (i int primary key)"
- name: standby transitioned to primary becomes read write
multi_repos:
- name: server1
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50052/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50052/repo2
with_files:
- name: standby_server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50052/{database}
bootstrap_role: standby
bootstrap_epoch: 10
remotesapi:
port: 50051
server:
args: ["--config", "standby_server.yaml"]
port: 3309
connections:
- on: server1
queries:
- exec: "use repo1"
- exec: "create table vals (i int primary key)"
error_match: "repo1 is read-only"
- query: "call dolt_assume_cluster_role('primary', 11)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: "use repo1"
- exec: "create table vals (i int primary key)"
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["0"]]
- name: primary transitioned to standby becomes read only
multi_repos:
- name: server1
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50052/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50052/repo2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 10
remotesapi:
port: 50051
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50051/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50051/repo2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: standby
bootstrap_epoch: 10
remotesapi:
port: 50052
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
queries:
- exec: "use repo1"
- exec: "create table vals (i int primary key)"
- exec: "insert into vals values (1),(2),(3),(4),(5)"
- query: "call dolt_assume_cluster_role('standby', 11)"
result:
columns: ["status"]
rows: [["0"]]
- on: server1
queries:
- exec: "use repo1"
- exec: "insert into vals values (6),(7),(8),(9),(10)"
error_match: "repo1 is read-only"
- query: "select count(*) from vals"
result:
columns: ["count(*)"]
rows: [["5"]]
- name: misconfigured cluster with primaries at same epoch, both transition to detected_broken_config
multi_repos:
- name: server1
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50052/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50052/repo2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3309
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50052/{database}
bootstrap_role: primary
bootstrap_epoch: 10
remotesapi:
port: 50051
server:
args: ["--config", "server.yaml"]
port: 3309
- name: server2
repos:
- name: repo1
with_remotes:
- name: standby
url: http://localhost:50051/repo1
- name: repo2
with_remotes:
- name: standby
url: http://localhost:50051/repo2
with_files:
- name: server.yaml
contents: |
log_level: trace
listener:
host: 0.0.0.0
port: 3310
cluster:
standby_remotes:
- name: standby
remote_url_template: http://localhost:50051/{database}
bootstrap_role: primary
bootstrap_epoch: 10
remotesapi:
port: 50052
server:
args: ["--config", "server.yaml"]
port: 3310
connections:
- on: server1
retry_attempts: 10
queries:
- query: "SELECT @@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch"
result:
columns: ["@@GLOBAL.dolt_cluster_role", "@@GLOBAL.dolt_cluster_role_epoch"]
rows: [["detected_broken_config", "10"]]
- exec: "use repo1"
- exec: "create table more_vals (i int primary key)"
error_match: "repo1 is read-only"
- on: server2
queries:
- query: "SELECT @@GLOBAL.dolt_cluster_role,@@GLOBAL.dolt_cluster_role_epoch"
result:
columns: ["@@GLOBAL.dolt_cluster_role", "@@GLOBAL.dolt_cluster_role_epoch"]
rows: [["detected_broken_config", "10"]]
- exec: "use repo1"
- exec: "create table more_vals (i int primary key)"
error_match: "repo1 is read-only"