Merge main

This commit is contained in:
Zach Musgrave
2023-11-01 15:58:09 -07:00
61 changed files with 657 additions and 1182 deletions

View File

@@ -19,7 +19,7 @@ issueNumber="$9"
initBigRepo="${10}"
nomsBinFormat="${11}"
withTpcc="${12}"
precision="1"
precision="3"
tpccRegex="tpcc%"
if [ -n "$initBigRepo" ]; then
@@ -35,16 +35,16 @@ if [ -n "$withTpcc" ]; then
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan', 'types_table_scan', 'index_join')"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, avg(f.latency_percentile) as from_latency_median, t.server_name, t.server_version, avg(t.latency_percentile) as to_latency_median, ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"
writeTests="('oltp_read_write', 'oltp_update_index', 'oltp_update_non_index', 'oltp_insert', 'bulk_insert', 'oltp_write_only', 'oltp_delete_insert', 'types_delete_insert')"
medianLatencyMultiplierWritesQuery="select f.test_name as write_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $writeTests group by f.test_name;"
meanMultiplierWritesQuery="select round(avg(multipliers), $precision) as writes_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $writeTests group by f.test_name)"
medianLatencyMultiplierWritesQuery="select f.test_name as write_tests, f.server_name, f.server_version, avg(f.latency_percentile) as from_latency_median, t.server_name, t.server_version, avg(t.latency_percentile) as to_latency_median, ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $writeTests group by f.test_name;"
meanMultiplierWritesQuery="select round(avg(multipliers), $precision) as writes_mean_multiplier from (select (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $writeTests group by f.test_name)"
meanMultiplierOverallQuery="select round(avg(multipliers), $precision) as overall_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name group by f.test_name)"
meanMultiplierOverallQuery="select round(avg(multipliers), $precision) as overall_mean_multiplier from (select (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) as multipliers from from_results as f join to_results as t on f.test_name = t.test_name group by f.test_name)"
tpccLatencyQuery="select f.test_name as test_name, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name LIKE '$tpccRegex' group by f.test_name;"
tpccLatencyQuery="select f.test_name as test_name, f.server_name, f.server_version, avg(f.latency_percentile) as from_latency_median, t.server_name, t.server_version, avg(t.latency_percentile) as to_latency_median, ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name LIKE '$tpccRegex' group by f.test_name;"
tpccTpsQuery="select f.test_name as test_name, f.server_name, f.server_version, avg(f.sql_transactions_per_second) as tps, t.test_name as test_name, t.server_name, t.server_version, avg(t.sql_transactions_per_second) as tps from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name LIKE 'tpcc%' group by f.test_name;"
echo '

View File

@@ -33,7 +33,7 @@ const (
"---\n" +
"title: CLI\n" +
"---\n\n" +
"# CLI\n\n"
"# Command Line Interface Reference\n\n"
)
type DumpDocsCmd struct {

View File

@@ -64,7 +64,7 @@ import (
)
const (
Version = "1.21.4"
Version = "1.22.0"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}

View File

@@ -3,14 +3,14 @@ module github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi
go 1.19
require (
google.golang.org/grpc v1.55.0
google.golang.org/grpc v1.56.3
google.golang.org/protobuf v1.30.0
)
require (
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
)

View File

@@ -3,17 +3,17 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=

View File

@@ -40,12 +40,12 @@ require (
github.com/tealeg/xlsx v1.0.5
github.com/tklauser/go-sysconf v0.3.9 // indirect
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.13.0
golang.org/x/net v0.15.0
golang.org/x/crypto v0.14.0
golang.org/x/net v0.17.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.12.0
golang.org/x/sys v0.13.0
google.golang.org/api v0.126.0
google.golang.org/grpc v1.57.0
google.golang.org/grpc v1.57.1
google.golang.org/protobuf v1.31.0
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/src-d/go-errors.v1 v1.0.0
@@ -59,7 +59,7 @@ require (
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
github.com/dolthub/go-mysql-server v0.17.1-0.20231101214538-17133d65440d
github.com/dolthub/go-mysql-server v0.17.1-0.20231101171732-15bf29b50c09
github.com/dolthub/swiss v0.1.0
github.com/goccy/go-json v0.10.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
@@ -147,7 +147,7 @@ require (
golang.org/x/image v0.5.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/term v0.12.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/time v0.1.0 // indirect
golang.org/x/tools v0.13.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect

View File

@@ -181,8 +181,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e h1:kPsT4a47cw1+y/N5SSCkma7FhAPw7KeGmD6c9PBZW9Y=
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e/go.mod h1:KPUcpx070QOfJK1gNe0zx4pA5sicIK1GMikIGLKC168=
github.com/dolthub/go-mysql-server v0.17.1-0.20231101214538-17133d65440d h1:d2p9YUIJQwZuG3EmDemjgXSRHCFJBETKjxfM93Y/ooQ=
github.com/dolthub/go-mysql-server v0.17.1-0.20231101214538-17133d65440d/go.mod h1:YKNZpEARxfNl5LUyIZ8oHEoMDM7DzjnPlhl9cj89QBg=
github.com/dolthub/go-mysql-server v0.17.1-0.20231101171732-15bf29b50c09 h1:dwusb2oFbrVhTVofUZ8BvLLFEP798IbsofhRXW4QMWo=
github.com/dolthub/go-mysql-server v0.17.1-0.20231101171732-15bf29b50c09/go.mod h1:YKNZpEARxfNl5LUyIZ8oHEoMDM7DzjnPlhl9cj89QBg=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.2-0.20230525180605-8dc13778fd72 h1:NfWmngMi1CYUWU4Ix8wM+USEhjc+mhPlT9JUR/anvbQ=
@@ -756,8 +756,8 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -849,8 +849,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -936,13 +936,13 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1114,8 +1114,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg=
google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@@ -270,16 +270,15 @@ func matchTableDeltas(fromDeltas, toDeltas []TableDelta) (deltas []TableDelta) {
for _, name := range matchedNames {
t := to[name]
f := from[name]
if schemasOverlap(t.ToSch, f.FromSch) {
matched := match(t, f)
deltas = append(deltas, matched)
delete(from, f.FromName)
delete(to, t.ToName)
}
matched := match(t, f)
deltas = append(deltas, matched)
delete(from, f.FromName)
delete(to, t.ToName)
}
for _, f := range from {
for _, t := range to {
// check for overlapping schemas to try and match tables when names don't match
if schemasOverlap(f.FromSch, t.ToSch) {
matched := match(t, f)
deltas = append(deltas, matched)

View File

@@ -61,7 +61,11 @@ func (c *Connection) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c Connection) Password() (string, error) {
if c.PassFile != "" {
bs, err := os.ReadFile(c.PassFile)
passFile := c.PassFile
if v := os.Getenv("TESTGENDIR"); v != "" {
passFile = strings.ReplaceAll(passFile, "$TESTGENDIR", v)
}
bs, err := os.ReadFile(passFile)
if err != nil {
return "", err
}
@@ -135,7 +139,11 @@ func (f WithFile) WriteAtDir(dir string) error {
return err
}
if f.SourcePath != "" {
source, err := os.Open(f.SourcePath)
sourcePath := f.SourcePath
if v := os.Getenv("TESTGENDIR"); v != "" {
sourcePath = strings.ReplaceAll(sourcePath, "$TESTGENDIR", v)
}
source, err := os.Open(sourcePath)
if err != nil {
return err
}

View File

@@ -21,27 +21,27 @@ import (
var DoltProcedures = []sql.ExternalStoredProcedureDetails{
{Name: "dolt_add", Schema: int64Schema("status"), Function: doltAdd},
{Name: "dolt_backup", Schema: int64Schema("status"), Function: doltBackup, ReadOnly: true},
{Name: "dolt_backup", Schema: int64Schema("status"), Function: doltBackup, ReadOnly: true, AdminOnly: true},
{Name: "dolt_branch", Schema: int64Schema("status"), Function: doltBranch},
{Name: "dolt_checkout", Schema: doltCheckoutSchema, Function: doltCheckout, ReadOnly: true},
{Name: "dolt_cherry_pick", Schema: cherryPickSchema, Function: doltCherryPick},
{Name: "dolt_clean", Schema: int64Schema("status"), Function: doltClean},
{Name: "dolt_clone", Schema: int64Schema("status"), Function: doltClone},
{Name: "dolt_clone", Schema: int64Schema("status"), Function: doltClone, AdminOnly: true},
{Name: "dolt_commit", Schema: stringSchema("hash"), Function: doltCommit},
{Name: "dolt_commit_hash_out", Schema: stringSchema("hash"), Function: doltCommitHashOut},
{Name: "dolt_conflicts_resolve", Schema: int64Schema("status"), Function: doltConflictsResolve},
{Name: "dolt_count_commits", Schema: int64Schema("ahead", "behind"), Function: doltCountCommits, ReadOnly: true},
{Name: "dolt_fetch", Schema: int64Schema("status"), Function: doltFetch},
{Name: "dolt_undrop", Schema: int64Schema("status"), Function: doltUndrop},
{Name: "dolt_purge_dropped_databases", Schema: int64Schema("status"), Function: doltPurgeDroppedDatabases},
{Name: "dolt_fetch", Schema: int64Schema("status"), Function: doltFetch, AdminOnly: true},
{Name: "dolt_undrop", Schema: int64Schema("status"), Function: doltUndrop, AdminOnly: true},
{Name: "dolt_purge_dropped_databases", Schema: int64Schema("status"), Function: doltPurgeDroppedDatabases, AdminOnly: true},
// dolt_gc is enabled behind a feature flag for now, see dolt_gc.go
{Name: "dolt_gc", Schema: int64Schema("status"), Function: doltGC, ReadOnly: true},
{Name: "dolt_gc", Schema: int64Schema("status"), Function: doltGC, ReadOnly: true, AdminOnly: true},
{Name: "dolt_merge", Schema: doltMergeSchema, Function: doltMerge},
{Name: "dolt_pull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
{Name: "dolt_push", Schema: doltPushSchema, Function: doltPush},
{Name: "dolt_remote", Schema: int64Schema("status"), Function: doltRemote},
{Name: "dolt_pull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull, AdminOnly: true},
{Name: "dolt_push", Schema: doltPushSchema, Function: doltPush, AdminOnly: true},
{Name: "dolt_remote", Schema: int64Schema("status"), Function: doltRemote, AdminOnly: true},
{Name: "dolt_reset", Schema: int64Schema("status"), Function: doltReset},
{Name: "dolt_revert", Schema: int64Schema("status"), Function: doltRevert},
{Name: "dolt_tag", Schema: int64Schema("status"), Function: doltTag},
@@ -56,14 +56,14 @@ var DoltProcedures = []sql.ExternalStoredProcedureDetails{
{Name: "dclean", Schema: int64Schema("status"), Function: doltClean},
{Name: "dclone", Schema: int64Schema("status"), Function: doltClone},
{Name: "dcommit", Schema: stringSchema("hash"), Function: doltCommit},
{Name: "dfetch", Schema: int64Schema("status"), Function: doltFetch},
{Name: "dfetch", Schema: int64Schema("status"), Function: doltFetch, AdminOnly: true},
// {Name: "dgc", Schema: int64Schema("status"), Function: doltGC},
{Name: "dmerge", Schema: doltMergeSchema, Function: doltMerge},
{Name: "dpull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
{Name: "dpush", Schema: doltPushSchema, Function: doltPush},
{Name: "dremote", Schema: int64Schema("status"), Function: doltRemote},
{Name: "dpull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull, AdminOnly: true},
{Name: "dpush", Schema: doltPushSchema, Function: doltPush, AdminOnly: true},
{Name: "dremote", Schema: int64Schema("status"), Function: doltRemote, AdminOnly: true},
{Name: "dreset", Schema: int64Schema("status"), Function: doltReset},
{Name: "drevert", Schema: int64Schema("status"), Function: doltRevert},
{Name: "dtag", Schema: int64Schema("status"), Function: doltTag},

View File

@@ -1100,7 +1100,7 @@ func (d *DoltSession) setForeignKeyChecksSessionVar(ctx *sql.Context, key string
}
}
} else {
return fmt.Errorf("variable 'foreign_key_checks' can't be set to the value of '%d'", intVal)
return sql.ErrInvalidSystemVariableValue.New("foreign_key_checks", intVal)
}
return d.Session.SetSessionVariable(ctx, key, value)

View File

@@ -806,6 +806,7 @@ func TestJSONTableScriptsPrepared(t *testing.T) {
func TestUserPrivileges(t *testing.T) {
h := newDoltHarness(t)
h.setupTestProcedures = true
defer h.Close()
enginetest.TestUserPrivileges(t, h)
}

View File

@@ -24,6 +24,7 @@ import (
gms "github.com/dolthub/go-mysql-server"
"github.com/dolthub/go-mysql-server/enginetest"
"github.com/dolthub/go-mysql-server/enginetest/scriptgen/setup"
"github.com/dolthub/go-mysql-server/memory"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/go-mysql-server/sql/rowexec"
@@ -40,18 +41,19 @@ import (
)
type DoltHarness struct {
t *testing.T
provider dsess.DoltDatabaseProvider
multiRepoEnv *env.MultiRepoEnv
session *dsess.DoltSession
branchControl *branch_control.Controller
parallelism int
skippedQueries []string
setupData []setup.SetupScript
resetData []setup.SetupScript
engine *gms.Engine
skipSetupCommit bool
useLocalFilesystem bool
t *testing.T
provider dsess.DoltDatabaseProvider
multiRepoEnv *env.MultiRepoEnv
session *dsess.DoltSession
branchControl *branch_control.Controller
parallelism int
skippedQueries []string
setupData []setup.SetupScript
resetData []setup.SetupScript
engine *gms.Engine
skipSetupCommit bool
useLocalFilesystem bool
setupTestProcedures bool
}
var _ enginetest.Harness = (*DoltHarness)(nil)
@@ -180,6 +182,9 @@ func (d *DoltHarness) NewEngine(t *testing.T) (enginetest.QueryEngine, error) {
d.branchControl = branch_control.CreateDefaultController(context.Background())
pro := d.newProvider()
if d.setupTestProcedures {
pro = d.newProviderWithProcedures()
}
doltProvider, ok := pro.(*sqle.DoltDatabaseProvider)
require.True(t, ok)
d.provider = doltProvider
@@ -422,9 +427,20 @@ func (d *DoltHarness) newProvider() sql.MutableDatabaseProvider {
b := env.GetDefaultInitBranch(d.multiRepoEnv.Config())
pro, err := sqle.NewDoltDatabaseProvider(b, d.multiRepoEnv.FileSystem())
require.NoError(d.t, err)
return pro
}
func (d *DoltHarness) newProviderWithProcedures() sql.MutableDatabaseProvider {
pro := d.newProvider()
provider, ok := pro.(*sqle.DoltDatabaseProvider)
require.True(d.t, ok)
for _, esp := range memory.ExternalStoredProcedures {
provider.Register(esp)
}
return provider
}
func (d *DoltHarness) newTable(db sql.Database, name string, schema sql.PrimaryKeySchema) (sql.Table, error) {
tc := db.(sql.TableCreator)

View File

@@ -1962,6 +1962,13 @@ var HistorySystemTableScriptTests = []queries.ScriptTest{
Query: "select pk, c2 from dolt_history_t where commit_hash=@Commit2 order by pk;",
Expected: []sql.Row{{1, 3}, {4, 6}},
},
{
// When filtering on a column from the original table, we use the primary index here, but because
// column tags have changed in previous versions of the table, the index tags don't match up completely.
// https://github.com/dolthub/dolt/issues/6891
Query: "select pk, c1, c2 from dolt_history_t where pk=4;",
Expected: []sql.Row{{4, 5, 6}},
},
},
},
{
@@ -3080,7 +3087,7 @@ var DoltBranchScripts = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "show tables",
Expected: []sql.Row{{"a"}, {"myview"}},
Expected: []sql.Row{{"a"}},
},
{
Query: "CALL DOLT_CHECKOUT('-b', 'newBranch', 'head~1')",
@@ -3088,7 +3095,7 @@ var DoltBranchScripts = []queries.ScriptTest{
},
{
Query: "show tables",
Expected: []sql.Row{{"myview"}},
Expected: []sql.Row{},
},
{
Query: "CALL DOLT_CHECKOUT('-b', 'newBranch2', @commit1)",
@@ -3096,7 +3103,7 @@ var DoltBranchScripts = []queries.ScriptTest{
},
{
Query: "show tables",
Expected: []sql.Row{{"a"}, {"myview"}},
Expected: []sql.Row{{"a"}},
},
{
Query: "CALL DOLT_CHECKOUT('-b', 'otherBranch', 'unknownCommit')",
@@ -4974,7 +4981,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SHOW TABLES;",
Expected: []sql.Row{{"myview"}},
Expected: []sql.Row{},
},
{
Query: "call dolt_cherry_pick(@commit1);",
@@ -4987,7 +4994,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "SHOW TABLES;",
Expected: []sql.Row{{"myview"}, {"table_a"}},
Expected: []sql.Row{{"table_a"}},
},
{
Query: "SELECT * FROM table_a;",
@@ -5010,7 +5017,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SHOW TABLES;",
Expected: []sql.Row{{"myview"}, {"dropme"}},
Expected: []sql.Row{{"dropme"}},
},
{
Query: "call dolt_cherry_pick(@commit1);",
@@ -5018,7 +5025,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "SHOW TABLES;",
Expected: []sql.Row{{"myview"}},
Expected: []sql.Row{},
},
},
},
@@ -6321,7 +6328,6 @@ var DoltSystemVariables = []queries.ScriptTest{
{"dolt_remote_branches"},
{"dolt_remotes"},
{"dolt_status"},
{"myview"},
{"test"},
},
},

View File

@@ -515,7 +515,7 @@ var DoltOnlyRevisionDbPrivilegeTests = []queries.UserPrivilegeTest{
User: "tester",
Host: "localhost",
Query: "show tables;",
Expected: []sql.Row{{"mytable"}, {"test"}, {"t2"}},
Expected: []sql.Row{{"mytable"}, {"myview"}, {"test"}, {"t2"}},
},
},
},
@@ -552,7 +552,7 @@ var DoltOnlyRevisionDbPrivilegeTests = []queries.UserPrivilegeTest{
User: "tester",
Host: "localhost",
Query: "show tables;",
Expected: []sql.Row{{"mytable"}},
Expected: []sql.Row{{"mytable"}, {"myview"}},
},
},
},

View File

@@ -281,8 +281,13 @@ func NewLookupBuilder(
}, nil
case idx.coversColumns(s, projections):
return newCoveringLookupBuilder(base), nil
case idx.ID() == "PRIMARY":
// If we are using the primary index, always use a covering lookup builder. In some cases, coversColumns
// can return false, for example if a column was modified in an older version and has a different tag than
// the current schema. In those cases, the primary index is still the best we have, so go ahead and use it.
return newCoveringLookupBuilder(base), nil
default:
return newNonCoveringLookupBuilder(s, base), nil
return newNonCoveringLookupBuilder(s, base)
}
}
@@ -301,7 +306,17 @@ func newCoveringLookupBuilder(b *baseLookupBuilder) *coveringLookupBuilder {
}
}
func newNonCoveringLookupBuilder(s *durableIndexState, b *baseLookupBuilder) *nonCoveringLookupBuilder {
// newNonCoveringLookupBuilder returns a LookupBuilder that uses the specified index state and
// base lookup builder to create a nonCoveringLookupBuilder that uses the secondary index (from
// |b|) to find the PK row identifier, and then uses that PK to look up the complete row from
// the primary index (from |s|). If a baseLookupBuilder built on the primary index is passed in,
// this function returns an error.
func newNonCoveringLookupBuilder(s *durableIndexState, b *baseLookupBuilder) (*nonCoveringLookupBuilder, error) {
if b.idx.ID() == "PRIMARY" {
return nil, fmt.Errorf("incompatible index passed to newNonCoveringLookupBuilder: " +
"primary index passed, but only secondary indexes are supported")
}
primary := durable.ProllyMapFromIndex(s.Primary)
priKd, _ := primary.Descriptors()
tbBld := val.NewTupleBuilder(priKd)
@@ -316,7 +331,7 @@ func newNonCoveringLookupBuilder(s *durableIndexState, b *baseLookupBuilder) *no
keyMap: keyProj,
valMap: valProj,
ordMap: ordProj,
}
}, nil
}
var _ LookupBuilder = (*baseLookupBuilder)(nil)
@@ -411,7 +426,7 @@ func (lb *coveringLookupBuilder) NewRowIter(ctx *sql.Context, part sql.Partition
// nonCoveringLookupBuilder constructs row iters for non-covering lookups,
// where we need to seek on the secondary table for key identity, and then
// the primary table to fill all requrested projections.
// the primary table to fill all requested projections.
type nonCoveringLookupBuilder struct {
*baseLookupBuilder

View File

@@ -28,18 +28,7 @@ import (
"time"
)
func tableExistsInChunkSource(ctx context.Context, ddb *ddbTableStore, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (bool, error) {
if al.tableMayBeInDynamo(chunkCount) {
data, err := ddb.ReadTable(ctx, name, nil)
if err != nil {
return false, err
}
if data == nil {
return false, nil
}
return true, nil
}
func tableExistsInChunkSource(ctx context.Context, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (bool, error) {
magic := make([]byte, magicNumberSize)
n, _, err := s3.ReadFromEnd(ctx, name, magic, stats)
if err != nil {
@@ -51,28 +40,9 @@ func tableExistsInChunkSource(ctx context.Context, ddb *ddbTableStore, s3 *s3Obj
return bytes.Equal(magic, []byte(magicNumber)), nil
}
func newAWSChunkSource(ctx context.Context, ddb *ddbTableStore, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {
func newAWSChunkSource(ctx context.Context, s3 *s3ObjectReader, al awsLimits, name addr, chunkCount uint32, q MemoryQuotaProvider, stats *Stats) (cs chunkSource, err error) {
var tra tableReaderAt
index, err := loadTableIndex(ctx, stats, chunkCount, q, func(p []byte) error {
if al.tableMayBeInDynamo(chunkCount) {
data, err := ddb.ReadTable(ctx, name, stats)
if data == nil && err == nil { // There MUST be either data or an error
return errors.New("no data available")
}
if data != nil {
if len(p) > len(data) {
return errors.New("not enough data for chunk count")
}
indexBytes := data[len(data)-len(p):]
copy(p, indexBytes)
tra = &dynamoTableReaderAt{ddb: ddb, h: name}
return nil
}
if _, ok := err.(tableNotInDynamoErr); !ok {
return err
}
}
n, _, err := s3.ReadFromEnd(ctx, name, p, stats)
if err != nil {
return err

View File

@@ -39,17 +39,14 @@ func TestAWSChunkSource(t *testing.T) {
require.NoError(t, err)
s3 := makeFakeS3(t)
ddb := makeFakeDDB(t)
s3or := &s3ObjectReader{s3, "bucket", nil, ""}
dts := &ddbTableStore{ddb, "table", nil, nil}
makeSrc := func(chunkMax int) chunkSource {
cs, err := newAWSChunkSource(
context.Background(),
dts,
s3or,
awsLimits{itemMax: maxDynamoItemSize, chunkMax: uint32(chunkMax)},
awsLimits{},
h,
uint32(len(chunks)),
NewUnlimitedMemQuotaProvider(),
@@ -61,16 +58,6 @@ func TestAWSChunkSource(t *testing.T) {
return cs
}
t.Run("Dynamo", func(t *testing.T) {
ddb.putData(fmtTableName(h), tableData)
t.Run("Has Chunks", func(t *testing.T) {
src := makeSrc(len(chunks) + 1)
assertChunksInReader(chunks, src, assert.New(t))
src.close()
})
})
t.Run("S3", func(t *testing.T) {
s3.data[h.String()] = tableData

View File

@@ -47,12 +47,6 @@ const (
maxS3PartSize = 64 * 1 << 20 // 64MiB
maxS3Parts = 10000
// Disable persisting tables in DynamoDB. This is currently unused by
// Dolthub and keeping it requires provisioning DynamoDB throughout for
// the noop reads.
maxDynamoChunks = 0
maxDynamoItemSize = 0
defaultS3PartSize = minS3PartSize // smallest allowed by S3 allows for most throughput
)
@@ -60,7 +54,6 @@ type awsTablePersister struct {
s3 s3iface.S3API
bucket string
rl chan struct{}
ddb *ddbTableStore
limits awsLimits
ns string
q MemoryQuotaProvider
@@ -71,25 +64,11 @@ var _ tableFilePersister = awsTablePersister{}
type awsLimits struct {
partTarget, partMin, partMax uint64
itemMax int
chunkMax uint32
}
func (al awsLimits) tableFitsInDynamo(name addr, dataLen int, chunkCount uint32) bool {
calcItemSize := func(n addr, dataLen int) int {
return len(dbAttr) + len(tablePrefix) + len(n.String()) + len(dataAttr) + dataLen
}
return chunkCount <= al.chunkMax && calcItemSize(name, dataLen) < al.itemMax
}
func (al awsLimits) tableMayBeInDynamo(chunkCount uint32) bool {
return chunkCount <= al.chunkMax
}
func (s3p awsTablePersister) Open(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (chunkSource, error) {
return newAWSChunkSource(
ctx,
s3p.ddb,
&s3ObjectReader{s3: s3p.s3, bucket: s3p.bucket, readRl: s3p.rl, ns: s3p.ns},
s3p.limits,
name,
@@ -102,7 +81,6 @@ func (s3p awsTablePersister) Open(ctx context.Context, name addr, chunkCount uin
func (s3p awsTablePersister) Exists(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (bool, error) {
return tableExistsInChunkSource(
ctx,
s3p.ddb,
&s3ObjectReader{s3: s3p.s3, bucket: s3p.bucket, readRl: s3p.rl, ns: s3p.ns},
s3p.limits,
name,
@@ -112,29 +90,7 @@ func (s3p awsTablePersister) Exists(ctx context.Context, name addr, chunkCount u
)
}
func (s3p awsTablePersister) CopyTableFile(ctx context.Context, r io.ReadCloser, fileId string, fileSz uint64, chunkCount uint32) error {
var err error
defer func() {
cerr := r.Close()
if err == nil {
err = cerr
}
}()
name, err := parseAddr(fileId)
if err != nil {
return err
}
if s3p.limits.tableFitsInDynamo(name, int(fileSz), chunkCount) {
data, err := io.ReadAll(r)
if err != nil {
return err
}
return s3p.ddb.Write(ctx, name, data)
}
func (s3p awsTablePersister) CopyTableFile(ctx context.Context, r io.Reader, fileId string, fileSz uint64, chunkCount uint32) error {
return s3p.multipartUpload(ctx, r, fileSz, fileId)
}
@@ -165,16 +121,6 @@ func (s3p awsTablePersister) Persist(ctx context.Context, mt *memTable, haver ch
return emptyChunkSource{}, nil
}
if s3p.limits.tableFitsInDynamo(name, len(data), chunkCount) {
err := s3p.ddb.Write(ctx, name, data)
if err != nil {
return nil, err
}
return newReaderFromIndexData(ctx, s3p.q, data, name, &dynamoTableReaderAt{ddb: s3p.ddb, h: name}, s3BlockSize)
}
err = s3p.multipartUpload(ctx, bytes.NewReader(data), uint64(len(data)), name.String())
if err != nil {

View File

@@ -35,8 +35,6 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/util/sizecache"
)
func randomChunks(t *testing.T, r *rand.Rand, sz int) [][]byte {
@@ -71,9 +69,6 @@ func TestRandomChunks(t *testing.T) {
func TestAWSTablePersisterPersist(t *testing.T) {
ctx := context.Background()
calcPartSize := func(rdr chunkReader, maxPartNum uint64) uint64 {
return maxTableSize(uint64(mustUint32(rdr.count())), mustUint64(rdr.uncompressedLen())) / maxPartNum
}
r := rand.New(rand.NewSource(1024))
const sz15mb = 1 << 20 * 15
@@ -90,8 +85,8 @@ func TestAWSTablePersisterPersist(t *testing.T) {
testIt := func(t *testing.T, ns string) {
t.Run("InMultipleParts", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: ddb, limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
s3svc := makeFakeS3(t)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
@@ -108,8 +103,8 @@ func TestAWSTablePersisterPersist(t *testing.T) {
t.Run("InSinglePart", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: ddb, limits: limits64mb, ns: ns, q: &UnlimitedQuotaProvider{}}
s3svc := makeFakeS3(t)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", limits: limits64mb, ns: ns, q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
@@ -133,8 +128,8 @@ func TestAWSTablePersisterPersist(t *testing.T) {
assert.Equal(existingTable.addChunk(computeAddr(c), c), chunkAdded)
}
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: ddb, limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
s3svc := makeFakeS3(t)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, existingTable, &Stats{})
require.NoError(t, err)
@@ -149,8 +144,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
assert := assert.New(t)
s3svc := &failingFakeS3{makeFakeS3(t), sync.Mutex{}, 1}
ddb := makeFakeDTS(makeFakeDDB(t), nil)
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: ddb, limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", limits: limits5mb, ns: ns, q: &UnlimitedQuotaProvider{}}
_, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
assert.Error(err)
@@ -163,96 +157,6 @@ func TestAWSTablePersisterPersist(t *testing.T) {
testIt(t, "a-namespace-here")
})
})
t.Run("PersistToDynamo", func(t *testing.T) {
t.Run("Success", func(t *testing.T) {
t.SkipNow()
assert := assert.New(t)
ddb := makeFakeDDB(t)
s3svc, dts := makeFakeS3(t), makeFakeDTS(ddb, nil)
limits := awsLimits{itemMax: maxDynamoItemSize, chunkMax: 2 * mustUint32(mt.count())}
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: dts, limits: limits, ns: "", q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
if r, err := ddb.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
})
t.Run("CacheOnOpen", func(t *testing.T) {
t.SkipNow()
assert := assert.New(t)
tc := sizecache.New(maxDynamoItemSize)
ddb := makeFakeDDB(t)
s3svc, dts := makeFakeS3(t), makeFakeDTS(ddb, tc)
limits := awsLimits{itemMax: maxDynamoItemSize, chunkMax: 2 * mustUint32(mt.count())}
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: dts, limits: limits, ns: "", q: &UnlimitedQuotaProvider{}}
tableData, name, err := buildTable(testChunks)
require.NoError(t, err)
ddb.putData(fmtTableName(name), tableData)
src, err := s3p.Open(context.Background(), name, uint32(len(testChunks)), &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
if r, err := ddb.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
if data, present := tc.Get(name); assert.True(present) {
assert.Equal(tableData, data.([]byte))
}
}
})
t.Run("FailTooManyChunks", func(t *testing.T) {
t.SkipNow()
assert := assert.New(t)
ddb := makeFakeDDB(t)
s3svc, dts := makeFakeS3(t), makeFakeDTS(ddb, nil)
limits := awsLimits{itemMax: maxDynamoItemSize, chunkMax: 1, partTarget: calcPartSize(mt, 1)}
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: dts, limits: limits, ns: "", q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
if r, err := ddb.readerForTable(ctx, src.hash()); assert.Nil(r) && assert.NoError(err) {
if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
}
})
t.Run("FailItemTooBig", func(t *testing.T) {
t.SkipNow()
assert := assert.New(t)
ddb := makeFakeDDB(t)
s3svc, dts := makeFakeS3(t), makeFakeDTS(ddb, nil)
limits := awsLimits{itemMax: 0, chunkMax: 2 * mustUint32(mt.count()), partTarget: calcPartSize(mt, 1)}
s3p := awsTablePersister{s3: s3svc, bucket: "bucket", ddb: dts, limits: limits, ns: "", q: &UnlimitedQuotaProvider{}}
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
if r, err := ddb.readerForTable(ctx, src.hash()); assert.Nil(r) && assert.NoError(err) {
if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
}
})
})
}
// makeFakeDTS wraps a fake DynamoDB service in a ddbTableStore for tests,
// using the fixed table name "table", no read rate-limiter, and the given
// (possibly nil) size cache.
func makeFakeDTS(ddb ddbsvc, tc *sizecache.SizeCache) *ddbTableStore {
	return &ddbTableStore{ddb, "table", nil, tc}
}
type waitOnStoreTableCache struct {
@@ -367,18 +271,16 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
const sz5mb = 1 << 20 * 5
targetPartSize := uint64(sz5mb)
minPartSize, maxPartSize := targetPartSize, 5*targetPartSize
maxItemSize, maxChunkCount := int(targetPartSize/2), uint32(4)
rl := make(chan struct{}, 8)
defer close(rl)
newPersister := func(s3svc s3iface.S3API, ddb *ddbTableStore) awsTablePersister {
newPersister := func(s3svc s3iface.S3API) awsTablePersister {
return awsTablePersister{
s3svc,
"bucket",
rl,
ddb,
awsLimits{targetPartSize, minPartSize, maxPartSize, maxItemSize, maxChunkCount},
awsLimits{targetPartSize, minPartSize, maxPartSize},
"",
&UnlimitedQuotaProvider{},
}
@@ -411,8 +313,8 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
t.Run("TotalUnderMinSize", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := newPersister(s3svc, ddb)
s3svc := makeFakeS3(t)
s3p := newPersister(s3svc)
chunks := smallChunks[:len(smallChunks)-1]
sources := makeSources(s3p, chunks)
@@ -433,8 +335,8 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
t.Run("TotalOverMinSize", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := newPersister(s3svc, ddb)
s3svc := makeFakeS3(t)
s3p := newPersister(s3svc)
sources := makeSources(s3p, smallChunks)
src, _, err := s3p.ConjoinAll(context.Background(), sources, &Stats{})
@@ -463,8 +365,8 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
t.Run("AllOverMax", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := newPersister(s3svc, ddb)
s3svc := makeFakeS3(t)
s3p := newPersister(s3svc)
// Make 2 chunk sources that each have >maxPartSize chunk data
sources := make(chunkSources, 2)
@@ -496,8 +398,8 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
t.Run("SomeOverMax", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := newPersister(s3svc, ddb)
s3svc := makeFakeS3(t)
s3p := newPersister(s3svc)
// Add one chunk source that has >maxPartSize data
mtb := newMemTable(uint64(2 * maxPartSize))
@@ -537,8 +439,8 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
t.Run("Mix", func(t *testing.T) {
assert := assert.New(t)
s3svc, ddb := makeFakeS3(t), makeFakeDTS(makeFakeDDB(t), nil)
s3p := newPersister(s3svc, ddb)
s3svc := makeFakeS3(t)
s3p := newPersister(s3svc)
// Start with small tables. Since total > minPartSize, will require more than one part to upload.
sources := make(chunkSources, len(smallChunks))

View File

@@ -178,13 +178,7 @@ func (bsp *blobstorePersister) Path() string {
return ""
}
func (bsp *blobstorePersister) CopyTableFile(ctx context.Context, r io.ReadCloser, name string, fileSz uint64, chunkCount uint32) (err error) {
defer func() {
if cerr := r.Close(); cerr != nil {
err = cerr
}
}()
func (bsp *blobstorePersister) CopyTableFile(ctx context.Context, r io.Reader, name string, fileSz uint64, chunkCount uint32) error {
// sanity check file size
if fileSz < indexSize(chunkCount)+footerSize {
return fmt.Errorf("table file size %d too small for chunk count %d", fileSz, chunkCount)
@@ -197,36 +191,36 @@ func (bsp *blobstorePersister) CopyTableFile(ctx context.Context, r io.ReadClose
rr, ok := r.(io.ReaderAt)
if !ok {
// sequentially write chunk records then tail
if _, err = bsp.bs.Put(ctx, name+tableRecordsExt, lr); err != nil {
if _, err := bsp.bs.Put(ctx, name+tableRecordsExt, lr); err != nil {
return err
}
if _, err = bsp.bs.Put(ctx, name+tableTailExt, r); err != nil {
if _, err := bsp.bs.Put(ctx, name+tableTailExt, r); err != nil {
return err
}
} else {
// on the push path, we expect to Put concurrently
// see BufferedFileByteSink in byte_sink.go
eg, ectx := errgroup.WithContext(ctx)
eg.Go(func() (err error) {
eg.Go(func() error {
buf := make([]byte, indexSize(chunkCount)+footerSize)
if _, err = rr.ReadAt(buf, off); err != nil {
if _, err := rr.ReadAt(buf, off); err != nil {
return err
}
_, err = bsp.bs.Put(ectx, name+tableTailExt, bytes.NewBuffer(buf))
return
_, err := bsp.bs.Put(ectx, name+tableTailExt, bytes.NewBuffer(buf))
return err
})
eg.Go(func() (err error) {
_, err = bsp.bs.Put(ectx, name+tableRecordsExt, lr)
return
eg.Go(func() error {
_, err := bsp.bs.Put(ectx, name+tableRecordsExt, lr)
return err
})
if err = eg.Wait(); err != nil {
if err := eg.Wait(); err != nil {
return err
}
}
// finally concatenate into the complete table
_, err = bsp.bs.Concatenate(ctx, name, []string{name + tableRecordsExt, name + tableTailExt})
return
_, err := bsp.bs.Concatenate(ctx, name, []string{name + tableRecordsExt, name + tableTailExt})
return err
}
type bsTableReaderAt struct {

View File

@@ -23,7 +23,6 @@ package nbs
import (
"bytes"
"context"
"sync/atomic"
"testing"
@@ -53,26 +52,6 @@ func makeFakeDDB(t *testing.T) *fakeDDB {
}
}
// readerForTable returns a chunkReader over the table data this fake DDB
// holds for name, or (nil, nil) when nothing is stored under that name.
// The stored value must be the raw bytes of a serialized NBS table.
func (m *fakeDDB) readerForTable(ctx context.Context, name addr) (chunkReader, error) {
	if i, present := m.data[fmtTableName(name)]; present {
		buff, ok := i.([]byte)
		assert.True(m.t, ok)
		// Parse the table's trailing index, then wrap the raw bytes in a
		// table reader for chunk-level access.
		ti, err := parseTableIndex(ctx, buff, &UnlimitedQuotaProvider{})
		if err != nil {
			return nil, err
		}
		tr, err := newTableReader(ti, tableReaderAtFromBytes(buff), fileBlockSize)
		if err != nil {
			return nil, err
		}
		return tr, nil
	}
	return nil, nil
}
func (m *fakeDDB) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInput, opts ...request.Option) (*dynamodb.GetItemOutput, error) {
key := input.Key[dbAttr].S
assert.NotNil(m.t, key, "key should have been a String: %+v", input.Key[dbAttr])
@@ -92,8 +71,6 @@ func (m *fakeDDB) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInp
if e.appendix != "" {
item[appendixAttr] = &dynamodb.AttributeValue{S: aws.String(e.appendix)}
}
case []byte:
item[dataAttr] = &dynamodb.AttributeValue{B: e}
}
}
atomic.AddInt64(&m.numGets, 1)
@@ -113,12 +90,6 @@ func (m *fakeDDB) PutItemWithContext(ctx aws.Context, input *dynamodb.PutItemInp
assert.NotNil(m.t, input.Item[dbAttr].S, "key should have been a String: %+v", input.Item[dbAttr])
key := *input.Item[dbAttr].S
if input.Item[dataAttr] != nil {
assert.NotNil(m.t, input.Item[dataAttr].B, "data should have been a blob: %+v", input.Item[dataAttr])
m.putData(key, input.Item[dataAttr].B)
return &dynamodb.PutItemOutput{}, nil
}
assert.NotNil(m.t, input.Item[nbsVersAttr], "%s should have been present", nbsVersAttr)
assert.NotNil(m.t, input.Item[nbsVersAttr].S, "nbsVers should have been a String: %+v", input.Item[nbsVersAttr])
assert.Equal(m.t, AWSStorageVersion, *input.Item[nbsVersAttr].S)

View File

@@ -1,172 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2017 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
package nbs
import (
"bytes"
"context"
"fmt"
"io"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/dolthub/dolt/go/store/util/sizecache"
"github.com/dolthub/dolt/go/store/util/verbose"
)
const (
dataAttr = "data"
tablePrefix = "*" // I want to use NBS table names as keys when they are written to DynamoDB, but a bare table name is a legal Noms Database name as well. To avoid collisions, dynamoTableReader prepends this prefix (which is not a legal character in a Noms Database name).
)
// dynamoTableReaderAt assumes the existence of a DynamoDB table whose primary partition key is in String format and named `db`.
//
// It adapts an NBS table stored as a single DynamoDB item to the
// tableReaderAt interface; each read fetches the whole item (possibly via
// the store's in-memory cache).
type dynamoTableReaderAt struct {
	// ddb is the DynamoDB-backed table store used to fetch the data.
	ddb *ddbTableStore
	// h is the address (name) of the NBS table this reader serves.
	h addr
}
// tableNotInDynamoErr indicates that a requested NBS table was not found
// in the backing DynamoDB table. Callers type-assert on it to distinguish
// "absent" from other failures (e.g. to retry with a consistent read).
type tableNotInDynamoErr struct {
	nbs, dynamo string // NBS table name and DynamoDB table name, for the message
}
// Error implements the error interface, naming both the missing NBS table
// and the DynamoDB table that was searched.
func (t tableNotInDynamoErr) Error() string {
	nbsName, dynamoName := t.nbs, t.dynamo
	return fmt.Sprintf("NBS table %s not present in DynamoDB table %s", nbsName, dynamoName)
}
// Close releases no resources — the reader is purely a view over a remote
// DynamoDB item — so it is always a no-op returning nil.
func (dtra *dynamoTableReaderAt) Close() error {
	return nil
}
// clone returns the receiver itself rather than a copy: the reader's
// fields are never mutated in this file, so sharing one instance suffices.
func (dtra *dynamoTableReaderAt) clone() (tableReaderAt, error) {
	return dtra, nil
}
// Reader returns an io.ReadCloser over the entire table's bytes. The whole
// table is fetched from DynamoDB up front; the returned reader is purely
// in-memory and its Close is a no-op.
func (dtra *dynamoTableReaderAt) Reader(ctx context.Context) (io.ReadCloser, error) {
	tableBytes, readErr := dtra.ddb.ReadTable(ctx, dtra.h, &Stats{})
	if readErr != nil {
		return nil, readErr
	}
	rdr := bytes.NewReader(tableBytes)
	return io.NopCloser(rdr), nil
}
// ReadAtWithStats copies bytes of the table starting at offset off into p,
// fetching the entire table from DynamoDB (the ReadTable call may be
// served from the store's in-memory cache; see ddbTableStore.ReadTable).
//
// Returns the number of bytes copied. If fewer than len(p) bytes are
// available at off, err is io.ErrUnexpectedEOF. An out-of-range offset
// (off < 0 or off > table length) also yields io.ErrUnexpectedEOF instead
// of panicking on the slice expression, as the original code did.
func (dtra *dynamoTableReaderAt) ReadAtWithStats(ctx context.Context, p []byte, off int64, stats *Stats) (n int, err error) {
	data, err := dtra.ddb.ReadTable(ctx, dtra.h, stats)
	if err != nil {
		return 0, err
	}
	// Guard the slice below: data[off:] panics when off < 0 or
	// off > len(data). Report a short read instead.
	if off < 0 || off > int64(len(data)) {
		return 0, io.ErrUnexpectedEOF
	}
	n = copy(p, data[off:])
	if n < len(p) {
		err = io.ErrUnexpectedEOF
	}
	return
}
// ddbTableStore reads and writes whole NBS tables as single items in one
// DynamoDB table, optionally rate-limiting reads and caching table bytes
// in memory.
type ddbTableStore struct {
	ddb    ddbsvc
	table  string        // name of the backing DynamoDB table
	readRl chan struct{} // optional read rate-limit semaphore; nil disables limiting
	cache  *sizecache.SizeCache // TODO: merge this with tableCache as part of BUG 3601
}
// ReadTable returns the raw bytes of the named NBS table, serving from the
// in-memory cache when possible and falling back to DynamoDB otherwise.
// Successful backend reads populate the cache. Size/latency samples are
// recorded in stats: Mem* for cache hits, Dynamo* for backend reads.
func (dts *ddbTableStore) ReadTable(ctx context.Context, name addr, stats *Stats) (data []byte, err error) {
	t1 := time.Now()
	if dts.cache != nil {
		if i, present := dts.cache.Get(name); present {
			data = i.([]byte)
			// Deferred so the latency sample covers the whole call.
			defer func() {
				stats.MemBytesPerRead.Sample(uint64(len(data)))
				stats.MemReadLatency.SampleTimeSince(t1)
			}()
			return data, nil
		}
	}

	data, err = dts.readTable(ctx, name)
	if data != nil {
		// NOTE(review): samples whenever data is non-nil even if err is
		// also non-nil — presumably readTable never returns both; confirm.
		defer func() {
			stats.DynamoBytesPerRead.Sample(uint64(len(data)))
			stats.DynamoReadLatency.SampleTimeSince(t1)
		}()
	}

	// Only cache successful reads.
	if dts.cache != nil && err == nil {
		dts.cache.Add(name, uint64(len(data)), data)
	}
	return data, err
}
// readTable fetches the raw bytes of the named table from DynamoDB. It
// first issues a cheaper eventually-consistent GetItem; if the item is not
// found, it retries once with a fully-consistent read before giving up.
func (dts *ddbTableStore) readTable(ctx context.Context, name addr) ([]byte, error) {
	getItem := func(input *dynamodb.GetItemInput) ([]byte, error) {
		// Hold a slot in the optional rate-limit semaphore for the call.
		if dts.readRl != nil {
			dts.readRl <- struct{}{}
			defer func() { <-dts.readRl }()
		}
		result, rerr := dts.ddb.GetItemWithContext(ctx, input)
		if rerr != nil {
			return nil, rerr
		}
		if len(result.Item) == 0 {
			return nil, tableNotInDynamoErr{name.String(), dts.table}
		}
		attr := result.Item[dataAttr]
		if attr == nil || attr.B == nil {
			return nil, fmt.Errorf("NBS table %s in DynamoDB table %s is malformed", name, dts.table)
		}
		return attr.B, nil
	}

	input := dynamodb.GetItemInput{
		TableName: aws.String(dts.table),
		Key: map[string]*dynamodb.AttributeValue{
			dbAttr: {S: aws.String(fmtTableName(name))},
		},
	}

	data, err := getItem(&input)
	if _, notFound := err.(tableNotInDynamoErr); notFound {
		verbose.Logger(ctx).Sugar().Debugf("Eventually consistent read for %s failed; trying fully-consistent", name)
		input.ConsistentRead = aws.Bool(true)
		return getItem(&input)
	}
	return data, err
}
// fmtTableName derives the DynamoDB partition-key value for an NBS table.
// A bare table name is also a legal Noms database name, so tablePrefix is
// prepended to avoid key collisions (see the tablePrefix declaration).
func fmtTableName(name addr) string {
	return fmt.Sprintf("%s%s", tablePrefix, name.String())
}
// Write stores the table's bytes as a single DynamoDB item keyed by the
// prefixed table name. On success the bytes are also added to the optional
// in-memory cache so subsequent reads can skip DynamoDB.
func (dts *ddbTableStore) Write(ctx context.Context, name addr, data []byte) error {
	item := map[string]*dynamodb.AttributeValue{
		dbAttr:   {S: aws.String(fmtTableName(name))},
		dataAttr: {B: data},
	}
	input := dynamodb.PutItemInput{
		TableName: aws.String(dts.table),
		Item:      item,
	}
	_, putErr := dts.ddb.PutItemWithContext(ctx, &input)
	if putErr == nil && dts.cache != nil {
		dts.cache.Add(name, uint64(len(data)), data)
	}
	return putErr
}

View File

@@ -1,145 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2017 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
package nbs
import (
"context"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/util/sizecache"
)
// TestDynamoTableReaderAt exercises the DynamoDB-backed table store and
// reader: whole-table reads/writes (with and without a size cache, and
// under simulated eventual consistency) plus random-access reads.
func TestDynamoTableReaderAt(t *testing.T) {
	ddb := makeFakeDDB(t)

	// Build a small serialized NBS table from three chunks and seed the
	// fake DynamoDB with it under the formatted table name.
	chunks := [][]byte{
		[]byte("hello2"),
		[]byte("goodbye2"),
		[]byte("badbye2"),
	}

	tableData, h, err := buildTable(chunks)
	require.NoError(t, err)
	ddb.putData(fmtTableName(h), tableData)

	t.Run("ddbTableStore", func(t *testing.T) {
		t.Run("ReadTable", func(t *testing.T) {
			// Shared assertions: a present table round-trips intact, and
			// a missing table yields tableNotInDynamoErr with nil data.
			test := func(dts *ddbTableStore) {
				assert := assert.New(t)
				data, err := dts.ReadTable(context.Background(), h, &Stats{})
				require.NoError(t, err)
				assert.Equal(tableData, data)

				data, err = dts.ReadTable(context.Background(), computeAddr([]byte{}), &Stats{})
				assert.Error(err)
				assert.IsType(tableNotInDynamoErr{}, err)
				assert.Nil(data)
			}

			t.Run("EventuallyConsistentSuccess", func(t *testing.T) {
				test(&ddbTableStore{ddb, "table", nil, nil})
			})
			t.Run("EventuallyConsistentFailure", func(t *testing.T) {
				// Wrapper misses eventually-consistent reads, forcing the
				// store's fully-consistent retry path.
				test(&ddbTableStore{&eventuallyConsistentDDB{ddb}, "table", nil, nil})
			})
			t.Run("WithCache", func(t *testing.T) {
				tc := sizecache.New(uint64(2 * len(tableData)))
				dts := &ddbTableStore{ddb, "table", nil, tc}
				test(dts)

				// Table should have been cached on read
				baseline := ddb.NumGets()
				_, err := dts.ReadTable(context.Background(), h, &Stats{})
				require.NoError(t, err)
				assert.Zero(t, ddb.NumGets()-baseline)
			})
		})

		t.Run("WriteTable", func(t *testing.T) {
			t.Run("WithoutCache", func(t *testing.T) {
				assert := assert.New(t)
				dts := &ddbTableStore{makeFakeDDB(t), "table", nil, nil}
				require.NoError(t, dts.Write(context.Background(), h, tableData))

				data, err := dts.ReadTable(context.Background(), h, &Stats{})
				require.NoError(t, err)
				assert.Equal(tableData, data)
			})

			t.Run("WithCache", func(t *testing.T) {
				assert := assert.New(t)
				tc := sizecache.New(uint64(2 * len(tableData)))
				dts := &ddbTableStore{makeFakeDDB(t), "table", nil, tc}
				require.NoError(t, dts.Write(context.Background(), h, tableData))

				// Table should have been cached on write
				baseline := ddb.NumGets()
				data, err := dts.ReadTable(context.Background(), h, &Stats{})
				require.NoError(t, err)
				assert.Equal(tableData, data)
				assert.Zero(ddb.NumGets() - baseline)
			})
		})
	})

	t.Run("ReadAtWithCache", func(t *testing.T) {
		assert := assert.New(t)
		stats := &Stats{}

		tc := sizecache.New(uint64(2 * len(tableData)))
		tra := &dynamoTableReaderAt{&ddbTableStore{ddb, "table", nil, tc}, h}

		// First, read when table is not yet cached
		scratch := make([]byte, len(tableData)/4)
		baseline := ddb.NumGets()
		_, err := tra.ReadAtWithStats(context.Background(), scratch, 0, stats)
		require.NoError(t, err)
		assert.True(ddb.NumGets() > baseline)

		// Table should have been cached on read so read again, a different slice this time
		baseline = ddb.NumGets()
		_, err = tra.ReadAtWithStats(context.Background(), scratch, int64(len(scratch)), stats)
		require.NoError(t, err)
		assert.Zero(ddb.NumGets() - baseline)
	})
}
// eventuallyConsistentDDB simulates DynamoDB eventual consistency for
// tests: non-consistent GetItem calls always come back empty, forcing
// callers to retry with ConsistentRead set.
type eventuallyConsistentDDB struct {
	ddbsvc // embedded real/fake service used only for consistent reads
}
// GetItemWithContext delegates to the embedded service only when the
// request asks for a fully-consistent read; otherwise it returns an empty
// result, mimicking a stale replica that has not yet seen the write.
func (ec *eventuallyConsistentDDB) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInput, opts ...request.Option) (*dynamodb.GetItemOutput, error) {
	consistent := input.ConsistentRead != nil && *input.ConsistentRead
	if !consistent {
		// Simulate eventual consistency: report no item found.
		return &dynamodb.GetItemOutput{}, nil
	}
	return ec.ddbsvc.GetItemWithContext(ctx, input)
}

View File

@@ -95,15 +95,8 @@ func (ftp *fsTablePersister) Path() string {
return ftp.dir
}
func (ftp *fsTablePersister) CopyTableFile(ctx context.Context, r io.ReadCloser, fileId string, fileSz uint64, chunkCount uint32) error {
func (ftp *fsTablePersister) CopyTableFile(ctx context.Context, r io.Reader, fileId string, fileSz uint64, chunkCount uint32) error {
tn, f, err := func() (n string, cleanup func(), err error) {
defer func() {
cerr := r.Close()
if err == nil {
err = cerr
}
}()
ftp.removeMu.Lock()
var temp *os.File
temp, err = tempfiles.MovableTempFileProvider.NewFile(ftp.dir, tempTablePrefix)

View File

@@ -110,6 +110,7 @@ func (gcc *gcCopier) copyTablesToDir(ctx context.Context, tfp tableFilePersister
if err != nil {
return nil, err
}
defer r.Close()
sz := gcc.writer.ContentLength()
err = tfp.CopyTableFile(ctx, r, filename, sz, uint32(gcc.writer.ChunkCount()))

View File

@@ -246,7 +246,7 @@ func (j *chunkJournal) Path() string {
return filepath.Dir(j.path)
}
func (j *chunkJournal) CopyTableFile(ctx context.Context, r io.ReadCloser, fileId string, fileSz uint64, chunkCount uint32) error {
func (j *chunkJournal) CopyTableFile(ctx context.Context, r io.Reader, fileId string, fileSz uint64, chunkCount uint32) error {
if j.backing.readOnly() {
return errReadOnlyManifest
}

View File

@@ -229,12 +229,12 @@ func (m *fakeS3) CompleteMultipartUploadWithContext(ctx aws.Context, input *s3.C
}
func (m *fakeS3) GetObjectWithContext(ctx aws.Context, input *s3.GetObjectInput, opts ...request.Option) (*s3.GetObjectOutput, error) {
m.getCount++
m.assert.NotNil(input.Bucket, "Bucket is a required field")
m.assert.NotNil(input.Key, "Key is a required field")
m.mu.Lock()
defer m.mu.Unlock()
m.getCount++
obj, present := m.data[*input.Key]
if !present {
return nil, mockAWSError("NoSuchKey")

View File

@@ -491,8 +491,7 @@ func NewAWSStoreWithMMapIndex(ctx context.Context, nbfVerStr string, table, ns,
s3,
bucket,
readRateLimiter,
&ddbTableStore{ddb, table, readRateLimiter, nil},
awsLimits{defaultS3PartSize, minS3PartSize, maxS3PartSize, maxDynamoItemSize, maxDynamoChunks},
awsLimits{defaultS3PartSize, minS3PartSize, maxS3PartSize},
ns,
q,
}
@@ -507,8 +506,7 @@ func NewAWSStore(ctx context.Context, nbfVerStr string, table, ns, bucket string
s3,
bucket,
readRateLimiter,
&ddbTableStore{ddb, table, readRateLimiter, nil},
awsLimits{defaultS3PartSize, minS3PartSize, maxS3PartSize, maxDynamoItemSize, maxDynamoChunks},
awsLimits{defaultS3PartSize, minS3PartSize, maxS3PartSize},
ns,
q,
}
@@ -1464,6 +1462,7 @@ func (nbs *NomsBlockStore) WriteTableFile(ctx context.Context, fileId string, nu
if err != nil {
return err
}
defer r.Close()
return tfp.CopyTableFile(ctx, r, fileId, sz, uint32(numChunks))
}

View File

@@ -70,7 +70,7 @@ type tableFilePersister interface {
tablePersister
// CopyTableFile copies the table file with the given fileId from the reader to the TableFileStore.
CopyTableFile(ctx context.Context, r io.ReadCloser, fileId string, fileSz uint64, chunkCount uint32) error
CopyTableFile(ctx context.Context, r io.Reader, fileId string, fileSz uint64, chunkCount uint32) error
// Path returns the file system path. Use CopyTableFile instead of Path to
// copy a file to the TableFileStore. Path cannot be removed because it's used

View File

@@ -178,8 +178,6 @@ var CopiedNomsFiles []CopiedNomsFile = []CopiedNomsFile{
{Path: "store/nbs/dynamo_fake_test.go", NomsPath: "go/nbs/dynamo_fake_test.go", HadCopyrightNotice: true},
{Path: "store/nbs/dynamo_manifest.go", NomsPath: "go/nbs/dynamo_manifest.go", HadCopyrightNotice: true},
{Path: "store/nbs/dynamo_manifest_test.go", NomsPath: "go/nbs/dynamo_manifest_test.go", HadCopyrightNotice: true},
{Path: "store/nbs/dynamo_table_reader.go", NomsPath: "go/nbs/dynamo_table_reader.go", HadCopyrightNotice: true},
{Path: "store/nbs/dynamo_table_reader_test.go", NomsPath: "go/nbs/dynamo_table_reader_test.go", HadCopyrightNotice: true},
{Path: "store/nbs/file_manifest.go", NomsPath: "go/nbs/file_manifest.go", HadCopyrightNotice: true},
{Path: "store/nbs/file_manifest_test.go", NomsPath: "go/nbs/file_manifest_test.go", HadCopyrightNotice: true},
{Path: "store/nbs/file_table_persister.go", NomsPath: "go/nbs/file_table_persister.go", HadCopyrightNotice: true},

View File

@@ -793,7 +793,6 @@ DELIM
}
@test "import-create-tables: created table with force option can be added and committed as modified" {
skip "overwritten table cannot be added and committed as modified"
run dolt table import -c --pk=id test `batshelper jails.csv`
[ "$status" -eq 0 ]
[[ "$output" =~ "Import completed successfully." ]] || false

View File

@@ -0,0 +1,186 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
# This suite of tests is for testing the sql server's presentation of privileges, and privilege persistence between
# CLI and server instances.
#
# Caring about privileges on the CLI isn't really the point, but working in both modes ensures that persistence
# is working correctly. You won't see mention of working with servers in these tests because it's handled by
# running tests in this file using helper/local-remote.bash
# working dir will be test_db
# working dir will be test_db
# Builds the fixture every test in this suite uses: test_db/ containing two
# freshly-initialized dolt databases (db1, db2) and two users. Leaves the
# shell cd'd into test_db.
make_multi_test_repo() {
    rm -rf test_db
    mkdir test_db
    cd test_db

    mkdir db1
    cd db1
    dolt init
    cd ..

    mkdir db2
    cd db2
    dolt init
    cd ..

    ## All tests need a user, or two.
    dolt sql -q "CREATE USER tester1@localhost"
    dolt sql -q "CREATE USER tester2@localhost"
}
# working dir will be dolt_repo$$
# Removes the fixture created by make_multi_test_repo; assumes the shell is
# currently inside test_db.
delete_test_repo() {
    cd ..
    rm -rf test_db
}
# bats per-test setup: prepare common state without a default dolt repo,
# then build the two-database fixture.
setup() {
    setup_no_dolt_init
    make_multi_test_repo
}
# bats per-test teardown: remove the fixture, then run the suite-wide
# common teardown.
teardown() {
    delete_test_repo
    teardown_common
}
@test "sql-mysqldb-privs: smoke test for db table" {
dolt sql -q "GRANT SELECT ON db1.* TO tester1@localhost"
run dolt sql -q "SELECT host,user,db,select_priv as s,insert_priv as i from mysql.db"
[ $status -eq 0 ]
[[ $output =~ "localhost | tester1 | db1 | Y | N" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester1@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT SELECT ON `db1`.* TO `tester1`@`localhost`' ]] || false
dolt sql -q "GRANT INSERT ON db2.* TO tester2@localhost"
run dolt sql -q "SELECT user FROM mysql.db"
[ $status -eq 0 ]
[[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
run dolt sql -q "SELECT db FROM mysql.db where user = 'tester2'"
[ $status -eq 0 ]
[[ $output =~ "db2" ]] || false
! [[ $output =~ "db1" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester2@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT INSERT ON `db2`.* TO `tester2`@`localhost`' ]] || false
dolt sql -q "REVOKE SELECT ON db1.* FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.db"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
}
@test "sql-mysqldb-privs: smoke test for tables_priv table" {
dolt sql -q "GRANT SELECT ON db1.tbl TO tester1@localhost"
run dolt sql -q "SELECT host,user,db,table_name as t,table_priv FROM mysql.tables_priv"
[ $status -eq 0 ]
[[ $output =~ "localhost | tester1 | db1 | tbl | Select" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester1@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT SELECT ON `db1`.`tbl` TO `tester1`@`localhost`' ]] || false
dolt sql -q "GRANT INSERT ON db1.tbl TO tester2@localhost"
run dolt sql -q "SELECT user FROM mysql.tables_priv"
[ $status -eq 0 ]
[[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
run dolt sql -q "SELECT user,table_priv FROM mysql.tables_priv"
[ $status -eq 0 ]
[[ $output =~ "tester1 | Select" ]] || false
[[ $output =~ "tester2 | Insert" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester2@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT INSERT ON `db1`.`tbl` TO `tester2`@`localhost`' ]] || false
dolt sql -q "REVOKE SELECT ON db1.tbl FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.tables_priv"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
}
@test "sql-mysqldb-privs: smoke test for procs_priv table" {
dolt sql -q "GRANT EXECUTE ON PROCEDURE db1.dolt_log TO tester1@localhost"
run dolt sql -q "SELECT host,user,db,routine_name,routine_type,proc_priv FROM mysql.procs_priv"
[ $status -eq 0 ]
[[ $output =~ "localhost | tester1 | db1 | dolt_log | PROCEDURE | Execute" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester1@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT EXECUTE ON PROCEDURE `db1`.`dolt_log` TO `tester1`@`localhost`' ]] || false
dolt sql -q "GRANT GRANT OPTION ON PROCEDURE db1.dolt_diff TO tester2@localhost"
run dolt sql -q "SELECT user FROM mysql.procs_priv"
[ $status -eq 0 ]
[[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
run dolt sql -q "SELECT routine_name FROM mysql.procs_priv where user = 'tester2'"
[ $status -eq 0 ]
[[ $output =~ "dolt_diff" ]] || false
! [[ $output =~ "dolt_log" ]] || false
run dolt sql -q "SHOW GRANTS FOR tester2@localhost"
[ $status -eq 0 ]
[[ $output =~ 'GRANT USAGE ON PROCEDURE `db1`.`dolt_diff` TO `tester2`@`localhost` WITH GRANT OPTION' ]] || false
dolt sql -q "REVOKE EXECUTE ON PROCEDURE db1.dolt_log FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.procs_priv"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
[[ $output =~ "tester2" ]] || false
}
@test "sql-mysqldb-privs: procs_priv table should differentiate between functions and procedures" {
skip "Function Support is currently disabled"
dolt sql -q "GRANT EXECUTE ON FUNCTION db1.dolt_log TO tester1@localhost"
run dolt sql -q "SELECT host,user,db,routine_name as name,routine_type as type,proc_priv FROM mysql.procs_priv"
[ $status -eq 0 ]
[[ $output =~ "localhost | tester1 | db1 | dolt_log | FUNCTION | Execute" ]] || false
# revoking a procedure by the same name should not revoke the function permission
dolt sql -q "REVOKE EXECUTE ON PROCEDURE db1.dolt_log FROM tester1@localhost"
run dolt sql -q "SELECT host,user,db,routine_name as name,routine_type as type,proc_priv FROM mysql.procs_priv"
[ $status -eq 0 ]
[[ $output =~ "localhost | tester1 | db1 | dolt_log | FUNCTION | Execute" ]] || false
dolt sql -q "REVOKE EXECUTE ON FUNCTION db1.dolt_log FROM tester1@localhost"
run dolt sql -q "SELECT host,user,db,routine_name as name,routine_type as type,proc_priv FROM mysql.procs_priv"
[ $status -eq 0 ]
! [[ $output =~ "localhost | tester1 | db1 | dolt_log | FUNCTION | Execute" ]] || false
}
@test "sql-mysqldb-privs: revoke of non-existent permissions" {
dolt sql -q "REVOKE INSERT ON db1.* FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.db"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
dolt sql -q "REVOKE INSERT ON db1.tbl FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.tables_priv"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
dolt sql -q "REVOKE EXECUTE ON PROCEDURE db1.dolt_log FROM tester1@localhost"
run dolt sql -q "SELECT user FROM mysql.procs_priv"
[ $status -eq 0 ]
! [[ $output =~ "tester1" ]] || false
}

View File

@@ -730,7 +730,7 @@ behavior:
! [[ $output =~ "UPDATE" ]] || false
}
@test "sql-privs: revoking all privleges doesn't result in a corrupted privileges file" {
@test "sql-privs: revoking all privileges doesn't result in a corrupted privileges file" {
make_test_repo
dolt sql -q "CREATE USER tester@localhost"

View File

@@ -0,0 +1,158 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
# Suite of tests to validate restrictions on procedure privileges.
# working dir will be test_db, which has a single database: mydb
make_test_db_and_users() {
rm -rf test_db
mkdir test_db
cd test_db
mkdir mydb
cd mydb
dolt init
cd ..
## All tests need a user, or two.
dolt sql -q "CREATE USER neil@localhost IDENTIFIED BY 'pwd'"
dolt sql -q "CREATE USER mike@localhost IDENTIFIED BY 'pwd'"
}
# working dir will be dolt_repo$$
delete_test_repo() {
cd ..
rm -rf test_db
}
setup() {
setup_no_dolt_init
make_test_db_and_users
}
teardown() {
delete_test_repo
teardown_common
}
@test "sql-procedure-privs: smoke test" {
dolt sql -q "GRANT SELECT ON mydb.* TO neil@localhost"
dolt sql -q "GRANT EXECUTE ON mydb.* TO mike@localhost"
dolt -u neil -p pwd sql -q "select * from dolt_log()"
run dolt -u neil -p pwd sql -q "call dolt_branch('br1')"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'neil'@'localhost" ]] || false
dolt -u mike -p pwd sql -q "select * from dolt_log()"
dolt -u mike -p pwd sql -q "call dolt_branch('br1')"
run dolt -u mike -p pwd sql -q "call dolt_gc()"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'mike'@'localhost" ]] || false
# Admin procedure privs must be granted explicitly
dolt sql -q "GRANT EXECUTE ON PROCEDURE mydb.dolt_gc TO mike@localhost"
dolt -u mike -p pwd sql -q "call dolt_gc()"
}
mike_blocked_check() {
run dolt -u mike -p pwd sql -q "call $1"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'mike'@'localhost" ]] || false
}
@test "sql-procedure-privs: admin procedures all block" {
# Execute privs on a DB does not grant admin procedure privs
dolt sql -q "GRANT EXECUTE ON mydb.* TO mike@localhost"
mike_blocked_check "dolt_backup('sync','foo')"
mike_blocked_check "dolt_clone('file:///myDatabasesDir/database/.dolt/noms')"
mike_blocked_check "dolt_fetch('origin')"
mike_blocked_check "dolt_gc()"
mike_blocked_check "dolt_pull('origin')"
mike_blocked_check "dolt_purge_dropped_databases()"
mike_blocked_check "dolt_remote('add','origin1','Dolthub/museum-collections')"
mike_blocked_check "dolt_undrop('foo')"
# Verify non-admin procedures are executable, not an exhaustive list tho.
dolt -u mike -p pwd sql -q "call dolt_branch('br1')"
dolt -u mike -p pwd sql -q "call dolt_checkout('br1')"
}
@test "sql-procedure-privs: direct execute privs are let through" {
# Execute privs on a DB does not grant admin procedure privs
dolt sql -q "GRANT EXECUTE ON PROCEDURE mydb.dolt_gc TO neil@localhost"
dolt -u neil -p pwd sql -q "call dolt_gc()"
run dolt -u neil -p pwd sql -q "call dolt_fetch('origin')"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'neil'@'localhost" ]] || false
# since no db privs exist, this user should not be able to do non-admin procedures
run dolt -u neil -p pwd sql -q "call dolt_branch('br1')"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'neil'@'localhost" ]] || false
}
@test "sql-procedure-privs: grant option works as explicit grant" {
# Give Mike grant, but not execute perms. Verify he can grant to Neil, but can't execute.
dolt sql -q "GRANT GRANT OPTION ON PROCEDURE mydb.dolt_gc TO mike@localhost"
# Being able to grant access does not give you access
mike_blocked_check "dolt_gc()"
run dolt -u neil -p pwd sql -q "call dolt_gc()"
[ $status -eq 1 ]
[[ $output =~ "Access denied for user 'neil'@'localhost' to database 'mydb'" ]] || false
dolt -u mike -p pwd sql -q "GRANT EXECUTE ON PROCEDURE mydb.dolt_gc TO neil@localhost"
dolt -u neil -p pwd sql -q "call dolt_gc()"
}
@test "sql-procedure-privs: grant option at DB level does not give admin grant priviliges" {
# Give Mike grant, but non-execute perms. Verify he can grant to Neil, but can't execute.
dolt sql -q "GRANT GRANT OPTION ON mydb.* TO mike@localhost"
# Being able to grant access does not give you access
mike_blocked_check "dolt_gc()"
run dolt -u mike -p pwd sql -q "GRANT EXECUTE ON PROCEDURE mydb.dolt_gc TO neil@localhost"
[ $status -eq 1 ]
[[ $output =~ "command denied to user 'mike'@'localhost" ]] || false
}
@test "sql-procedure-privs: grant option at DB level works for non-admin procedures" {
dolt sql -q "GRANT GRANT OPTION ON mydb.* TO mike@localhost"
mike_blocked_check "dolt_branch('br1')"
run dolt -u neil -p pwd sql -q "call dolt_branch('br1')"
[ $status -eq 1 ]
[[ $output =~ "Access denied for user 'neil'@'localhost' to database 'mydb'" ]] || false
dolt -u mike -p pwd sql -q "GRANT EXECUTE ON PROCEDURE mydb.dolt_branch TO neil@localhost"
dolt -u neil -p pwd sql -q "call dolt_branch('br1')"
}
@test "sql-procedure-privs: non-dolt procedure execution" {
dolt sql <<SQL
DELIMITER //
CREATE PROCEDURE user_proc()
BEGIN
SELECT 'hello cruel world';
END//
SQL
dolt sql -q "GRANT GRANT OPTION ON mydb.* TO mike@localhost"
mike_blocked_check "user_proc()"
run dolt -u neil -p pwd sql -q "call user_proc()"
[ $status -eq 1 ]
[[ $output =~ "Access denied for user 'neil'@'localhost' to database 'mydb'" ]] || false
dolt -u mike -p pwd sql -q "GRANT EXECUTE ON PROCEDURE mydb.user_proc TO neil@localhost"
dolt -u neil -p pwd sql -q "call user_proc()"
}
# TODO - Alter Routine Grants can be created and revoked (tested in enginetests), but we can't
# actually alter any routines in a meaningful way, so until we can there is nothing to test.

View File

@@ -1,3 +0,0 @@
Create the ../*.pem files that are used by these tests.
Expects to be run from this directory like `go run .`.

View File

@@ -1,3 +0,0 @@
module github.com/dolthub/dolt/integration-tests/go-sql-server-driver/gencerts
go 1.19

View File

@@ -21,6 +21,7 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"path/filepath"
"math/big"
"net/url"
@@ -45,39 +46,38 @@ import (
// TODO: Further tests which should not verify? (SHA-1 signatures, expired
// roots or intermediates, wrong isCA, wrong key usage, etc.)
const RelPath = "../testdata"
func main() {
func GenerateX509Certs(dir string) error {
rsacerts, err := MakeRSACerts()
if err != nil {
panic(err)
return fmt.Errorf("could not make rsa certs: %w", err)
}
err = WriteRSACerts(rsacerts)
err = WriteRSACerts(dir, rsacerts)
if err != nil {
panic(err)
return fmt.Errorf("could not write rsa certs: %w", err)
}
edcerts, err := MakeEd25519Certs()
if err != nil {
panic(err)
return fmt.Errorf("could not make ed25519 certs: %w", err)
}
err = WriteEd25519Certs(edcerts)
err = WriteEd25519Certs(dir, edcerts)
if err != nil {
panic(err)
return fmt.Errorf("could not write ed25519 certs: %w", err)
}
return nil
}
func WriteRSACerts(rsacerts TestCerts) error {
err := os.WriteFile(filepath.Join(RelPath, "rsa_root.pem"), pem.EncodeToMemory(&pem.Block{
func WriteRSACerts(dir string, rsacerts TestCerts) error {
err := os.WriteFile(filepath.Join(dir, "rsa_root.pem"), pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: rsacerts.Root.Raw,
}), 0664)
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "rsa_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "rsa_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: rsacerts.Leaf.Raw,
}), pem.EncodeToMemory(&pem.Block{
@@ -87,7 +87,7 @@ func WriteRSACerts(rsacerts TestCerts) error {
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "rsa_key.pem"), pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "rsa_key.pem"), pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(rsacerts.LeafKey.(*rsa.PrivateKey)),
}), 0664)
@@ -95,7 +95,7 @@ func WriteRSACerts(rsacerts TestCerts) error {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "rsa_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "rsa_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: rsacerts.ExpiredLeaf.Raw,
}), pem.EncodeToMemory(&pem.Block{
@@ -105,7 +105,7 @@ func WriteRSACerts(rsacerts TestCerts) error {
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "rsa_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "rsa_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(rsacerts.ExpiredLeafKey.(*rsa.PrivateKey)),
}), 0664)
@@ -116,15 +116,15 @@ func WriteRSACerts(rsacerts TestCerts) error {
return nil
}
func WriteEd25519Certs(edcerts TestCerts) error {
err := os.WriteFile(filepath.Join(RelPath, "ed25519_root.pem"), pem.EncodeToMemory(&pem.Block{
func WriteEd25519Certs(dir string, edcerts TestCerts) error {
err := os.WriteFile(filepath.Join(dir, "ed25519_root.pem"), pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: edcerts.Root.Raw,
}), 0664)
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "ed25519_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "ed25519_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: edcerts.Leaf.Raw,
}), pem.EncodeToMemory(&pem.Block{
@@ -138,7 +138,7 @@ func WriteEd25519Certs(edcerts TestCerts) error {
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "ed25519_key.pem"), pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "ed25519_key.pem"), pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: keybytes,
}), 0664)
@@ -146,7 +146,7 @@ func WriteEd25519Certs(edcerts TestCerts) error {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "ed25519_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "ed25519_exp_chain.pem"), append(pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: edcerts.ExpiredLeaf.Raw,
}), pem.EncodeToMemory(&pem.Block{
@@ -160,7 +160,7 @@ func WriteEd25519Certs(edcerts TestCerts) error {
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(RelPath, "edcerts_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
err = os.WriteFile(filepath.Join(dir, "edcerts_exp_key.pem"), pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: keybytes,
}), 0664)

View File

@@ -1,3 +0,0 @@
Creates files with a jwks and a jwt that can be validated using the jwks.
Used in sql-server-jwt-auth.yaml.

View File

@@ -1,14 +0,0 @@
module github.com/dolthub/dolt/integration-tests/go-sql-server-driver/genjwt
go 1.19
require (
github.com/google/uuid v1.3.0
gopkg.in/square/go-jose.v2 v2.6.0
)
require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/stretchr/testify v1.8.1 // indirect
golang.org/x/crypto v0.1.0 // indirect
)

View File

@@ -1,24 +0,0 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -29,42 +29,46 @@ import (
"gopkg.in/square/go-jose.v2/jwt"
)
const RelPath = "../testdata"
var sub = "test_user"
var iss = "dolthub.com"
var aud = "my_resource"
var onBehalfOf = "my_user"
func main() {
// Generates a JWKS and a JWT for authenticating against it. Outputs it into
// files `|dir|/token.jwt` and `|dir|/test_jwks.json`.
//
// These files are used by sql-server-jwt-auth.yaml, for example.
func GenerateTestJWTs(dir string) error {
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
fmt.Println(err)
return fmt.Errorf("could not generate rsa key: %w", err)
}
pubKey := privKey.Public()
kid, err := uuid.NewRandom()
if err != nil {
fmt.Println(err)
return fmt.Errorf("could not generate random uuid: %w", err)
}
err = writeJWKSToFile(pubKey, kid.String())
err = writeJWKSToFile(dir, pubKey, kid.String())
if err != nil {
fmt.Println(err)
return fmt.Errorf("could not write jwks to file: %w", err)
}
jwt, err := generateJWT(privKey, kid.String())
if err != nil {
fmt.Println(err)
return fmt.Errorf("could not generate jwt: %w", err)
}
err = ioutil.WriteFile(filepath.Join(RelPath, "token.jwt"), []byte(jwt), 0644)
err = ioutil.WriteFile(filepath.Join(dir, "token.jwt"), []byte(jwt), 0644)
if err != nil {
fmt.Println(err)
return fmt.Errorf("could not write jwt to file: %w", err)
}
return nil
}
func writeJWKSToFile(pubKey crypto.PublicKey, kid string) error {
func writeJWKSToFile(dir string, pubKey crypto.PublicKey, kid string) error {
jwk := jose.JSONWebKey{
KeyID: kid,
Key: pubKey,
@@ -78,7 +82,7 @@ func writeJWKSToFile(pubKey crypto.PublicKey, kid string) error {
return err
}
err = ioutil.WriteFile(filepath.Join(RelPath, "test_jwks.json"), jwksjson, 0644)
err = ioutil.WriteFile(filepath.Join(dir, "test_jwks.json"), jwksjson, 0644)
if err != nil {
return err
}

View File

@@ -4,8 +4,10 @@ go 1.21
require (
github.com/dolthub/dolt/go v0.40.4
github.com/google/uuid v1.3.0
github.com/stretchr/testify v1.8.2
golang.org/x/sync v0.3.0
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/yaml.v3 v3.0.1
)
@@ -14,7 +16,8 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-sql-driver/mysql v1.7.2-0.20230713085235-0b18dac46f7f // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/sys v0.12.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/sys v0.13.0 // indirect
)
replace github.com/dolthub/dolt/go => ../../go/

View File

@@ -5,6 +5,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-sql-driver/mysql v1.7.2-0.20230713085235-0b18dac46f7f h1:4+t8Qb99xUG/Ea00cQAiQl+gsjpK8ZYtAO8E76gRzQI=
github.com/go-sql-driver/mysql v1.7.2-0.20230713085235-0b18dac46f7f/go.mod h1:6gYm/zDt3ahdnMVTPeT/LfoBFsws1qZm5yI6FmVjB14=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -14,12 +18,16 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -14,7 +14,43 @@
package main
import "testing"
import (
"flag"
"log"
"os"
"testing"
)
// We generate various TLS keys and certificates and some JWKS/JWT material
// which the tests reference. We do this once for the test run, because it can
// be expensive, and we expose the location of the generated files through an
// environment variable. dtestutils/sql_server_driver interpolates that
// environment variable into a few fields in the test definition.
//
// It's good enough for now, and it keeps us from checking in certificates or
// JWT which will expire at some point in the future.
func TestMain(m *testing.M) {
old := os.Getenv("TESTGENDIR")
defer func() {
os.Setenv("TESTGENDIR", old)
}()
gendir, err := os.MkdirTemp(os.TempDir(), "go-sql-server-driver-gen-*")
if err != nil {
log.Fatalf("could not create temp dir: %v", err)
}
defer os.RemoveAll(gendir)
err = GenerateTestJWTs(gendir)
if err != nil {
log.Fatalf("%v", err)
}
err = GenerateX509Certs(gendir)
if err != nil {
log.Fatalf("%v", err)
}
os.Setenv("TESTGENDIR", gendir)
flag.Parse()
os.Exit(m.Run())
}
func TestConfig(t *testing.T) {
RunTestsFile(t, "tests/sql-server-config.yaml")

View File

@@ -1,26 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICOzCCAe2gAwIBAgIBAzAFBgMrZXAwWzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
DURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
ZWQyNTUxOSBJbnRlcm1lZGlhdGUwHhcNMjIxMDI2MjEwMTQyWhcNMzIxMDIzMjEw
MTQyWjBTMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNRG9sdEh1YiwgSW5jLjEsMCoG
A1UEAxMjZG9sdCBpbnRlZ3JhdGlvbiB0ZXN0cyBlZDI1NTE5IExlYWYwKjAFBgMr
ZXADIQBq59gmS/TqiLFwMpug/QSxGiq/zzMPQBWOe+l0o8tbkKOB3TCB2jAOBgNV
HQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAf
BgNVHSMEGDAWgBS8Fugt5Yjb7mabErluXQOwId4DfTCBgwYDVR0RBHwweoIrZG9s
dC1pbnN0YW5jZS5kb2x0LWludGVncmF0aW9uLXRlc3QuZXhhbXBsZYZLc3BpZmZl
Oi8vZG9sdC1pbnRlZ3JhdGlvbi10ZXN0cy5kZXYudHJ1c3QuZG9sdGh1Yi5jb20u
ZXhhbXBsZS9kb2x0LWluc3RhbmNlMAUGAytlcANBAF7vtPl1usXT+WgeD72BEdYB
2E8PbORVYT05SrjRYRcdHNegWQUN2fhKE/+WNeeOVfGQBcwMlObof6deraq9uw8=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBwDCCAXKgAwIBAgIBAjAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowWzEL
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2Rv
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBJbnRlcm1lZGlhdGUwKjAFBgMr
ZXADIQC63kDzz+nGeTtt2CcA2M3Q1R8YephuuUzxlvEB+cgj5KNjMGEwDgYDVR0P
AQH/BAQDAgIEMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLwW6C3liNvuZpsS
uW5dA7Ah3gN9MB8GA1UdIwQYMBaAFOE9s81S97V1S09D3k0obt02yhrpMAUGAytl
cANBAJkX45OPKCFrJ2EmgXntZQFznQuUriA68Pxaxxzy3/W1jDtxf2cccDxtS1TJ
uPGtJ5Ri8dbk+5FgK3GQFQweDwA=
-----END CERTIFICATE-----

View File

@@ -1,27 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICQzCCAfWgAwIBAgIBBDAFBgMrZXAwWzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
DURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
ZWQyNTUxOSBJbnRlcm1lZGlhdGUwHhcNMjIxMDI2MjEwMTQyWhcNMjIxMDI3MDkw
MTQyWjBbMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNRG9sdEh1YiwgSW5jLjE0MDIG
A1UEAxMrZG9sdCBpbnRlZ3JhdGlvbiB0ZXN0cyBlZDI1NTE5IEV4cGlyZWQgTGVh
ZjAqMAUGAytlcAMhAF6ENDzBPmj6JXxySz9SBR4eh6pOI+IEeepQuqa0Pvn4o4Hd
MIHaMA4GA1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMB
Af8EAjAAMB8GA1UdIwQYMBaAFLwW6C3liNvuZpsSuW5dA7Ah3gN9MIGDBgNVHREE
fDB6gitkb2x0LWluc3RhbmNlLmRvbHQtaW50ZWdyYXRpb24tdGVzdC5leGFtcGxl
hktzcGlmZmU6Ly9kb2x0LWludGVncmF0aW9uLXRlc3RzLmRldi50cnVzdC5kb2x0
aHViLmNvbS5leGFtcGxlL2RvbHQtaW5zdGFuY2UwBQYDK2VwA0EAsJcZ7AAXXkmW
78cvfT7aa++y/t++altVJs0Qy8zZcP4XBBuPpdzxrQRcILQ2lyrpER8wrSB67UH6
LSeDh4FuCA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBwDCCAXKgAwIBAgIBAjAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowWzEL
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xNDAyBgNVBAMTK2Rv
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBJbnRlcm1lZGlhdGUwKjAFBgMr
ZXADIQC63kDzz+nGeTtt2CcA2M3Q1R8YephuuUzxlvEB+cgj5KNjMGEwDgYDVR0P
AQH/BAQDAgIEMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLwW6C3liNvuZpsS
uW5dA7Ah3gN9MB8GA1UdIwQYMBaAFOE9s81S97V1S09D3k0obt02yhrpMAUGAytl
cANBAJkX45OPKCFrJ2EmgXntZQFznQuUriA68Pxaxxzy3/W1jDtxf2cccDxtS1TJ
uPGtJ5Ri8dbk+5FgK3GQFQweDwA=
-----END CERTIFICATE-----

View File

@@ -1,3 +0,0 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIIq53ao+dZ09B33ER3RWNEbIhuQAOP/aza1sLDcCaBwN
-----END PRIVATE KEY-----

View File

@@ -1,11 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBlzCCAUmgAwIBAgIBATAFBgMrZXAwUzELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
DURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2RvbHQgaW50ZWdyYXRpb24gdGVzdHMg
ZWQyNTUxOSBSb290MB4XDTIyMTAyNjIxMDE0MloXDTMyMTAyMzIxMDE0MlowUzEL
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xLDAqBgNVBAMTI2Rv
bHQgaW50ZWdyYXRpb24gdGVzdHMgZWQyNTUxOSBSb290MCowBQYDK2VwAyEAUSTT
dZ6hXoZFVLDT4li0j/4K0//gRILcsNnPeTXeENSjQjBAMA4GA1UdDwEB/wQEAwIC
BDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBThPbPNUve1dUtPQ95NKG7dNsoa
6TAFBgMrZXADQQCS//dI2SsZnwaLk2I4m9WCHihUyZ2wWeDonwsPXkBtNBxJZnJb
tw0xf6bL+3opXeQfVTkn/BePZ8s4hbeBK9AO
-----END CERTIFICATE-----

View File

@@ -1,3 +0,0 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIIcPLEb34wrHmDff8cr7jjLaaaRyWEd+kuYw2h1GRA9U
-----END PRIVATE KEY-----

View File

@@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDbDCCAlSgAwIBAgIUdWEanf/1+cmS33nZDPY+gkQwS+gwDQYJKoZIhvcNAQEL
BQAwTjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4xJzAlBgNV
BAMTHnRlc3RkYXRhIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0yMjEwMjYyMDM1
MDBaFw0yNzEwMjUyMDM1MDBaME4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0
SHViLCBJbmMuMScwJQYDVQQDEx50ZXN0ZGF0YSBDZXJ0aWZpY2F0ZSBBdXRob3Jp
dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLAQ88jtxKIH0Uc0Yp
oUmM0Bx3/fBqgbYAGJ1cxtkXahhGp94ICe0gmASnbPuAY22X0zf55C94semPNNgb
xV/FHftvyi720z3wwOk8twa8I4vjb1mnxlPZzS2Xd1pb4KnUtjOemGfZOn6OWbXF
ukf5uNDKUZcFPPjaiAnQ+kK6vjYWZjY6Hn4KVAjBRylQj86hzgF0cc7B4WOX3L6L
ahY56urFElKnFh8vCydSfyZqtz56ng3Gc83PBIEkTTgQVwFJkx+Azh73NaTGwXcv
3Wj4D+TzF2T0JsHe6s1CWyoHxvccwoUdAv8HGzzHVcm+81KMdy9r9e7R3kyu9HSK
D3sBAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
A1UdDgQWBBRzOWBY5hQAM5obC3y+nbHKnvQtmzANBgkqhkiG9w0BAQsFAAOCAQEA
yKsw7CLYQQ2i9jzislIUF0pMW03rLTBPSyv78mhUrfaL2TncdJAPTMdR5KaFTKSy
2AzuYkIN9gU0blk73sxbtdNyZlpP0MQHRuRkgpuXii0tWQ0f6uhLaZRJvLm4Hjsj
Sma8ydO3/7FvdTby6Uv1Rivd53BGfVAcw8W1oC+8KfrDhUsWzqcDH6Aiszz0utKr
XAqiOdNUSy2riyxc3s9RH2j20BNj6vWkz8ZoRdBa2pf/oRtYF2ZJjCZq7eH5hlSj
/Am5Yw9Cc0/48Tm58e4V2SDHys9ld8EBKOMlo8djk3q0LxGtZ41O1hr4iaHTkWyl
2wYWEa395xncUBUqvCpKyA==
-----END CERTIFICATE-----

View File

@@ -1,67 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIGETCCA/mgAwIBAgIBAzANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjE5MDcGA1UEAxMwZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgSW50ZXJtZWRpYXRlMB4XDTIyMTAyNjIxMDEz
N1oXDTMyMTAyMzIxMDEzN1owWDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRI
dWIsIEluYy4xMTAvBgNVBAMTKGRvbHQgaW50ZWdyYXRpb24gdGVzdHMgUlNBIDQw
OTYtYml0IExlYWYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDVFmdv
tpYlR7XPQlnSbza5io/JAmGlOX30InseMZkddyURv+9rs8FY5PuEZyNH5VcJ/w7n
xP3MwD8Uctojnl2FoEZjXreIIefPmyeLRgSXBTNE+iioTXn7B2sfPXFr4GeRborH
E8GZJOgztlWrBkPinDn9dcY2tJzlh0HWIRedeohVlqs45Uy7u1UkpGyVZPobTXm3
9S7gSyeGRnAgaIRPfVZdTkUHCY4x+Hn94Uj5U7TAH6QYLzeKGmFIRoQxgabPaZw+
Ug0XAVDIoYmlOPgphQjkLpjLDEt7nhxnEvMG68ZrR6c7WGOS+eC6HasAdgnxWmRo
HRSMdKKyDh5TwVbRGxlHAhcFPEYqDwUXb+H781Cia4MAo4eUiblBdEtQs97ymeRt
HBoSU2ORQIOYx568SZwKDx3/HwyHd/5jZ0oM0kMoVJhvjjiF30su1Tuku6FT0Uu4
NsIPf0Sq3BHERPz244t9yyFobbT8WdtNHGOOjEUWSP/ho/9hez6rboH2No1K1RuN
2wQlQ5mNIQyPwkrACOdhYvQPCs0gp4Y3wgdrQ7jqccObXy5hokRC92WfgwCFVIEl
JT9lOoBElH5IS2BaP+4k9k/A/LXQ87OmqRvZlKsaMX9LdUZoLFS3EzSDFdnWdgtf
QJIDX0T1l8LQaNhbcbfNVrRbD8+BSB3tdllv9wIDAQABo4HdMIHaMA4GA1UdDwEB
/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB8GA1Ud
IwQYMBaAFFVLRPReehc5TLVcwASWmZzRgdVfMIGDBgNVHREEfDB6gitkb2x0LWlu
c3RhbmNlLmRvbHQtaW50ZWdyYXRpb24tdGVzdC5leGFtcGxlhktzcGlmZmU6Ly9k
b2x0LWludGVncmF0aW9uLXRlc3RzLmRldi50cnVzdC5kb2x0aHViLmNvbS5leGFt
cGxlL2RvbHQtaW5zdGFuY2UwDQYJKoZIhvcNAQELBQADggIBAKeAj0ALw5Qbc45u
kjGx856yoFdunVXRujz5/v37AuGEEV16DMnONroHD2DSss2vxGEQGEkvypgWOLE6
L5QPqH93W+deVrVeHS1FNWbEWGVEJEtIZOhZsTCU9bIj+WtgcHDCk7DHE2t2DBeh
QH4aDPfkPL0vOmD/H6Mq0dbPPJW6FuS0tIlCXorKHM98lqmOWcxDnbGl1aH4uITo
GB6dltX2YU9gM5G15Np9Nng2d1owTbOHt5sMvtKxCZeb+AYZvTGCFq8tRTlGvxHZ
Xr39YmtGbplzkEq8EVEMUTYHse0cdsw2xxYkq9aqYegrBHHfNFybv2U6Rz+yxco5
p44NecwZgsSm4+ZEb6gHg9RSZ/egDKHFEkQgapjQcRrHxrNqUmn2/zMmEs9uJLYM
nYCxrSlGY+wULDk9wsAStrz5n0xhsl0mE/CjRcwtiFyNW5QqBD//d4bacfFhPMA1
1Ce2mcha+PZhLC43zxuN5DMFNJWEiOzUpH72CwQ6UpnLZnL2Kkhff3SEuPAn83s/
8zHXEWYbvLlBVTZjTxJ5+4YqyEQaf4SHZoIJXLwUbp5ZSz1EIP6qlgqGtxHYBePo
KUtkFjf5aWt1nQ/Yu1sIBMa4i+xfQey/zwQS1smLrwlzh6QqvykINQCUUMzN2rQZ
kAddIrPAEFcBeHRQs93N+IqisBpA
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFljCCA36gAwIBAgIBAjANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
MjMyMTAxMzdaMGAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
MTkwNwYDVQQDEzBkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBJ
bnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC47wob
mq2NLhf6EJdkWeli1E0ViOVpGOM05FNtcrYuX0A/y5VQtmavwchdn02Fa0ueEu3I
JU9gqYu8Ubpa9fE3xWGA97543yx8Y3/blL6EGD4yWzf+iMb3R2qOe/omTOPllk3g
D4pghlaSQe5ZRzzvVfUBH1Qj6WTSHcUoRCKUYaaBFpLxapjAS90Vf6PL1GQabdaq
JN/BUbC1dR/4Z+brelUy9NSvAXg7/HtndiRMl3sOU8wh6NXVpPYta4xhkGr230Cc
t1kOMPRSq4px9DOx3vZJCyOUy1Ro2CgAU2px2PlBG+95/TrqTgO6G8DxGYSV2bDY
ZpHX1MyRoaYAuRvvuffXt17CCHbiD5i9VYy6F8WBLd16l96RsgU6sREOryYoXFZ0
K9oUA69PEabq7H6Tlg6sQgaQ6u1No/H4H2eYTtmhdcOnaRWPq5i/x7EZ/cnwp9zc
7P0afMTrKl1ezXxXOVRVpmk7SD53lqXYXNJpy1pf2wTvPnl4mq1aCnQtHPmEFbuJ
LzejLBKrelfm7HMmQfxiKYKCHcth96eclP9GXhmA6XkU2BqbXGDEetDv+YiRdaQl
wKdfRhCZPBVBcy7DnqJoa0lss5l0e3lTu2+wW5Znb3FzXdJSnl1e6togF9IgJAkH
n4Fgs4/33TXQi0XPmy/iTbiUR8Ht8HhGBO4A2QIDAQABo2MwYTAOBgNVHQ8BAf8E
BAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVUtE9F56FzlMtVzABJaZ
nNGB1V8wHwYDVR0jBBgwFoAUmmgyBnblEP+8fE5WcVV1fDkenHswDQYJKoZIhvcN
AQELBQADggIBAIzbD8T6npRjMcTCHTfirl4r2eM9r6ANHIZgro5HK8EaTGaD2pQC
3nnh9dJfnw/bbIUG6yEacAAfp1Krba/f+z3B6PyIdbhGAkYaAZzyujzcyPZN3yx0
AIzZxwO4f9mpZ1Q95Xpn5ygozFKzZUg+AYW2qmyftDCVtHcZWBnKREgE64PMZ9b4
/sajWdmx9jdr6algdEUu4kIxGvAq5C2pgydh1mpVcx4Znvroczip+dlUAb5cudGP
krzCmdi9RxeGc/RIghNRNBtKVQtMh3nQwE0YOcIRY3T0WwCJHoRqCX9SoJvS/mYg
mpm4YxLf9NXxnhTCNTCgBZ+lYqqW4nt6msh10inYg/nSDgWoU50VC5WOQwmVbAXx
N4JrONvNXElWdEVkz8V2Lq1mwA6+4Mf1Rjau+j04z6bqZkdMYzCH0fG0to5B4fiM
+XfoFDgZfnymSuEPKjo4vsGLwwNAwfsVNIuiqEkJODKf31p9YNgNW23v6uKzV/GR
x6rKidp6XjfUkSXdmoPd4+qdhJLe+IQEVtoBUALlpGEYckin0L1/9Sl/GIucnkz3
bjq+NazgnPeRb2YdfiQBsY5C7b9x7bbRZdtskCtIjrdzvYr+Hil0xHDlqRSlOHZz
1snsRgG+DJF7rEPiXayz89JNrucWsrnyTYiQHANXWcwSKacILL1jneum
-----END CERTIFICATE-----

View File

@@ -1,67 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIGGTCCBAGgAwIBAgIBBDANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjE5MDcGA1UEAxMwZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgSW50ZXJtZWRpYXRlMB4XDTIyMTAyNjIxMDEz
N1oXDTIyMTAyNzA5MDEzN1owYDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDURvbHRI
dWIsIEluYy4xOTA3BgNVBAMTMGRvbHQgaW50ZWdyYXRpb24gdGVzdHMgUlNBIDQw
OTYtYml0IEV4cGlyZWQgTGVhZjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
ggIBAN2eUhKl74hhAnLW4N5x3UHkiHkcoWzck6Qfg3u7d3OhtURYVMgUTynW/JKb
WfHBGT8yG77HT/GF0x+sV+IajZeZT1F4xou7sO9MFL7QDyEoaZo37dLkhr+mWkYb
YadcIRNAFY/vuI/xP5DMPVrZNzoDp6VHnW3NwlYWYlPLLkjIGcZafVkt3UfI0XzZ
4QhKtPDNopIt3lLmod5HLwcjiWIcMyjWJVue4kP71H9AA+7edCQ9kDmfO+v4JtUr
3AE1q+vwNQe8mXaAj8aRXJc2m6qmRfbeVdsI0YYdeDFyuhahb2hqaD/pAEbaNz0p
hy8AvPRY/oUwmF0oZ92YI94DUDgxneFyUp0I4+4ngUKUYwXHg0Elvw9w472rNBHB
qNAbPh1wi4bBKs4yVZaZB35ESWHl9gmFQMN5+dGgnB6kVgLZOmBKoaVwBjQ+SeSQ
QhDUnNph70y4LSNY+GEKWeYY6wZH0PbRa4PNhuXSs4aE9sHCbWKyRR1zlfoEloVr
xj1/Dx04nP0/tFcEcb0XMjOcTH/2484SJ9smBn8HZwybltIdZcpPNNVN98ZiPPK2
BXR9bOWYEJsjovGFVNSYQP0kURAez4qwFuppZ0WUHH5STJLBlkRhe3YQEG3RHojj
H8Tb1ynUJuLdM1dTCQe+Q9XVtUOXfxKBX+kJdphR/z3xS9FrAgMBAAGjgd0wgdow
DgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQC
MAAwHwYDVR0jBBgwFoAUVUtE9F56FzlMtVzABJaZnNGB1V8wgYMGA1UdEQR8MHqC
K2RvbHQtaW5zdGFuY2UuZG9sdC1pbnRlZ3JhdGlvbi10ZXN0LmV4YW1wbGWGS3Nw
aWZmZTovL2RvbHQtaW50ZWdyYXRpb24tdGVzdHMuZGV2LnRydXN0LmRvbHRodWIu
Y29tLmV4YW1wbGUvZG9sdC1pbnN0YW5jZTANBgkqhkiG9w0BAQsFAAOCAgEAQWgb
s9Y7kqnhxZr5UrZDqadVGCULU+M8+UqxLtT4IlS23z49uXcMgylX7Zzb1IwSHwN0
crZKzi/O0biTCFT0Kq3lTdLPrUUEt4qJXnMlLz5UYLs3ujSrDbCjNQMr7YM6/bjo
LPmThTlCUR1L611SJCYTg4zXlCOuTOqPLzrsyW7yoqk6++HIzf7x2CI5VAW7FrrV
J6QG80WKaDGRkVMINXGVxkcUEKQ69hc218jDA70J60PpiPHXmv/MzvzMc0dxnw1m
mB/4Cy9wsOP6M4YL8flmTbD6qLeMbmGVRDNJknQ+bo+RPAG6yO/TGXTYwSfPH9ki
wOE5OysRB8Rm9KGX+00W5OoRmRF8duj/b5EW+SnF6J3etKMbgzHUcKLrIB54ikMv
vNNcCbGS+Qb94cBfLXt9zK+ifywUjnT1au/ahlz5MonzVNmudeabn261A3UbXFg2
6dvlbPLbb/FDoDomr+uIQcipkjN2F3Pe7AYGW5JDr1+sqyfPx2BpYnASaKFZtVrL
FvlyYC/wtp29QW+zAn+csaS1r9WgUZfKoI29fIBE2Qw2367QpEQPgNdhZQEfXIwM
K10uZJPiam8buVt88PNKsc+wYFaOzHeGO3kmq86Em9j9SJa5EF+yU0Et+I3z7hna
xSEruA+hC+ccBaicl2rxNeiml7xbOTiuQD6Okx0=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFljCCA36gAwIBAgIBAjANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
MjMyMTAxMzdaMGAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
MTkwNwYDVQQDEzBkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBJ
bnRlcm1lZGlhdGUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC47wob
mq2NLhf6EJdkWeli1E0ViOVpGOM05FNtcrYuX0A/y5VQtmavwchdn02Fa0ueEu3I
JU9gqYu8Ubpa9fE3xWGA97543yx8Y3/blL6EGD4yWzf+iMb3R2qOe/omTOPllk3g
D4pghlaSQe5ZRzzvVfUBH1Qj6WTSHcUoRCKUYaaBFpLxapjAS90Vf6PL1GQabdaq
JN/BUbC1dR/4Z+brelUy9NSvAXg7/HtndiRMl3sOU8wh6NXVpPYta4xhkGr230Cc
t1kOMPRSq4px9DOx3vZJCyOUy1Ro2CgAU2px2PlBG+95/TrqTgO6G8DxGYSV2bDY
ZpHX1MyRoaYAuRvvuffXt17CCHbiD5i9VYy6F8WBLd16l96RsgU6sREOryYoXFZ0
K9oUA69PEabq7H6Tlg6sQgaQ6u1No/H4H2eYTtmhdcOnaRWPq5i/x7EZ/cnwp9zc
7P0afMTrKl1ezXxXOVRVpmk7SD53lqXYXNJpy1pf2wTvPnl4mq1aCnQtHPmEFbuJ
LzejLBKrelfm7HMmQfxiKYKCHcth96eclP9GXhmA6XkU2BqbXGDEetDv+YiRdaQl
wKdfRhCZPBVBcy7DnqJoa0lss5l0e3lTu2+wW5Znb3FzXdJSnl1e6togF9IgJAkH
n4Fgs4/33TXQi0XPmy/iTbiUR8Ht8HhGBO4A2QIDAQABo2MwYTAOBgNVHQ8BAf8E
BAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVUtE9F56FzlMtVzABJaZ
nNGB1V8wHwYDVR0jBBgwFoAUmmgyBnblEP+8fE5WcVV1fDkenHswDQYJKoZIhvcN
AQELBQADggIBAIzbD8T6npRjMcTCHTfirl4r2eM9r6ANHIZgro5HK8EaTGaD2pQC
3nnh9dJfnw/bbIUG6yEacAAfp1Krba/f+z3B6PyIdbhGAkYaAZzyujzcyPZN3yx0
AIzZxwO4f9mpZ1Q95Xpn5ygozFKzZUg+AYW2qmyftDCVtHcZWBnKREgE64PMZ9b4
/sajWdmx9jdr6algdEUu4kIxGvAq5C2pgydh1mpVcx4Znvroczip+dlUAb5cudGP
krzCmdi9RxeGc/RIghNRNBtKVQtMh3nQwE0YOcIRY3T0WwCJHoRqCX9SoJvS/mYg
mpm4YxLf9NXxnhTCNTCgBZ+lYqqW4nt6msh10inYg/nSDgWoU50VC5WOQwmVbAXx
N4JrONvNXElWdEVkz8V2Lq1mwA6+4Mf1Rjau+j04z6bqZkdMYzCH0fG0to5B4fiM
+XfoFDgZfnymSuEPKjo4vsGLwwNAwfsVNIuiqEkJODKf31p9YNgNW23v6uKzV/GR
x6rKidp6XjfUkSXdmoPd4+qdhJLe+IQEVtoBUALlpGEYckin0L1/9Sl/GIucnkz3
bjq+NazgnPeRb2YdfiQBsY5C7b9x7bbRZdtskCtIjrdzvYr+Hil0xHDlqRSlOHZz
1snsRgG+DJF7rEPiXayz89JNrucWsrnyTYiQHANXWcwSKacILL1jneum
-----END CERTIFICATE-----

View File

@@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEA3Z5SEqXviGECctbg3nHdQeSIeRyhbNyTpB+De7t3c6G1RFhU
yBRPKdb8kptZ8cEZPzIbvsdP8YXTH6xX4hqNl5lPUXjGi7uw70wUvtAPIShpmjft
0uSGv6ZaRhthp1whE0AVj++4j/E/kMw9Wtk3OgOnpUedbc3CVhZiU8suSMgZxlp9
WS3dR8jRfNnhCEq08M2iki3eUuah3kcvByOJYhwzKNYlW57iQ/vUf0AD7t50JD2Q
OZ876/gm1SvcATWr6/A1B7yZdoCPxpFclzabqqZF9t5V2wjRhh14MXK6FqFvaGpo
P+kARto3PSmHLwC89Fj+hTCYXShn3Zgj3gNQODGd4XJSnQjj7ieBQpRjBceDQSW/
D3Djvas0EcGo0Bs+HXCLhsEqzjJVlpkHfkRJYeX2CYVAw3n50aCcHqRWAtk6YEqh
pXAGND5J5JBCENSc2mHvTLgtI1j4YQpZ5hjrBkfQ9tFrg82G5dKzhoT2wcJtYrJF
HXOV+gSWhWvGPX8PHTic/T+0VwRxvRcyM5xMf/bjzhIn2yYGfwdnDJuW0h1lyk80
1U33xmI88rYFdH1s5ZgQmyOi8YVU1JhA/SRREB7PirAW6mlnRZQcflJMksGWRGF7
dhAQbdEeiOMfxNvXKdQm4t0zV1MJB75D1dW1Q5d/EoFf6Ql2mFH/PfFL0WsCAwEA
AQKCAgAZIAfiAVoEtirW5jGcRoB1Jfmq8WoDs4Yxhzka4AgM3fp1AyapgFPxRGRi
Iqax19iK551ppaMUmL10y88y3RvMYh8x92VbFi5bEt8POvtcIP7H8ytNS4dTVwLJ
C/WoSbOeQRewt9bOyuUP/3e9Qv8V3rA09seMWVV8+RCwu0pGChmR+VRYtfBuYQAP
DYyLqhyVaFrUA8s+ztLvJlbYkljS/Kt4J78YggzY9EYFHzbS7/lu2mPthHYArKOK
a2yH3pPdLeB1PhaP7sdeFcDPi+teD3fDIzXMnVVTxSeJQ56BTlAZIGctR7c2USsO
DhU7aPQDJ8vDQd0kQp5z6vm2A94mKqk+iWDiJY8Q1LSbs/u1W+f3QYJq/s8eahhQ
4pbyb6rK8zyqynRTiNBW0al0ORdwzYLXsRgyn+DJSD+Yd84Iv9jnALVcYnGN5omd
Km5wIBIuu7OpWzxE+aY4svV3KrWQPgDzL6iTHRc4WjldBR0LUBE7N9lvs3mu0WnW
lgcGPuCfCo3DH1+j4YST3YmHq2viznYWJRGXZS15wcgopyAaWK9frMMmPWG4DGhD
IhLJPdueth/TK9rYBe8TBV64CJrBzCFmYitBYWNfZo0J59Lu0n1ubWTy4a5AO0sa
Z04D1YkAwNRaWODs7dyjl2LWJVxB8bmj+RwWo0ITASY+WE11wQKCAQEA/ols6B6O
qP1cWz37f2wghLw6T/pXkmOidTdUbquZe7hr8wDtB+D56Diox3K0pMXKTsECIl7x
rvt5YpQaNNCLikO532y7kynN5gEveIy5/hPjEQd3F+2hqF0wknSCMfqr1OsBeaRR
RTEFpMYRZ2KDpjpuCMOb67cn/lkZHlIA9SDAFo+8xecLqgoOEzy8S/D86JCVndK9
bbK2l2eoX6+FnnAdeZP6oTe2/WErPeU+moLS2monwL1F/Rpj5sPkwOlsQESxjxNh
jto3/IZL/to3bdc0QHuiFEdukKHQXmvw9N7mXqoX7OimlzwY4K9uVP5mm24mPhlQ
xzUYxOTK/ub0EQKCAQEA3uRz5fE64Af6NERdu5vRxcxck7H1FnOFxcbjH90ijJk+
EL74ku+gbhaaF9wc/DWXAa9fCq5sJfmMyOxcHA3jbprm3OU3Uvl3xqAYY94LtXKc
kNcfMNVydq5D7d0vhk1ZStISQ5iE0ooyi7k/+DpM9NUxrdMcVvc0UifsV3Wa8tnU
25kmY4SNH0O75cwUHrBVAyGEAjoWY5tqvub7hAarxWUoqScxnuaZsQ/kDiUyEvvV
E0lguQiuMV007uVx/pAB5pBGOmKTpn+FA/d7RZEjHdmhbmgUQTUM+wyLyCaXnyML
7GNBa82gKwkt2SecoB+PqxyCmSKh7b900/fxTwP5uwKCAQEAjVxrNHqMBkQ3f6Dr
xPebE7ypsfM5oV5eQjUJWjFJG1Hv+dRAz8hdYl/dNG967au/UH+WnNoX5XNaFapk
54IOjSpR10i+39sus5di8tNNFFOdHr8DoDFkP2oJ6Lx19iXeOnCNsC3WyNOR69Ho
pn8q2C3JIGrqdD2TI4n+Dj3CtGCM7brEPzCy5KuS55IqjQ54lvx9a1o3w+2lxG5Y
L1P+pGBlrjjFz7VulkfZyRVA8HTJf23HSB6V+Rwn8WhH6e95JDRCXFCKNNjykPdQ
y2gLsp/7L/i5qgOF3yNO4rGV75i/XkGe6f7HTmSc+GPVpbRn7dh9uq06lHfjmq3q
IyjG0QKCAQA1xgHcEMW2dNY9M901PNNwF+yhyUYqw3Ybj+8NqekC6JmbqqcHs/4N
cTB93yGzCy7CPk/8oLYAt2LqunNCZWtgLMjTtA0T3JGz/r3DojdK8DqriurAek5i
KYrD4R/tE84eCe5UFoC81pB5Oxkextn4G1Mf75WfuYYK6AzR0NKwEjOTQzCKw9jU
GSBEwWZ9POdVmQljDCaIo18ubUVyxbU9KzmTDZygDFw55m6LpxecktsGCyblnR2B
VU5G8F3/HzkGKfp3bX7XpV4u5c78qpZBRlb27u8sCCN4kb9La1wbDXZ78jo/St2A
ZHeGM1NLIjIkAv3S+hL867rAxMmX2YqvAoIBABYk3QFqAJ1D6oM0bKlwzFyxY45b
VyKW7EhyYdiShOTx5dPJ39phEuFd9ShWMVoJZ1t+PGlC4PDxhm3p6w0XxQD8maNk
o1R5lRcthYdiBOdVgE/LmGnTi88VbFsfQY1GzypYcej+7ioGapQwmWAhMxsIVoQW
wfwgP28Ju0Lt7ocu2iqlYWs1QIdkH/eBMRgU48OqiyhOa3LuAD13OZa5fWWshce7
R0cwnsavav+7SJ+9Y002DdpCKDuNUBk/7rnV7aJzmvTvI4Vtg1kMFF7kqdJDyyIZ
Fvwp4QXkmCpoxIR5Lg3qP8YkBR1KnFnjUiagMKkcTNUvpy02sRlXKiuZIQ4=
-----END RSA PRIVATE KEY-----

View File

@@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKwIBAAKCAgEA1RZnb7aWJUe1z0JZ0m82uYqPyQJhpTl99CJ7HjGZHXclEb/v
a7PBWOT7hGcjR+VXCf8O58T9zMA/FHLaI55dhaBGY163iCHnz5sni0YElwUzRPoo
qE15+wdrHz1xa+BnkW6KxxPBmSToM7ZVqwZD4pw5/XXGNrSc5YdB1iEXnXqIVZar
OOVMu7tVJKRslWT6G015t/Uu4EsnhkZwIGiET31WXU5FBwmOMfh5/eFI+VO0wB+k
GC83ihphSEaEMYGmz2mcPlINFwFQyKGJpTj4KYUI5C6YywxLe54cZxLzBuvGa0en
O1hjkvnguh2rAHYJ8VpkaB0UjHSisg4eU8FW0RsZRwIXBTxGKg8FF2/h+/NQomuD
AKOHlIm5QXRLULPe8pnkbRwaElNjkUCDmMeevEmcCg8d/x8Mh3f+Y2dKDNJDKFSY
b444hd9LLtU7pLuhU9FLuDbCD39EqtwRxET89uOLfcshaG20/FnbTRxjjoxFFkj/
4aP/YXs+q26B9jaNStUbjdsEJUOZjSEMj8JKwAjnYWL0DwrNIKeGN8IHa0O46nHD
m18uYaJEQvdln4MAhVSBJSU/ZTqARJR+SEtgWj/uJPZPwPy10POzpqkb2ZSrGjF/
S3VGaCxUtxM0gxXZ1nYLX0CSA19E9ZfC0GjYW3G3zVa0Ww/PgUgd7XZZb/cCAwEA
AQKCAgEAsczL6i8UAW9giNv4Ttp1c0Pmzfaqu07JhhfvWzZPaIKt762TfO60ehQ2
ujfE/Iykn7avHT0F+P7Ao7NhyS6vInylvuydf32rC1OPH2sBEXJJYVjK7AQZsBh0
jdCa/0RzpqnyKkKV288r4VpXCSllI4Tv7kmDZso10F+X07AxIDnJ4ICjxgmuiUa6
uKRp5g4nkviGg7SVtJmBFU1Y6fHL1PfkRF62wjR67b9DK0/7r+7kdxrvtS5QzW9N
lR0h2J0yLwcUU/vq6DM00PPZAoASsbJLxPoMjABqS2+Wi3sIRdId8g4LwDz0eQL/
PMzcjrnvee/OsADz0G7SAng9Xe2JHdNUUULUwXArVc8rw0krsPW51nHt5RnJJf1O
UDzl90AgrHmpte3K9UAkVWpgpjlCpjNk32GZpP/qKuvSS/tADqx+CI4Nz727WkTn
j4gCg0mdMqwNZL3ZJaVAFR1AAPptusMsIeZpuCePcveQq0oTYTj5iXeZN9ly62eU
m5eloEucFmBQgW1CFl3RiOO220Q1Z7DvHSvIEquMB6FdY9UL4HFfvmPadKE1Yt91
Ekd3JPJ7Imw2kZOr0RxSBknlho7mnIs4OmFhXs1Fi3vZ8E67dLzxYkjYE5WlXwIk
330pCMI/8S1PWkZpCtX+FnP88bsKbdD85ckWMOjewG5sR2l4TAECggEBANt5y1Ip
OWupcWXVlWXU6CkXbocv5SpVXUrJX9FECdXxG521Apn8d3ROF+J5Ef+NePRrIz+9
PKGnEj1DrPEEmiIBYDieND9L1fJ2F4M2zmvawCVv+I1YDMQYDdj8Pj2L+pK8oZRo
6G6edtFGKArlFNNc8n1aii5UicPJJfwb7GmUu3jIMzIHxkln0lt6I67/r1M9TA2Z
7sYqMCochdyQPva9LgMKkR51PW8EmF95GDV/ZVd/OwlVQL84nMV2atQq0NOb9v8/
WH5U4VzCTML9z+RVBm+XaR5Vd2sbVBRtDl7JU8ml7+GSfMpfRbrrCkwoZVFcDVFC
QgtSecNi0cPt03cCggEBAPiMdHtWZaSL/W3MfkosQ7PaDmdsEsR2lasdSL6esVTz
Tg0j+fzItPEgy79/xAZKAS8NDCgKKNs3YRcDe2TH/rxoG8HDrNN4ROMuF7c0ERwl
fxX6Hj/Ku0zCUJYAUMfC1JNKKhUSgLO39p+BMEDlK78cX5AV2UESir309DoyZJPR
br0Mp/1BCkf2fL8E5cdNCC2u9f3cA+Z9SSZe5zwS1VnicEugc2sXFpUltE9C48zq
BcGt+ipO5vd98rGFED95y3qkgl+S43QT7pcywnEVFuN8MA909ksQhxszuU+/NST7
QmaVhlbf1fvA6E+nRB4qeunfV4Di1hPJ96vFUuk7Z4ECggEBALBecnZG7H7I/niA
J3tamWoUC10VrwnS6ZJkutwW2fSTucaFJF8MFA5hhy4WbtE3JeD8i6n371ZURQL4
dGyGbzouct1svL20umwKXCC0lTL06f3ruggTnsaiMdaR9Y5OA4GRliMGHupt6zuH
Ljx7FL45biAv9uT+SGsOJLkw002RGQoZj5J/zudESDX8s9o4W/L/SjOP5OJYGrZs
j2HMhNyQ7/2/qxiXzFN/lNb8H+k1xAGJG68HVG2WF91SqMxwz7mNFvLNO9bhbOy2
syrq/foWHYLlYLLgqYNnxaxYavjevrdH4roZlrCl2Qo5QOAsgibcW1NWdG5Wy8So
j7rsTkcCggEBAOjiujXi40RFw0WBSYf4Z8t6cuqnEgKx1lVKECJEEYa2nxii8BbS
fPA+uYqKjcMSzn8mq5BMzLFy928X3SO2XVJt/iwVS7etxmZThvcrOyjzXVvbS0Kc
k9k9bULPsuEqBgKoiDvMZXl/0v7rjqoP0Wi1jjk0r6dQV13byodJNoJmx2suE62x
po99Pq/BSAfxfstHV8jwwVrTY5onbGUWhIA/Mtc51Uuvi4JenM9zrn0PfitW98Ny
wOl/QsrhEjNXzLfysxIYUTUvg+x6LETG7PZkI6goAKqfJujvEyDM2V/4aeQzEHgQ
LbFBtKsF4EafqSHAGn5yjQJnyMBTGPfeqIECggEBAMNuS+B2+XT9QuzA6erkQpNj
zEz2FmAwSdvoj9/Y4UnUfXb7xF98L84qyqEzoGMnrQLvYtL92tffKGObsjWqNWJs
NYhTpfvblAscpafkG3mUZM7u5xB/0VHfzRUvt1YQ1N0LItUzJL15GWEDGBJdh/Vi
3nyLciEETnCg+xzQQzkNAM3wg9ingBPjEdtOuZEgNmkR7ZUshhleD4dEp2yuzLG7
jFAPDHPpfkEGuy6WNuf/OKzjgBiwXsnFpjVOt/JUH03xXh9moRrwZGG4kuzOwRzk
i9k5kunsdiYsmcJ4RdtPx2TP/TgJoh6woTivFmhMVFsJ3jGFfWbzl94OxS5B9/U=
-----END RSA PRIVATE KEY-----

View File

@@ -1,32 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFbTCCA1WgAwIBAgIBATANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNRG9sdEh1YiwgSW5jLjExMC8GA1UEAxMoZG9sdCBpbnRlZ3JhdGlv
biB0ZXN0cyBSU0EgNDA5Ni1iaXQgUm9vdDAeFw0yMjEwMjYyMTAxMzdaFw0zMjEw
MjMyMTAxMzdaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMu
MTEwLwYDVQQDEyhkb2x0IGludGVncmF0aW9uIHRlc3RzIFJTQSA0MDk2LWJpdCBS
b290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqtesAvWsaJka6DLe
56o0b8xxuO5MIf7OC6SWX3GzFSNCW0rVBYZXK3v/Rv4hyni9DGgStQfvDWA+ZEvF
x1EtdtfRWx41RvtgrjQHj965Zh+fI41TSVyXYRL5v08tnXzu5Y1v51XlbCwmtXyP
cq1oYdNi/4XaVLEx+nYfZ2u3WXnzuHBcQc7n67zLdOUnHYszWpRrvqgmSCuqUz1x
Q431Kq4qORkRNUD/lvaXiE7Dgp3lJeYqcq27YX5AKODgBsPYCqD1iKiSphLWb84O
LUymJJDQ/ytYuMuwEK4Y0jsCLU1NnUI/Esdk4UeYQSdumNE70/9UuCGXSgpCOLt1
o/jKkEiagI4vQ8W3Daa+G3heNT2ukvK3JAnn2nCcH/El2/KsaJCX5idu7qjyxHMT
srkocltpEU3wLDQ8OuezcYUOig9fLX2lbZhNmEWdV5aXr2QrGXX4YmHw36awr1zm
6c33bjL6Hz3r9HgoBROIJDV4SvpCpnRRZJcx62sAQEUYjp0ASN2b95alPKYGFzti
m93kqVxMvb98b+L1XX/5LaU6Auqz6rWVJf8cBxgLlqope4IsSpP4jNUUw+m2l4Q6
dtaAg43UfHsVNuwgXYqvYrBJerSViPL1eDaxTLmjtyuWqHqb/MdGeU6EbTThieHq
cDEPLzk26VzxfVqgI4jL7ggJibECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgIEMA8G
A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJpoMgZ25RD/vHxOVnFVdXw5Hpx7MA0G
CSqGSIb3DQEBCwUAA4ICAQBgkaeyYLRUku3eyBSzG9QGjTsYFRAj5ov4gEj3SkdG
3vBJ+QttGIyDksn0p/kSq6O1pWDWSk2hUqC/AV3w1mGZt/BX7MrM27HlwGPD42zh
ndxx73axiSLVovTpDD4b5gihjx55GJzM7VTjFKTTTD8DBsZg+vVNtbMJHG+oa8Fe
xCGE6TH4q39Xzy0mYMBxj90MLQlUgak1I7juWZTFs3T6eCPB+10onwYDwAaSjxt/
abqKz3FSEdCclBZ/t6W7HRJNznKjtgL3xm7B1yRkXJuGVRzdPOa9GW3n1XOzMmFy
OqEHGhlGov/5O+jGoAGMD1EyYzhMxD71Y0hfNMt6XVV39tsbzwFYM7SkhJ2xgHzs
mdGxac9S+Hcyki0JHKIAAkswAG2POsrBxMLcUsvp4Gie0H0Gz53/wZEc5zC05pr3
VSjMXwnrCKTNpx6/hJNqf1rJLjBkJCiCEpeK7o4FjnVNGnIE0KroUsnWpRXwVQ0I
b/BJTA1BwA5AOvCFWIN+kVJTIl3FBQAKmsEn37VCPhMiOuy2HbVeQL1P+VveMb7X
9K7JoFb2OA1V7UL1iak9489xvXBGQzsMnJXHzo43T2eIODBXH3Cd+cFCoC2Zshm3
IjGR72bNhDgb9DvnIE0cUA4Umrs+yJkK2BJe294fi6nD80SDwbuxj6pW1jOSChf8
vQ==
-----END CERTIFICATE-----

View File

@@ -1 +0,0 @@
{"keys":[{"use":"sig","kty":"RSA","kid":"56321c6d-cdb9-4cb2-89f1-3c7cf6bb051d","alg":"RS256","n":"oKNBTaaBljGkmvns_lk4y8HWhquNFUXLcNofg56vgvmareivMABSq0miNg1G3Bgew6EGemTGSFNBCicXH1gGZ7tBnpxDj8Vpt5nYXEeywmiBKWxbVu8o9XamuLFXPOhK_BzRPKnoX-J-BoopxWNKUWwJfCM-69MgRXl-K5w4gjGrHePXtpDylT-zWMNCAvHDGd8kwVj0EKeI2-PjxBRzltCfEWPblLcif4pirBzsCho9fC133XlY3ixT162YYqt8M7Grr0zxRca8OhuVISrkJxh7p6M0eHpfUrAjuCGCqcIs0WLMZ-aZNzApbFebx5OeTBpe1Xi6oJxjuKozTy_W0w","e":"AQAB"}]}

View File

@@ -1 +0,0 @@
eyJhbGciOiJSUzI1NiIsImtpZCI6IjU2MzIxYzZkLWNkYjktNGNiMi04OWYxLTNjN2NmNmJiMDUxZCIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsibXlfcmVzb3VyY2UiXSwiZXhwIjoxNjk4Nzk0MTI0LCJpYXQiOjE2NjczNDQ1MjQsImlzcyI6ImRvbHRodWIuY29tIiwianRpIjoiMjc3YmIyNmMtMGMwOC00MjA3LWFiYjAtYzIyMGJmZTJlMDc2Iiwib25fYmVoYWxmX29mIjoibXlfdXNlciIsInN1YiI6InRlc3RfdXNlciJ9.OeKKNLQeQ6JWqFeu_lP9zGU1yCKvx5Xo868S82kEjJ_CGSj_K3Y3GG2rht8977SdULivRwxxtOM2LH7VJ0WkgmjCzERsA40z_SNwBwiWnIUzmT3uXsBsq31xgwG9xO-LrwMOJ-66Y1UgFjsmQmjV4Bw5vTkuGOkp87El-8MeRIC7eWzWotmcdSSWTCtFJumomHnDrTyYtvL0bLaqMCkUApdgAjB9At9q7a75kJ3kklTFmVJs9sO9cN3hsWTLmV-mM3PO6OKmNQbRqz92qTrTFIWLc92ooOVKrb6v5yY5GvH0z1bBpgNdSBbImS0FsrLhzBBDAJPK8uNoNuaLYVcvLQ

View File

@@ -21,11 +21,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
source_path: $TESTGENDIR/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -49,11 +49,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
@@ -88,11 +88,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -116,11 +116,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_exp_key.pem
source_path: $TESTGENDIR/rsa_exp_key.pem
- name: cert.pem
source_path: testdata/rsa_exp_chain.pem
source_path: $TESTGENDIR/rsa_exp_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
@@ -156,11 +156,11 @@ tests:
tls_ca: root.pem
server_name_dns: ["does-not-match.dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -184,11 +184,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
@@ -224,11 +224,11 @@ tests:
tls_ca: root.pem
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance/does-not-match"]
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -252,11 +252,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
@@ -291,11 +291,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -319,11 +319,11 @@ tests:
tls_cert: cert.pem
tls_ca: root.pem
- name: key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: root.pem
source_path: testdata/rsa_root.pem
source_path: $TESTGENDIR/rsa_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310
@@ -400,11 +400,11 @@ tests:
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance"]
server_name_dns: ["dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/ed25519_key.pem
source_path: $TESTGENDIR/ed25519_key.pem
- name: cert.pem
source_path: testdata/ed25519_chain.pem
source_path: $TESTGENDIR/ed25519_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
source_path: $TESTGENDIR/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3309
@@ -430,11 +430,11 @@ tests:
server_name_urls: ["spiffe://dolt-integration-tests.dev.trust.dolthub.com.example/dolt-instance"]
server_name_dns: ["dolt-instance.dolt-integration-test.example"]
- name: key.pem
source_path: testdata/ed25519_key.pem
source_path: $TESTGENDIR/ed25519_key.pem
- name: cert.pem
source_path: testdata/ed25519_chain.pem
source_path: $TESTGENDIR/ed25519_chain.pem
- name: root.pem
source_path: testdata/ed25519_root.pem
source_path: $TESTGENDIR/ed25519_root.pem
server:
args: ["--config", "server.yaml"]
port: 3310

View File

@@ -4,11 +4,11 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: test_jwks.json
source_path: testdata/test_jwks.json
source_path: $TESTGENDIR/test_jwks.json
- name: server.yaml
contents: |
listener:
@@ -33,7 +33,7 @@ tests:
- exec: "GRANT ALL ON *.* TO test_jwt_user@'127.0.0.1' WITH GRANT OPTION"
- on: repo1
user: test_jwt_user
password_file: testdata/token.jwt
password_file: $TESTGENDIR/token.jwt
driver_params:
allowCleartextPasswords: "true"
queries:

View File

@@ -16,9 +16,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: server.yaml
contents: |
listener:
@@ -33,9 +33,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: server.yaml
contents: |
listener:
@@ -53,9 +53,9 @@ tests:
- name: repo1
with_files:
- name: chain_key.pem
source_path: testdata/rsa_key.pem
source_path: $TESTGENDIR/rsa_key.pem
- name: chain_cert.pem
source_path: testdata/rsa_chain.pem
source_path: $TESTGENDIR/rsa_chain.pem
- name: server.yaml
contents: |
listener: