Merge remote-tracking branch 'origin/main' into andy/merge-artifacts

This commit is contained in:
Andy Arthur
2022-07-26 08:44:21 -07:00
186 changed files with 11020 additions and 10020 deletions

28
.github/README.md vendored Normal file
View File

@@ -0,0 +1,28 @@
# Dolt's GitHub Actions
This doc provides context for the types of Workflows we use in this repository. It is not a comprehensive GitHub Actions tutorial. To familiarize yourself with GitHub Actions concepts and terminology, please see the [documentation](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions).
Dolt uses GitHub Actions Workflows in four primary ways:
* To run continuous integration tests on pull requests and pushes to `main`
* To release and publish new Dolt assets
* To deploy various benchmarking jobs to contexts _other_ than GitHub Actions (like in a Kubernetes cluster, for example).
* To handle misc. [repository_dispatch](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#repository_dispatch) events triggered by external clients.
## Continuous Integration Workflows
Workflows prefixed with `ci-` are run on pull requests to `main`, though some run on pushes to `main` (after a pull request is merged). These workflows are synchronous and don't trigger any other workflows to run.
## Dolt Release Workflows
Workflows prefixed with `cd-` are used for releasing Dolt. Some of these workflows are asynchronous, meaning that they only perform part of a task before triggering the next part of a task to run in a _different_ workflow, sometimes in other GitHub repositories, using `repository_dispatch` events.
## Benchmarking Workflows
Benchmarking workflows are used as an interface for deploying benchmarking jobs to one of our Kubernetes Clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered with `repository_dispatch` events. Notice that benchmarking workflows, like `workflows/performance-benchmarks-email-report.yaml` for example, trigger these events using the `peter-evans/repository-dispatch@v1` Action.
These Kubernetes Jobs do not run on GitHub Actions Hosted Runners, so the workflow logs do not contain any information about the deployed Kubernetes Job or any errors it might have encountered. The workflow logs can only tell you whether a Job was created successfully. To investigate an error or issue with a Job in our Kubernetes Cluster, see the debugging guide [here](https://github.com/dolthub/ld/blob/main/k8s/README.md#debug-performance-benchmarks-and-sql-correctness-jobs).
## Misc. Repository Dispatch Workflows
Some workflows perform single, common tasks and are triggered by `repository_dispatch` events. These include `workflows/email-report.yaml`, which emails the results of performance benchmarks to the team, and `workflows/pull-report.yaml`, which posts those same results to an open pull request. Workflows like these are triggered by external clients.

View File

@@ -9,6 +9,10 @@ inputs:
description: 'dolt version being benchmarked or ref of bats windows job'
required: true
default: ''
format:
description: 'dolt noms bin format'
required: true
default: '__LD_1__'
template:
description: 'email template'
required: false

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,8 @@
{
"Template": {
"TemplateName": "ImportBenchmarkingReleaseTemplate",
"SubjectPart": "Import Benchmarks for {{format}} {{version}}",
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Import Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
"TextPart": "Dolt {{format}} {{version}} Import Results,\r\n{{results}}"
}
}

View File

@@ -4,6 +4,7 @@ const fs = require('fs');
const region = core.getInput('region');
const version = core.getInput('version');
const format = core.getInput('format');
const Template = core.getInput('template');
const dataFilePath = core.getInput('dataFile');
const CcAddresses = JSON.parse(core.getInput('ccAddresses'));
@@ -15,6 +16,7 @@ const data = dataFilePath ? fs.readFileSync(dataFilePath, { encoding: 'utf-8' })
const templated = {
version,
format,
results: data,
workflowURL,
};

View File

@@ -1,8 +1,152 @@
{
"name": "ses-email-action",
"version": "0.1.0",
"lockfileVersion": 1,
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "ses-email-action",
"version": "0.1.0",
"license": "ISC",
"dependencies": {
"@actions/core": "^1.2.6",
"aws-sdk": "^2.828.0"
}
},
"node_modules/@actions/core": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/@actions/core/-/core-1.2.6.tgz",
"integrity": "sha512-ZQYitnqiyBc3D+k7LsgSBmMDVkOVidaagDG7j3fOym77jNunWRuYx7VSHa9GNfFZh+zh61xsCjRj4JxMZlDqTA=="
},
"node_modules/aws-sdk": {
"version": "2.828.0",
"resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.828.0.tgz",
"integrity": "sha512-JoDujGdncSIF9ka+XFZjop/7G+fNGucwPwYj7OHYMmFIOV5p7YmqomdbVmH/vIzd988YZz8oLOinWc4jM6vvhg==",
"dependencies": {
"buffer": "4.9.2",
"events": "1.1.1",
"ieee754": "1.1.13",
"jmespath": "0.15.0",
"querystring": "0.2.0",
"sax": "1.2.1",
"url": "0.10.3",
"uuid": "3.3.2",
"xml2js": "0.4.19"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/buffer": {
"version": "4.9.2",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz",
"integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==",
"dependencies": {
"base64-js": "^1.0.2",
"ieee754": "^1.1.4",
"isarray": "^1.0.0"
}
},
"node_modules/events": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz",
"integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=",
"engines": {
"node": ">=0.4.x"
}
},
"node_modules/ieee754": {
"version": "1.1.13",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz",
"integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg=="
},
"node_modules/isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"node_modules/jmespath": {
"version": "0.15.0",
"resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz",
"integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=",
"engines": {
"node": ">= 0.6.0"
}
},
"node_modules/punycode": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz",
"integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0="
},
"node_modules/querystring": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz",
"integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=",
"deprecated": "The querystring API is considered Legacy. new code should use the URLSearchParams API instead.",
"engines": {
"node": ">=0.4.x"
}
},
"node_modules/sax": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz",
"integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o="
},
"node_modules/url": {
"version": "0.10.3",
"resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz",
"integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=",
"dependencies": {
"punycode": "1.3.2",
"querystring": "0.2.0"
}
},
"node_modules/uuid": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
"integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==",
"deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.",
"bin": {
"uuid": "bin/uuid"
}
},
"node_modules/xml2js": {
"version": "0.4.19",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz",
"integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==",
"dependencies": {
"sax": ">=0.6.0",
"xmlbuilder": "~9.0.1"
}
},
"node_modules/xmlbuilder": {
"version": "9.0.7",
"resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz",
"integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=",
"engines": {
"node": ">=4.0"
}
}
},
"dependencies": {
"@actions/core": {
"version": "1.2.6",

View File

@@ -1,8 +1,8 @@
{
"Template": {
"TemplateName": "PerformanceBenchmarkingReleaseTemplate",
"SubjectPart": "Performance Benchmarks for {{version}}",
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
"TextPart": "Dolt {{version}} Performance Results,\r\n{{results}}"
"SubjectPart": "Performance Benchmarks for {{format}} {{version}}",
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
"TextPart": "Dolt {{format}} {{version}} Performance Results,\r\n{{results}}"
}
}

View File

@@ -1,8 +1,8 @@
{
"Template": {
"TemplateName": "SqlCorrectnessReleaseTemplate",
"SubjectPart": "SQL Correctness for {{version}}",
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
"TextPart": "Dolt {{version}} SQL correctness,\r\n{{results}}"
"SubjectPart": "SQL Correctness for {{format}} {{version}}",
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
"TextPart": "Dolt {{format}} {{version}} SQL correctness,\r\n{{results}}"
}
}

View File

@@ -64,7 +64,9 @@ echo '
"--fileNames=100k-sorted.csv",
"--fileNames=100k-random.csv",
"--fileNames=1m-sorted.csv",
"--fileNames=1m-random.csv"
"--fileNames=1m-random.csv",
"--fileNames=10m-sorted.csv",
"--fileNames=10m-random.csv"
]
}
],

View File

@@ -65,7 +65,9 @@ echo '
"--fileNames=100k-sorted.csv",
"--fileNames=100k-random.csv",
"--fileNames=1m-sorted.csv",
"--fileNames=1m-random.csv"
"--fileNames=1m-random.csv",
"--fileNames=10m-sorted.csv",
"--fileNames=10m-random.csv"
]
}
],

View File

@@ -3,7 +3,7 @@
set -e
if [ "$#" -lt 9 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat>"
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat> <withTpcc>"
exit 1
fi
@@ -18,6 +18,7 @@ format="$8"
issueNumber="$9"
initBigRepo="${10}"
nomsBinFormat="${11}"
withTpcc="${12}"
tpccRegex="tpcc%"
if [ -n "$initBigRepo" ]; then
@@ -28,6 +29,10 @@ if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi
if [ -n "$withTpcc" ]; then
withTpcc="\"--withTpcc=$withTpcc\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan')"
medianLatencyChangeReadsQuery="select f.test_name as read_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
@@ -79,7 +84,7 @@ echo '
"--issue-number='$issueNumber'",
"--results-dir='$timePrefix'",
"--results-prefix='$actorPrefix'",
"--withTpcc=true",
'"$withTpcc"'
'"$initBigRepo"'
'"$nomsBinFormat"'
"--sysbenchQueries='"$medianLatencyChangeReadsQuery"'",

View File

@@ -2,8 +2,8 @@
set -e
if [ "$#" -lt 8 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format> <initBigRepo> <nomsBinFormat>"
if [ "$#" -lt 9 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat> <withTpcc>"
exit 1
fi
@@ -15,8 +15,10 @@ toVersion="$5"
timeprefix="$6"
actorprefix="$7"
format="$8"
initBigRepo="$9"
nomsBinFormat="${10}"
issueNumber="$9"
initBigRepo="${10}"
nomsBinFormat="${11}"
withTpcc="${12}"
precision="1"
tpccRegex="tpcc%"
@@ -28,6 +30,10 @@ if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi
if [ -n "$withTpcc" ]; then
withTpcc="\"--withTpcc=$withTpcc\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan')"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"
@@ -84,7 +90,7 @@ echo '
"--region=us-west-2",
"--results-dir='$timeprefix'",
"--results-prefix='$actorprefix'",
"--withTpcc=true",
'"$withTpcc"'
'"$initBigRepo"'
'"$nomsBinFormat"'
"--sysbenchQueries='"$medianLatencyMultiplierReadsQuery"'",

View File

@@ -12,6 +12,11 @@ if [ -z "$TEMPLATE_SCRIPT" ]; then
exit 1
fi
if [ -z "$NOMS_BIN_FORMAT" ]; then
echo "Must set NOMS_BIN_FORMAT"
exit 1
fi
if [ -z "$FROM_SERVER" ] || [ -z "$FROM_VERSION" ] || [ -z "$TO_SERVER" ] || [ -z "$TO_VERSION" ]; then
echo "Must set FROM_SERVER FROM_VERSION TO_SERVER and TO_VERSION"
exit 1
@@ -31,7 +36,6 @@ if [ "$NOMS_BIN_FORMAT" = "__DOLT_1__" ]; then
INIT_BIG_REPO="false"
fi
echo "Setting from $FROM_SERVER: $FROM_VERSION"
echo "Setting to $TO_SERVER: $TO_VERSION"
@@ -45,11 +49,16 @@ actorShort="$lowered-$short"
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s
timesuffix=`date +%s%N`
jobname="$actorShort-$timesuffix"
jobname="$actorShort"
if [ -n "$WITH_TPCC" ]; then
jobname="$jobname-tpcc"
fi
jobname="$jobname-$timesuffix"
timeprefix=$(date +%Y/%m/%d)
actorprefix="$MODE/$ACTOR/$actorShort"
actorprefix="$MODE/$ACTOR/$jobname/$NOMS_BIN_FORMAT"
format="markdown"
if [[ "$MODE" = "release" || "$MODE" = "nightly" ]]; then
@@ -72,7 +81,8 @@ source \
"$format" \
"$issuenumber" \
"$INIT_BIG_REPO" \
"$NOMS_BIN_FORMAT" > job.json
"$NOMS_BIN_FORMAT" \
"$WITH_TPCC" > job.json
out=$(KUBECONFIG="$KUBECONFIG" kubectl apply -f job.json || true)

View File

@@ -2,8 +2,8 @@
set -e
if [ "$#" -ne 6 ]; then
echo "Usage: ./get-dolt-correctness-job-json.sh <jobname> <fromVersion> <toVersion> <timeprefix> <actorprefix> <format>"
if [ "$#" -lt 6 ]; then
echo "Usage: ./get-dolt-correctness-job-json.sh <jobname> <fromVersion> <toVersion> <timeprefix> <actorprefix> <format> <nomsBinFormat>"
exit 1
fi
@@ -13,9 +13,14 @@ toVersion="$3"
timeprefix="$4"
actorprefix="$5"
format="$6"
nomsBinFormat="$7"
precision="2"
if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi
resultCountQuery="select result, count(*) as total from results where result != 'skipped' group by result;"
testCountQuery="select count(*) as total_tests from results where result != 'skipped';"
correctnessQuery="select ROUND(100.0 * (cast(ok_results.total as decimal) / (cast(all_results.total as decimal) + .000001)), $precision) as correctness_percentage from (select count(*) as total from results where result = 'ok') as ok_results join (select count(*) as total from results where result != 'skipped') as all_results"
@@ -40,13 +45,14 @@ echo '
"env": [
{ "name": "REPO_ACCESS_TOKEN", "value": "'$REPO_ACCESS_TOKEN'"},
{ "name": "ACTOR", "value": "'$ACTOR'"},
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'"}
{ "name": "DOLT_DEFAULT_BIN_FORMAT", "value": "'NOMS_BIN_FORMAT'"}
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'"},
{ "name": "DOLT_DEFAULT_BIN_FORMAT", "value": "'$NOMS_BIN_FORMAT'"}
],
"args": [
"--schema=/correctness.sql",
"--output='$format'",
"--version='$toVersion'",
'"$nomsBinFormat"'
"--bucket=sql-correctness-github-actions-results",
"--region=us-west-2",
"--results-dir='$timeprefix'",

View File

@@ -1,59 +0,0 @@
#!/bin/sh
set -e
if [ "$#" -ne 6 ]; then
echo "Usage: ./get-dolt-regressions-job-json.sh <jobname> <fromVersion> <toVersion> <timeprefix> <actorprefix> <format>"
exit 1
fi
jobname="$1"
fromVersion="$2"
toVersion="$3"
timeprefix="$4"
actorprefix="$5"
format="$6"
regressionsQuery="select count(*) from from_results;"
echo '
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "'$jobname'",
"namespace": "sql-correctness"
},
"spec": {
"backoffLimit": 1,
"template": {
"spec": {
"serviceAccountName": "sql-correctness",
"containers": [
{
"name": "sql-correctness",
"image": "407903926827.dkr.ecr.us-west-2.amazonaws.com/liquidata/sql-correctness:latest",
"env": [
{ "name": "REPO_ACCESS_TOKEN", "value": "'$REPO_ACCESS_TOKEN'"},
{ "name": "ACTOR", "value": "'$ACTOR'"},
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'"}
],
"args": [
"--schema=/regressions.sql",
"--output='$format'",
"--from-version='$fromVersion'",
"--to-version='$toVersion'",
"--bucket=sql-correctness-github-actions-results",
"--region=us-west-2",
"--results-dir='$timeprefix'",
"--results-prefix='$actorprefix'",
"'"$regressionsQuery"'"
]
}
],
"restartPolicy": "Never"
}
}
}
}
'

View File

@@ -12,6 +12,11 @@ if [ -z "$TEMPLATE_SCRIPT" ]; then
exit 1
fi
if [ -z "$NOMS_BIN_FORMAT" ]; then
echo "Must set NOMS_BIN_FORMAT"
exit 1
fi
if [ -z "$FROM_VERSION" ] && [ -z "$TO_VERSION" ]; then
echo "Must set FROM_VERSION or TO_VERSION for correctness run"
echo "Must set both for regressions run"
@@ -39,18 +44,31 @@ short=${TO_VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"
jobname="$actorShort"
# random sleep
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s
timesuffix=`date +%s%N`
jobname="$actorShort-$timesuffix"
timeprefix=$(date +%Y/%m/%d)
actorprefix="$MODE/$ACTOR/$actorShort"
actorprefix="$MODE/$ACTOR/$jobname/$NOMS_BIN_FORMAT"
format="markdown"
if [[ "$MODE" = "release" || "$MODE" = "nightly" ]]; then
format="html"
fi
source "$TEMPLATE_SCRIPT" "$jobname" "$FROM_VERSION" "$TO_VERSION" "$timeprefix" "$actorprefix" "$format" > job.json
source \
"$TEMPLATE_SCRIPT" \
"$jobname" \
"$FROM_VERSION" \
"$TO_VERSION" \
"$timeprefix" \
"$actorprefix" \
"$format" \
"$NOMS_BIN_FORMAT" > job.json
out=$(KUBECONFIG="$KUBECONFIG" kubectl apply -f job.json || true)

View File

@@ -1,4 +1,4 @@
name: Release
name: Release Dolt
on:
workflow_dispatch:

View File

@@ -21,10 +21,10 @@ jobs:
strategy:
fail-fast: true
matrix:
os: [ ubuntu-18.04, macos-10.15 ]
os: [ ubuntu-18.04, macos-latest ]
dolt_fmt: [ "", "__DOLT_DEV__", "__DOLT_1__" ]
exclude:
- os: "macos-10.15"
- os: "macos-latest"
dolt_fmt: ["__DOLT_DEV__", "__DOLT_1__" ]
env:
use_credentials: ${{ secrets.AWS_SECRET_ACCESS_KEY != '' && secrets.AWS_ACCESS_KEY_ID != '' }}

View File

@@ -21,7 +21,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-10.15, ubuntu-18.04, windows-latest]
os: [macos-latest, ubuntu-18.04, windows-latest]
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2

View File

@@ -21,7 +21,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-10.15, ubuntu-18.04, windows-latest]
os: [macos-latest, ubuntu-18.04, windows-latest]
dolt_fmt: [ "" ]
include:
- os: "ubuntu-18.04"
@@ -73,7 +73,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-10.15, ubuntu-18.04, windows-latest]
os: [macos-latest, ubuntu-18.04, windows-latest]
dolt_fmt: [ "" ]
include:
- os: "ubuntu-18.04"

View File

@@ -1,99 +0,0 @@
name: Run Import Benchmark on Pull Requests
on:
pull_request:
types: [ opened ]
issue_comment:
types: [ created ]
jobs:
validate-commentor:
runs-on: ubuntu-18.04
outputs:
valid: ${{ steps.set_valid.outputs.valid }}
steps:
- uses: actions/checkout@v2
- name: Validate Commentor
id: set_valid
run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
env:
ACTOR: ${{ github.actor }}
check-comments:
runs-on: ubuntu-18.04
needs: validate-commentor
if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
outputs:
benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
comment-body: ${{ steps.set_body.outputs.body }}
steps:
- name: Check for Deploy Trigger
uses: dolthub/pull-request-comment-trigger@master
id: check
with:
trigger: '#import-benchmark'
reaction: rocket
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set Benchmark
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
performance:
runs-on: ubuntu-18.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Benchmark Import Performance
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- uses: xt0rted/pull-request-comment-branch@v1
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Get pull number
uses: actions/github-script@v3
id: get_pull_number
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- name: Run benchmarks
id: run-benchmarks
run: ./.github/scripts/import-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: "dolt"
FROM_VERSION: ${{ github.sha }}
TO_SERVER: "dolt"
TO_VERSION: ${{ steps.comment-branch.outputs.head_sha }}
MODE: 'pullRequest'
ISSUE_NUMBER: ${{ steps.get_pull_number.outputs.pull_number }}
ACTOR: ${{ github.actor }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
TEMPLATE_SCRIPT: "./.github/scripts/import-benchmarking/get-dolt-dolt-job-json.sh"
NOMS_BIN_FORMAT: "__LD_1__"

View File

@@ -1,100 +0,0 @@
name: Benchmark Pull Requests (New Format)
on:
pull_request:
types: [ opened ]
issue_comment:
types: [ created ]
jobs:
validate-commentor:
runs-on: ubuntu-18.04
outputs:
valid: ${{ steps.set_valid.outputs.valid }}
steps:
- uses: actions/checkout@v2
- name: Validate Commentor
id: set_valid
run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
env:
ACTOR: ${{ github.actor }}
check-comments:
runs-on: ubuntu-18.04
needs: validate-commentor
if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
outputs:
benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
comment-body: ${{ steps.set_body.outputs.body }}
steps:
- name: Check for Deploy Trigger
uses: dolthub/pull-request-comment-trigger@master
id: check
with:
trigger: '#newbenchmark'
reaction: rocket
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set Benchmark
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
performance:
runs-on: ubuntu-18.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Benchmark Performance
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- uses: xt0rted/pull-request-comment-branch@v1
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Get pull number
uses: actions/github-script@v3
id: get_pull_number
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- name: Run benchmarks
id: run-benchmarks
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: 'dolt'
FROM_VERSION: ${{ github.sha }}
TO_SERVER: 'dolt'
TO_VERSION: ${{ steps.comment-branch.outputs.head_sha }}
MODE: 'pullRequest'
ISSUE_NUMBER: ${{ steps.get_pull_number.outputs.pull_number }}
ACTOR: ${{ github.actor }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
INIT_BIG_REPO: "false"
NOMS_BIN_FORMAT: "__DOLT_1__"
TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-dolt-dolt-job-json.sh"

View File

@@ -1,80 +0,0 @@
name: Benchmark Dolt vs MySQL
on:
repository_dispatch:
types: [ release-dolt ]
workflow_dispatch:
inputs:
version:
description: 'SemVer format release tag, i.e. 0.24.5'
required: true
default: ''
email:
description: 'Email address to receive results'
required: true
default: ''
jobs:
set-version-actor:
name: Set Version and Actor
runs-on: ubuntu-18.04
outputs:
version: ${{ steps.set-vars.outputs.version }}
actor: ${{ steps.set-vars.outputs.actor }}
actor_email: ${{ steps.set-vars.outputs.actor_email }}
steps:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
echo "::set-output name=actor_email::$ACTOR_EMAIL"
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
ACTOR_EMAIL: ${{ github.event.inputs.email }}
benchmark-release-mysql:
runs-on: ubuntu-18.04
needs: set-version-actor
name: Benchmark Dolt Release vs MySQL 8
steps:
- uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Run benchmarks
id: run-benchmarks
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: 'mysql'
FROM_VERSION: '8.0.28'
TO_SERVER: 'dolt'
TO_VERSION: ${{ needs.set-version-actor.outputs.version }}
MODE: 'release'
ACTOR: ${{ needs.set-version-actor.outputs.actor }}
ACTOR_EMAIL: ${{ needs.set-version-actor.outputs.actor_email }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
NOMS_BIN_FORMAT: "__LD_1__"
TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"

View File

@@ -1,82 +0,0 @@
# name: Performance Benchmark Reporter
on:
repository_dispatch:
types: [ benchmark-report ]
jobs:
report-pull-request:
name: Report Performance Benchmarks on Pull Request
runs-on: ubuntu-18.04
if: ${{ github.event.client_payload.issue_number != -1 }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Get benchmark results
id: get-results
run: aws s3api get-object --bucket=performance-benchmarking-github-actions-results --key="$KEY" results.log
env:
KEY: ${{ github.event.client_payload.key }}
- name: Post results to PR
uses: actions/github-script@v3
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const { ACTOR, ISSUE_NUMBER, GITHUB_WORKSPACE } = process.env;
const issue_number = parseInt(ISSUE_NUMBER, 10);
const { owner, repo } = context.repo;
fs = require('fs');
fs.readFile(`${GITHUB_WORKSPACE}/results.log`, 'utf8', function (err,data) {
if (err) {
return console.log(err);
}
return github.issues.createComment({
issue_number,
owner,
repo,
body: `@${ACTOR}\n ${data}`
});
});
env:
ACTOR: ${{ github.event.client_payload.actor }}
ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
report-email:
name: Report Performance Benchmarks via Email
runs-on: ubuntu-18.04
if: ${{ github.event.client_payload.issue_number == -1 }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Get benchmark results
id: get-results
run: aws s3api get-object --bucket=performance-benchmarking-github-actions-results --key="$KEY" results.log
env:
KEY: ${{ github.event.client_payload.key }}
- name: Send Email
if: ${{ github.event.client_payload.actor_email == '' }}
uses: ./.github/actions/ses-email-action
with:
region: us-west-2
version: ${{ github.event.client_payload.dolt_version }}
toAddresses: '["${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}"]'
dataFile: ${{ format('{0}/results.log', github.workspace) }}
- name: Send Email
if: ${{ github.event.client_payload.actor_email != '' }}
uses: ./.github/actions/ses-email-action
with:
region: us-west-2
version: ${{ github.event.client_payload.dolt_version }}
toAddresses: '["${{ github.event.client_payload.actor_email }}"]'
dataFile: ${{ format('{0}/results.log', github.workspace) }}

View File

@@ -1,100 +0,0 @@
name: Benchmark Pull Requests
on:
pull_request:
types: [ opened ]
issue_comment:
types: [ created ]
jobs:
validate-commentor:
runs-on: ubuntu-18.04
outputs:
valid: ${{ steps.set_valid.outputs.valid }}
steps:
- uses: actions/checkout@v2
- name: Validate Commentor
id: set_valid
run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
env:
ACTOR: ${{ github.actor }}
check-comments:
runs-on: ubuntu-18.04
needs: validate-commentor
if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
outputs:
benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
comment-body: ${{ steps.set_body.outputs.body }}
steps:
- name: Check for Deploy Trigger
uses: dolthub/pull-request-comment-trigger@master
id: check
with:
trigger: '#benchmark'
reaction: rocket
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set Benchmark
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
performance:
runs-on: ubuntu-18.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Benchmark Performance
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- uses: xt0rted/pull-request-comment-branch@v1
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Get pull number
uses: actions/github-script@v3
id: get_pull_number
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- name: Run benchmarks
id: run-benchmarks
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: 'dolt'
FROM_VERSION: ${{ github.sha }}
TO_SERVER: 'dolt'
TO_VERSION: ${{ steps.comment-branch.outputs.head_sha }}
MODE: 'pullRequest'
ISSUE_NUMBER: ${{ steps.get_pull_number.outputs.pull_number }}
ACTOR: ${{ github.actor }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
INIT_BIG_REPO: "true"
NOMS_BIN_FORMAT: "__LD_1__"
TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-dolt-dolt-job-json.sh"

View File

@@ -2,7 +2,7 @@ name: Email Team Members
on:
repository_dispatch:
types: [ release-email ]
types: [ email-report ]
jobs:
email-team:
@@ -39,5 +39,6 @@ jobs:
template: ${{ github.event.client_payload.template }}
region: us-west-2
version: ${{ github.event.client_payload.version }}
format: ${{ github.event.client_payload.noms_bin_format }}
toAddresses: ${{ steps.get-addresses.outputs.addresses }}
dataFile: ${{ format('{0}/results.log', github.workspace) }}

View File

@@ -0,0 +1,64 @@
# Triggers the asynchronous "Benchmark Imports" K8s workflow for a pull
# request when a permitted commenter posts '#import-benchmark' on it.
# Fix: removed the check-comments job's `comment-body` output — it referenced
# `steps.set_body.outputs.body`, but no step with id `set_body` exists, so the
# output was always empty and nothing consumed it.
name: Run Import Benchmark on Pull Requests
on:
  pull_request:
    types: [ opened ]
  issue_comment:
    types: [ created ]
jobs:
  # Only comments from approved users (checked by the script) may trigger a run.
  validate-commentor:
    runs-on: ubuntu-18.04
    outputs:
      valid: ${{ steps.set_valid.outputs.valid }}
    steps:
      - uses: actions/checkout@v2
      - name: Validate Commentor
        id: set_valid
        run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
        env:
          ACTOR: ${{ github.actor }}
  # Looks for the '#import-benchmark' trigger phrase in the comment and reacts
  # with a rocket when found.
  check-comments:
    runs-on: ubuntu-18.04
    needs: validate-commentor
    if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
    outputs:
      benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
    steps:
      - name: Check for Deploy Trigger
        uses: dolthub/pull-request-comment-trigger@master
        id: check
        with:
          trigger: '#import-benchmark'
          reaction: rocket
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Set Benchmark
        if: ${{ steps.check.outputs.triggered == 'true' }}
        id: set_benchmark
        run: |
          echo "::set-output name=benchmark::true"
  # Fires the repository_dispatch event consumed by the k8s benchmark-import
  # workflow, passing the PR head SHA and issue number in the payload.
  performance:
    runs-on: ubuntu-18.04
    needs: [validate-commentor, check-comments]
    if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
    name: Trigger Benchmark Import K8s Workflow
    steps:
      - uses: xt0rted/pull-request-comment-branch@v1
        id: comment-branch
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get pull number
        uses: actions/github-script@v3
        id: get_pull_number
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-import
          client-payload: '{"from_server": "dolt", "from_version": "${{ github.sha }}", "to_server": "dolt", "to_version": "${{ steps.comment-branch.outputs.head_sha }}", "mode": "pullRequest", "issue_number": "${{ steps.get_pull_number.outputs.pull_number }}", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/import-benchmarking/get-dolt-dolt-job-json.sh"}'

View File

@@ -1,57 +1,34 @@
name: SQL Correctness
name: Benchmark Imports
on:
repository_dispatch:
types: [ release-dolt ]
workflow_dispatch:
inputs:
version:
description: 'SemVer format release tag, i.e. 0.24.5'
required: true
default: ''
email:
description: 'Email address to receive results'
required: true
default: ''
types: [ benchmark-import ]
jobs:
set-version-actor:
name: Set Version and Actor
performance:
runs-on: ubuntu-18.04
outputs:
version: ${{ steps.set-vars.outputs.version }}
actor: ${{ steps.set-vars.outputs.actor }}
name: Benchmark Performance
strategy:
matrix:
dolt_fmt: [ "__LD_1__"]
steps:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
correctness-release:
runs-on: ubuntu-18.04
needs: set-version-actor
name: Dolt SQL Correctness
steps:
- uses: actions/checkout@v2
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
@@ -60,14 +37,19 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Run correctness
id: run-correctness
run: ./.github/scripts/sql-correctness/run-correctness.sh
- name: Create Import Benchmarking K8s Job
run: ./.github/scripts/import-benchmarking/run-benchmarks.sh
env:
TO_VERSION: ${{ needs.set-version-actor.outputs.version }}
MODE: 'release'
ACTOR: ${{ needs.set-version-actor.outputs.actor }}
ACTOR_EMAIL: ${{ github.event.inputs.email }}
FROM_SERVER: ${{ github.event.client_payload.from_server }}
FROM_VERSION: ${{ github.event.client_payload.from_version }}
TO_SERVER: ${{ github.event.client_payload.to_server }}
TO_VERSION: ${{ github.event.client_payload.to_version }}
MODE: ${{ github.event.client_payload.mode }}
ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
ACTOR: ${{ github.event.client_payload.actor }}
ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
TEMPLATE_SCRIPT: "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"
INIT_BIG_REPO: ${{ github.event.client_payload.init_big_repo }}
NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}

View File

@@ -0,0 +1,72 @@
# Deploys sysbench latency and TPCC benchmarking Jobs to the
# performance-benchmarking namespace of the EKS cluster. Only triggered by
# benchmark-latency repository_dispatch events sent from other workflows
# (nightly, release, pull-request triggers); all parameters arrive in the
# dispatch client_payload.
name: Benchmark Latency
on:
  repository_dispatch:
    types: [ benchmark-latency ]
jobs:
  performance:
    runs-on: ubuntu-18.04
    name: Benchmark Performance
    strategy:
      matrix:
        # One run per noms storage format.
        dolt_fmt: [ "__LD_1__", "__DOLT_1__" ]
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: azure/setup-kubectl@v2.0
        with:
          # NOTE(review): the kubeconfig below uses the
          # client.authentication.k8s.io/v1alpha1 exec API, which newer kubectl
          # releases no longer accept — presumably why kubectl is pinned here.
          # Confirm before bumping this version.
          version: 'v1.23.6'
      - name: Install aws-iam-authenticator
        run: |
          curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
          chmod +x ./aws-iam-authenticator && \
          sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
          aws-iam-authenticator version
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      # Builds a kubeconfig from the corp secret and authenticates to the
      # cluster via aws-iam-authenticator exec credentials.
      - name: Create and Auth kubeconfig
        run: |
          echo "$CONFIG" > kubeconfig
          KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
          KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
          KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
        env:
          CONFIG: ${{ secrets.CORP_KUBECONFIG }}
      - name: Create Sysbench Performance Benchmarking K8s Job
        run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
        env:
          FROM_SERVER: ${{ github.event.client_payload.from_server }}
          FROM_VERSION: ${{ github.event.client_payload.from_version }}
          TO_SERVER: ${{ github.event.client_payload.to_server }}
          TO_VERSION: ${{ github.event.client_payload.to_version }}
          MODE: ${{ github.event.client_payload.mode }}
          ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
          ACTOR: ${{ github.event.client_payload.actor }}
          ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: ${{ github.event.client_payload.init_big_repo }}
          NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
          TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}
      # Identical payload to the step above; WITH_TPCC=true makes the script
      # deploy the TPCC job instead of the sysbench one.
      - name: Create TPCC Performance Benchmarking K8s Job
        run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
        env:
          FROM_SERVER: ${{ github.event.client_payload.from_server }}
          FROM_VERSION: ${{ github.event.client_payload.from_version }}
          TO_SERVER: ${{ github.event.client_payload.to_server }}
          TO_VERSION: ${{ github.event.client_payload.to_version }}
          MODE: ${{ github.event.client_payload.mode }}
          ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
          ACTOR: ${{ github.event.client_payload.actor }}
          ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: ${{ github.event.client_payload.init_big_repo }}
          NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
          WITH_TPCC: "true"
          TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}

View File

@@ -37,7 +37,7 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Fuzzer gatekeeper
- name: Create Fuzzer (GateKeeper) K8s Job
run: ./.github/scripts/fuzzer/run-fuzzer.sh
env:
VERSION: ${{ github.sha }}

View File

@@ -1,35 +1,33 @@
name: Nightly Benchmarks
name: SQL Correctness
on:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
repository_dispatch:
types: [ sql-correctness ]
jobs:
perf:
correctness:
runs-on: ubuntu-18.04
name: Benchmark Performance
name: Dolt SQL Correctness
strategy:
matrix:
dolt_fmt: [ "__LD_1__", "__DOLT_1__" ]
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
@@ -38,29 +36,14 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Run benchmarks
id: run-benchmarks
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: 'mysql'
FROM_VERSION: '8.0.28'
TO_SERVER: 'dolt'
TO_VERSION: ${{ github.sha }}
MODE: 'nightly'
ACTOR: ${{ github.actor }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"
- name: Run correctness
id: run-correctness
- name: Create SQL Correctness K8s Job
run: ./.github/scripts/sql-correctness/run-correctness.sh
env:
TO_VERSION: ${{ github.sha }}
MODE: 'nightly'
ACTOR: ${{ github.actor }}
ACTOR_EMAIL: 'max@dolthub.com'
TO_VERSION: ${{ github.event.client_payload.to_version }}
MODE: ${{ github.event.client_payload.mode }}
ACTOR: ${{ github.event.client_payload.actor }}
ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
TEMPLATE_SCRIPT: "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"
TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}

View File

@@ -0,0 +1,27 @@
# Nightly entry point: runs on a schedule (or manually) and fans out to the
# asynchronous K8s workflows via repository_dispatch events. The dispatched
# workflows (benchmark-latency, sql-correctness, benchmark-import) do the
# actual work; this job only sends the payloads.
name: Nightly Benchmarks
on:
  workflow_dispatch:
  schedule:
    # 02:00 UTC daily.
    - cron: '0 2 * * *'
jobs:
  perf:
    runs-on: ubuntu-18.04
    name: Trigger Benchmark Latency, Benchmark Import, and SQL Correctness K8s Workflows
    steps:
      # Sysbench latency: nightly Dolt (current SHA) vs MySQL 8.0.28.
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-latency
          client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
      # SQL correctness run against the current SHA; results emailed.
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: sql-correctness
          client-payload: '{"to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "actor_email": "max@dolthub.com", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'
      # Import benchmark: Dolt (current SHA) vs MySQL 8.0.28; results emailed.
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-import
          client-payload: '{"from_server": "dolt", "from_version": "${{ github.sha }}", "to_server": "mysql", "to_version": "8.0.28", "mode": "nightly", "actor": "${{ github.actor }}", "actor_email": "vinai@dolthub.com", "template_script": "./.github/scripts/import-benchmarking/get-mysql-dolt-job-json.sh"}'

View File

@@ -0,0 +1,51 @@
# Release entry point: triggered when Dolt is released (release-dolt dispatch)
# or manually, it resolves the version/actor/email to benchmark and fans out
# to the asynchronous K8s benchmarking workflows via repository_dispatch.
name: Benchmark Dolt vs MySQL
on:
  repository_dispatch:
    types: [ release-dolt ]
  workflow_dispatch:
    inputs:
      version:
        description: 'SemVer format release tag, i.e. 0.24.5'
        required: true
        default: ''
      email:
        description: 'Email address to receive results'
        required: true
        default: ''
jobs:
  # Normalizes the two trigger types (manual inputs vs dispatch client_payload)
  # into one set of job outputs consumed by the trigger job below.
  set-version-actor:
    name: Set Version and Actor
    runs-on: ubuntu-18.04
    outputs:
      version: ${{ steps.set-vars.outputs.version }}
      actor: ${{ steps.set-vars.outputs.actor }}
      actor_email: ${{ steps.set-vars.outputs.actor_email }}
    steps:
      - name: Set variables
        id: set-vars
        run: |
          echo "::set-output name=version::$VERSION"
          echo "::set-output name=actor::$ACTOR"
          echo "::set-output name=actor_email::$ACTOR_EMAIL"
        env:
          VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
          ACTOR: ${{ github.event.client_payload.actor || github.actor }}
          # Only set on manual runs; empty for repository_dispatch triggers.
          ACTOR_EMAIL: ${{ github.event.inputs.email }}
  benchmark-dolt-mysql:
    runs-on: ubuntu-18.04
    needs: set-version-actor
    name: Trigger Benchmark Latency and Benchmark Import K8s Workflows
    steps:
      # Sysbench latency: released Dolt version vs MySQL 8.0.28.
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-latency
          client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
      # Import benchmark: released Dolt version vs MySQL 8.0.28.
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-import
          client-payload: '{"from_server": "dolt", "from_version": "${{ needs.set-version-actor.outputs.version }}", "to_server": "mysql", "to_version": "8.0.28", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/import-benchmarking/get-mysql-dolt-job-json.sh"}'

View File

@@ -0,0 +1,64 @@
# Triggers the asynchronous "Benchmark Latency" K8s workflow for a pull
# request when a permitted commenter posts '#benchmark' on it.
# Fix: removed the check-comments job's `comment-body` output — it referenced
# `steps.set_body.outputs.body`, but no step with id `set_body` exists, so the
# output was always empty and nothing consumed it.
name: Benchmark Pull Requests
on:
  pull_request:
    types: [ opened ]
  issue_comment:
    types: [ created ]
jobs:
  # Only comments from approved users (checked by the script) may trigger a run.
  validate-commentor:
    runs-on: ubuntu-18.04
    outputs:
      valid: ${{ steps.set_valid.outputs.valid }}
    steps:
      - uses: actions/checkout@v2
      - name: Validate Commentor
        id: set_valid
        run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
        env:
          ACTOR: ${{ github.actor }}
  # Looks for the '#benchmark' trigger phrase in the comment and reacts with a
  # rocket when found.
  check-comments:
    runs-on: ubuntu-18.04
    needs: validate-commentor
    if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
    outputs:
      benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
    steps:
      - name: Check for Deploy Trigger
        uses: dolthub/pull-request-comment-trigger@master
        id: check
        with:
          trigger: '#benchmark'
          reaction: rocket
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Set Benchmark
        if: ${{ steps.check.outputs.triggered == 'true' }}
        id: set_benchmark
        run: |
          echo "::set-output name=benchmark::true"
  # Fires the repository_dispatch event consumed by the k8s benchmark-latency
  # workflow, comparing the PR head SHA against the base SHA.
  performance:
    runs-on: ubuntu-18.04
    needs: [validate-commentor, check-comments]
    if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
    name: Trigger Benchmark Latency K8s Workflow
    steps:
      - uses: xt0rted/pull-request-comment-branch@v1
        id: comment-branch
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get pull number
        uses: actions/github-script@v3
        id: get_pull_number
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-latency
          client-payload: '{"from_server": "dolt", "from_version": "${{ github.sha }}", "to_server": "dolt", "to_version": "${{ steps.comment-branch.outputs.head_sha }}", "mode": "pullRequest", "issue_number": "${{ steps.get_pull_number.outputs.pull_number }}", "init_big_repo": "true", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/performance-benchmarking/get-dolt-dolt-job-json.sh"}'

50
.github/workflows/pull-report.yaml vendored Normal file
View File

@@ -0,0 +1,50 @@
# Posts benchmark/correctness results (uploaded to S3 by a K8s job) as a
# comment on the originating pull request. Triggered only by the pull-report
# repository_dispatch event; skipped when the run was not tied to a PR
# (the dispatcher sends issue_number == -1 in that case).
# Fix: the embedded github-script assigned `fs = require('fs')` as an implicit
# global; declare it with `const`.
name: Post to Pull Request
on:
  repository_dispatch:
    types: [ pull-report ]
jobs:
  report-pull-request:
    name: Report Performance Benchmarks on Pull Request
    runs-on: ubuntu-18.04
    if: ${{ github.event.client_payload.issue_number != -1 }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      # Bucket and key of the results file are supplied by the dispatcher.
      - name: Get benchmark results
        id: get-results
        run: aws s3api get-object --bucket="$BUCKET" --key="$KEY" results.log
        env:
          KEY: ${{ github.event.client_payload.key }}
          BUCKET: ${{ github.event.client_payload.bucket }}
      # Reads the downloaded results and posts them as a PR comment that
      # mentions the triggering actor and the noms format benchmarked.
      - name: Post results to PR
        uses: actions/github-script@v3
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const { ACTOR, FORMAT, ISSUE_NUMBER, GITHUB_WORKSPACE } = process.env;
            const issue_number = parseInt(ISSUE_NUMBER, 10);
            const { owner, repo } = context.repo;
            const fs = require('fs');
            fs.readFile(`${GITHUB_WORKSPACE}/results.log`, 'utf8', function (err,data) {
              if (err) {
                return console.log(err);
              }
              return github.issues.createComment({
                issue_number,
                owner,
                repo,
                body: `@${ACTOR} ${FORMAT}\n ${data}`
              });
            });
        env:
          ACTOR: ${{ github.event.client_payload.actor }}
          ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
          FORMAT: ${{ github.event.client_payload.noms_bin_format }}

43
.github/workflows/sql-correctness.yaml vendored Normal file
View File

@@ -0,0 +1,43 @@
# Release/manual entry point for SQL correctness: resolves version/actor/email
# and triggers the asynchronous sql-correctness K8s workflow.
# Fix: the dispatch payload below references
# `needs.set-version-actor.outputs.actor_email`, but the set-version-actor job
# never declared that output, so it always expanded to empty. Added the
# actor_email output, set-output line, and ACTOR_EMAIL env var, matching the
# sibling release-benchmark workflow.
name: Benchmark SQL Correctness
on:
  repository_dispatch:
    types: [ release-dolt ]
  workflow_dispatch:
    inputs:
      version:
        description: 'SemVer format release tag, i.e. 0.24.5'
        required: true
        default: ''
      email:
        description: 'Email address to receive results'
        required: true
        default: ''
jobs:
  # Normalizes the two trigger types (manual inputs vs dispatch client_payload)
  # into one set of job outputs consumed by the trigger job below.
  set-version-actor:
    name: Set Version and Actor
    runs-on: ubuntu-18.04
    outputs:
      version: ${{ steps.set-vars.outputs.version }}
      actor: ${{ steps.set-vars.outputs.actor }}
      actor_email: ${{ steps.set-vars.outputs.actor_email }}
    steps:
      - name: Set variables
        id: set-vars
        run: |
          echo "::set-output name=version::$VERSION"
          echo "::set-output name=actor::$ACTOR"
          echo "::set-output name=actor_email::$ACTOR_EMAIL"
        env:
          VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
          ACTOR: ${{ github.event.client_payload.actor || github.actor }}
          # Only set on manual runs; empty for repository_dispatch triggers.
          ACTOR_EMAIL: ${{ github.event.inputs.email }}
  correctness:
    runs-on: ubuntu-18.04
    needs: set-version-actor
    name: Trigger SQL Correctness K8s Workflow
    steps:
      - uses: peter-evans/repository-dispatch@v1
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: sql-correctness
          client-payload: '{"to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'

209
go/Godeps/LICENSES generated
View File

@@ -5455,6 +5455,215 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 6681c42f6974591d2056518a26201323fa7d42bdc4d64bfc12c332b3 =
================================================================================
================================================================================
= github.com/pquerna/cachecontrol licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/prometheus/client_golang licensed under: =

View File

@@ -95,6 +95,8 @@ const (
DeleteFlag = "delete"
DeleteForceFlag = "D"
OutputOnlyFlag = "output-only"
RemoteParam = "remote"
BranchParam = "branch"
TrackFlag = "track"
)
@@ -147,6 +149,17 @@ func CreateAddArgParser() *argparser.ArgParser {
return ap
}
// CreateCloneArgParser returns the argument parser for the `dolt clone`
// command: the remote name, an optional single branch to clone, and the
// standard AWS credential options.
func CreateCloneArgParser() *argparser.ArgParser {
	parser := argparser.NewArgParser()
	parser.SupportsString(RemoteParam, "", "name", "Name of the remote to be added to the cloned database. The default is 'origin'.")
	parser.SupportsString(BranchParam, "b", "branch", "The branch to be cloned. If not specified all branches will be cloned.")
	parser.SupportsString(dbfactory.AWSRegionParam, "", "region", "")
	parser.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, dbfactory.AWSCredTypes))
	parser.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file.")
	parser.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use.")
	return parser
}
func CreateResetArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(HardResetParam, "", "Resets the working tables and staged tables. Any changes to tracked tables in the working tree since {{.LessThan}}commit{{.GreaterThan}} are discarded.")
@@ -154,6 +167,15 @@ func CreateResetArgParser() *argparser.ArgParser {
return ap
}
// CreateRemoteArgParser returns the argument parser for the `dolt remote`
// command; it carries only the shared AWS credential options.
func CreateRemoteArgParser() *argparser.ArgParser {
	parser := argparser.NewArgParser()
	parser.SupportsString(dbfactory.AWSRegionParam, "", "region", "")
	parser.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, dbfactory.AWSCredTypes))
	parser.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file")
	parser.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use")
	return parser
}
func CreateCleanArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(DryRunFlag, "", "Tests removing untracked tables without modifying the working set.")

View File

@@ -17,7 +17,6 @@ package commands
import (
"context"
"encoding/json"
"os"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
@@ -177,8 +176,8 @@ func addBackup(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) errhand.Verbos
return errhand.VerboseErrorFromError(err)
}
r := env.NewRemote(backupName, backupUrl, params, dEnv)
err = dEnv.AddBackup(r.Name, r.Url, r.FetchSpecs, r.Params)
r := env.NewRemote(backupName, backupUrl, params)
err = dEnv.AddBackup(r)
switch err {
case nil:
@@ -234,7 +233,7 @@ func syncBackupUrl(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
return errhand.VerboseErrorFromError(err)
}
b := env.NewRemote("__temp__", backupUrl, params, dEnv)
b := env.NewRemote("__temp__", backupUrl, params)
return backup(ctx, dEnv, b)
}
@@ -259,7 +258,7 @@ func syncBackup(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR
}
func backup(ctx context.Context, dEnv *env.DoltEnv, b env.Remote) errhand.VerboseError {
destDb, err := b.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format())
destDb, err := b.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), dEnv)
if err != nil {
return errhand.BuildDError("error: unable to open destination.").AddCause(err).Build()
}
@@ -308,35 +307,35 @@ func restoreBackup(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
return verr
}
r := env.NewRemote("", remoteUrl, params, dEnv)
srcDb, err := r.GetRemoteDB(ctx, types.Format_Default)
r := env.NewRemote("", remoteUrl, params)
srcDb, err := r.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
// make .dolt dir whith env.NoRemote to avoid origin upstream
dEnv, err = actions.EnvForClone(ctx, srcDb.ValueReadWriter().Format(), env.NoRemote, dir, dEnv.FS, dEnv.Version, env.GetCurrentUserHomeDir)
// Create a new Dolt env for the clone; use env.NoRemote to avoid origin upstream
clonedEnv, err := actions.EnvForClone(ctx, srcDb.ValueReadWriter().Format(), env.NoRemote, dir, dEnv.FS, dEnv.Version, env.GetCurrentUserHomeDir)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
// Nil out the old Dolt env so we don't accidentally use the wrong database
dEnv = nil
// still make empty repo state
_, err = env.CreateRepoState(dEnv.FS, env.DefaultInitBranch)
_, err = env.CreateRepoState(clonedEnv.FS, env.DefaultInitBranch)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = actions.SyncRoots(ctx, srcDb, dEnv.DoltDB, dEnv.TempTableFilesDir(), buildProgStarter(downloadLanguage), stopProgFuncs)
err = actions.SyncRoots(ctx, srcDb, clonedEnv.DoltDB, clonedEnv.TempTableFilesDir(), buildProgStarter(downloadLanguage), stopProgFuncs)
if err != nil {
// If we're cloning into a directory that already exists do not erase it. Otherwise
// make best effort to delete the directory we created.
if userDirExists {
// Set the working dir to the parent of the .dolt folder so we can delete .dolt
_ = os.Chdir(dir)
_ = dEnv.FS.Delete(dbfactory.DoltDir, true)
_ = clonedEnv.FS.Delete(dbfactory.DoltDir, true)
} else {
_ = os.Chdir("../")
_ = dEnv.FS.Delete(dir, true)
_ = clonedEnv.FS.Delete(".", true)
}
return errhand.VerboseErrorFromError(err)
}

View File

@@ -139,7 +139,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
branchSet := set.NewStrSet(apr.Args)
verbose := apr.Contains(verboseFlag)
printRemote := apr.Contains(remoteParam)
printRemote := apr.Contains(cli.RemoteParam)
printAll := apr.Contains(allFlag)
branches, err := dEnv.DoltDB.GetHeadRefs(ctx)

View File

@@ -16,7 +16,6 @@ package commands
import (
"context"
"os"
"path"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
@@ -33,11 +32,6 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
const (
remoteParam = "remote"
branchParam = "branch"
)
var cloneDocs = cli.CommandDocumentationContent{
ShortDesc: "Clone a data repository into a new directory",
LongDesc: `Clones a repository into a newly created directory, creates remote-tracking branches for each branch in the cloned repository (visible using {{.LessThan}}dolt branch -a{{.GreaterThan}}), and creates and checks out an initial branch that is forked from the cloned repository's currently active branch.
@@ -75,14 +69,7 @@ func (cmd CloneCmd) Docs() *cli.CommandDocumentation {
}
func (cmd CloneCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsString(remoteParam, "", "name", "Name of the remote to be added. Default will be 'origin'.")
ap.SupportsString(branchParam, "b", "branch", "The branch to be cloned. If not specified all branches will be cloned.")
ap.SupportsString(dbfactory.AWSRegionParam, "", "region", "")
ap.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, credTypes))
ap.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file.")
ap.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use.")
return ap
return cli.CreateCloneArgParser()
}
// EventType returns the type of the event to log
@@ -105,8 +92,8 @@ func (cmd CloneCmd) Exec(ctx context.Context, commandStr string, args []string,
}
func clone(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) errhand.VerboseError {
remoteName := apr.GetValueOrDefault(remoteParam, "origin")
branch := apr.GetValueOrDefault(branchParam, "")
remoteName := apr.GetValueOrDefault(cli.RemoteParam, "origin")
branch := apr.GetValueOrDefault(cli.BranchParam, "")
dir, urlStr, verr := parseArgs(apr)
if verr != nil {
return verr
@@ -132,22 +119,23 @@ func clone(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEn
return verr
}
dEnv, err = actions.EnvForClone(ctx, srcDB.ValueReadWriter().Format(), r, dir, dEnv.FS, dEnv.Version, env.GetCurrentUserHomeDir)
// Create a new Dolt env for the clone
clonedEnv, err := actions.EnvForClone(ctx, srcDB.ValueReadWriter().Format(), r, dir, dEnv.FS, dEnv.Version, env.GetCurrentUserHomeDir)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = actions.CloneRemote(ctx, srcDB, remoteName, branch, dEnv)
// Nil out the old Dolt env so we don't accidentally operate on the wrong database
dEnv = nil
err = actions.CloneRemote(ctx, srcDB, remoteName, branch, clonedEnv)
if err != nil {
// If we're cloning into a directory that already exists do not erase it. Otherwise
// make best effort to delete the directory we created.
if userDirExists {
// Set the working dir to the parent of the .dolt folder so we can delete .dolt
_ = os.Chdir(dir)
_ = dEnv.FS.Delete(dbfactory.DoltDir, true)
clonedEnv.FS.Delete(dbfactory.DoltDir, true)
} else {
_ = os.Chdir("../")
_ = dEnv.FS.Delete(dir, true)
clonedEnv.FS.Delete(".", true)
}
return errhand.VerboseErrorFromError(err)
}
@@ -160,15 +148,15 @@ func clone(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEn
}
}
err = dEnv.RepoStateWriter().UpdateBranch(dEnv.RepoState.CWBHeadRef().GetPath(), env.BranchConfig{
Merge: dEnv.RepoState.Head,
err = clonedEnv.RepoStateWriter().UpdateBranch(clonedEnv.RepoState.CWBHeadRef().GetPath(), env.BranchConfig{
Merge: clonedEnv.RepoState.Head,
Remote: remoteName,
})
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = dEnv.RepoState.Save(dEnv.FS)
err = clonedEnv.RepoState.Save(clonedEnv.FS)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
@@ -206,9 +194,8 @@ func parseArgs(apr *argparser.ArgParseResults) (string, string, errhand.VerboseE
func createRemote(ctx context.Context, remoteName, remoteUrl string, params map[string]string, dEnv *env.DoltEnv) (env.Remote, *doltdb.DoltDB, errhand.VerboseError) {
cli.Printf("cloning %s\n", remoteUrl)
r := env.NewRemote(remoteName, remoteUrl, params, dEnv)
ddb, err := r.GetRemoteDB(ctx, types.Format_Default)
r := env.NewRemote(remoteName, remoteUrl, params)
ddb, err := r.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
bdr := errhand.BuildDError("error: failed to get remote db").AddCause(err)

View File

@@ -385,7 +385,7 @@ func diffUserTables(ctx context.Context, dEnv *env.DoltEnv, dArgs *diffArgs) (ve
fromSch = toSch
}
if !schema.ArePrimaryKeySetsDiffable(fromSch, toSch) {
if !schema.ArePrimaryKeySetsDiffable(td.Format(), fromSch, toSch) {
cli.PrintErrf("Primary key sets differ between revisions for table %s, skipping data diff\n", tblName)
continue
}

View File

@@ -0,0 +1,132 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engine
import (
"fmt"
"strings"
"time"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/sirupsen/logrus"
"github.com/dolthub/dolt/go/libraries/utils/jwtauth"
)
// JwksConfig describes one JWKS (JSON Web Key Set) entry loaded from the
// server's YAML configuration: the key-set name, where to fetch the keys,
// the JWT claims expected from tokens it signs, and which claim fields to
// echo into the server log on successful authentication.
type JwksConfig struct {
	Name        string            `yaml:"name"`          // matched against the "jwks" field of a user's identity string
	LocationUrl string            `yaml:"location_url"`  // URL of the JWKS document (e.g. file:// or https://)
	Claims      map[string]string `yaml:"claims"`        // expected claim values for tokens validated against this key set
	FieldsToLog []string          `yaml:"fields_to_log"` // claim fields logged on successful auth (see validateJWT)
}
// authenticateDoltJWTPlugin is a plaintext auth plugin that treats the
// client-supplied password as a JWT and validates it against one of the
// configured JWKS key sets.
type authenticateDoltJWTPlugin struct {
	jwksConfig []JwksConfig // configured JWKS entries; empty config rejects all logins
}
// NewAuthenticateDoltJWTPlugin constructs the JWT-based plaintext auth
// plugin backed by the given JWKS configuration entries.
func NewAuthenticateDoltJWTPlugin(jwksConfig []JwksConfig) mysql_db.PlaintextAuthPlugin {
	plugin := authenticateDoltJWTPlugin{jwksConfig: jwksConfig}
	return &plugin
}
// Authenticate implements mysql_db.PlaintextAuthPlugin. The plaintext
// password supplied by the client is interpreted as a JWT and validated
// against the plugin's JWKS configuration; the user entry's Identity
// string carries the expected claims. Validation happens at time.Now().
func (p *authenticateDoltJWTPlugin) Authenticate(db *mysql_db.MySQLDb, user string, userEntry *mysql_db.User, pass string) (bool, error) {
	return validateJWT(p.jwksConfig, user, userEntry.Identity, pass, time.Now())
}
// validateJWT checks that token is a JWT signed by one of the configured
// JWKS key sets and that its claims match the expectations encoded in the
// user's identity string. On success the configured claim fields are
// written to the server log and (true, nil) is returned.
func validateJWT(config []JwksConfig, username, identity, token string, reqTime time.Time) (bool, error) {
	if len(config) == 0 {
		return false, fmt.Errorf("ValidateJWT: JWKS server config not found")
	}

	expected := parseUserIdentity(identity)

	// If the identity pins a subject, it must match the connecting user.
	if subject, found := expected["sub"]; found && subject != username {
		return false, fmt.Errorf("ValidateJWT: Subjects do not match")
	}

	matched, err := getMatchingJwksConfig(config, expected["jwks"])
	if err != nil {
		return false, err
	}

	provider, err := getJWTProvider(expected, matched.LocationUrl)
	if err != nil {
		return false, err
	}

	claims, err := jwtauth.NewJWTValidator(provider).ValidateJWT(token, reqTime)
	if err != nil {
		return false, err
	}

	// Log only the claim fields the config asks for.
	logLine := "Authenticating with JWT: "
	for _, field := range matched.FieldsToLog {
		logLine += fmt.Sprintf("%s: %s,", field, getClaimFromKey(claims, field))
	}
	logrus.Info(logLine)

	return true, nil
}
// getJWTProvider builds a jwtauth.JWTProvider for the given JWKS URL,
// copying the expected iss/aud/sub claims from the parsed identity map.
// The "jwks" key selects the config entry and is skipped here; any other
// key is rejected as an unknown expected claim.
func getJWTProvider(expectedClaimsMap map[string]string, url string) (jwtauth.JWTProvider, error) {
	provider := jwtauth.JWTProvider{URL: url}
	for key, value := range expectedClaimsMap {
		if key == "jwks" {
			continue
		}
		switch key {
		case "iss":
			provider.Issuer = value
		case "aud":
			provider.Audience = value
		case "sub":
			provider.Subject = value
		default:
			return provider, fmt.Errorf("ValidateJWT: Unexpected expected claim found in user identity")
		}
	}
	return provider, nil
}
// getClaimFromKey returns the value of the named claim from a validated
// token, or "" for field names this function does not recognize.
func getClaimFromKey(claims *jwtauth.Claims, field string) string {
	if field == "id" {
		return claims.ID
	}
	if field == "iss" {
		return claims.Issuer
	}
	if field == "sub" {
		return claims.Subject
	}
	if field == "on_behalf_of" {
		return claims.OnBehalfOf
	}
	return ""
}
// getMatchingJwksConfig finds the JWKS config entry whose Name equals
// name. The returned pointer refers to a copy of the matching entry, so
// callers cannot mutate the underlying slice through it.
func getMatchingJwksConfig(config []JwksConfig, name string) (*JwksConfig, error) {
	for i := range config {
		if config[i].Name == name {
			entry := config[i]
			return &entry, nil
		}
	}
	return nil, fmt.Errorf("ValidateJWT: Matching JWKS config not found")
}
// parseUserIdentity parses a user identity string of the form
// "key1=value1,key2=value2,..." into a map. Entries without an '=' are
// skipped rather than causing a panic (the previous Split-based version
// indexed tup[1] unconditionally and panicked on malformed input,
// including an empty identity string). If a value itself contains '=',
// everything after the first '=' is kept as the value.
func parseUserIdentity(identity string) map[string]string {
	idMap := make(map[string]string)
	for _, item := range strings.Split(identity, ",") {
		key, value, ok := strings.Cut(item, "=")
		if !ok {
			// Malformed entry with no '=': ignore it.
			continue
		}
		idMap[key] = value
	}
	return idMap
}

View File

@@ -0,0 +1,78 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engine
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// Fixed fixtures for the JWT auth tests below. The token in `jwt` was
// pre-generated against the key set in testdata/test_jwks.json; it is
// short-lived, so the tests validate it at a pinned timestamp rather
// than time.Now(). NOTE(review): presumably the token's sub/iss/aud
// claims mirror the values below — regenerate all of them together.
var jwksName = "jwksname"
var sub = "test_user"
var iss = "dolthub.com"
var aud = "my_resource"
var onBehalfOf = "my_user"
var jwt = "eyJhbGciOiJSUzI1NiIsImtpZCI6ImUwNjA2Y2QwLTkwNWQtNGFiYS05MjBjLTZlNTE0YTFjYmIyNiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsibXlfcmVzb3VyY2UiXSwiZXhwIjoxNjU4Mjc1OTAzLCJpYXQiOjE2NTgyNzU4NzMsImlzcyI6ImRvbHRodWIuY29tIiwianRpIjoiN2ViZTg3YmMtOTkzMi00ZTljLTk5N2EtNjQzMDk0NTBkMWVjIiwib25fYmVoYWxmX29mIjoibXlfdXNlciIsInN1YiI6InRlc3RfdXNlciJ9.u2cUGUkQ2hk4AaxtNQB-6Jcdf5LtehFA7XX2FG8LGgTf6KfwE3cuuGaBIU8Jz9ktD9g8TjAbfAfbrNaFNYnKG6SnDUHp0t7VbfLdgfNDQqSyH0nOK2UF8ffxqa46PRxeMwTSJv8prE07rcmiZNL9Ie4vSGYLncJfMzo_RdE-A-PH7z-ZyZ_TxOMhkgMFq2Af5Px3zFuAKq-Y-PrQNopSuzjPJc0DQ93Q7EcIHfU6Fx6gOVTkzHxnOFcg3Nj-4HhqBSvBa_BdMYEzHJKx3F_9rrCCPqEGUFnxXAqFFmnZUQuQKpN2yW_zhviCVqrvbP7vOCIXmxi8YXLiGiV-4KlxHA"
// TestJWTAuth exercises validateJWT against a pre-generated token and a
// local JWKS file, covering the success path and several failure modes.
func TestJWTAuth(t *testing.T) {
	jwksConfig := []JwksConfig{
		{
			Name: jwksName,
			// Fixed: was fmt.Sprintf with no formatting directives
			// (flagged by `go vet`); a plain literal is equivalent.
			LocationUrl: "file:///testdata/test_jwks.json",
			Claims: map[string]string{
				"alg": "RS256",
				"aud": aud,
				"iss": iss,
				"sub": sub,
			},
			FieldsToLog: []string{"id", "on_behalf_of"},
		},
	}

	// Success: validate at a pinned time near the token's creation.
	tokenCreated := time.Date(2022, 07, 20, 0, 12, 0, 0, time.UTC) // Update time if creating new token
	authed, err := validateJWT(jwksConfig, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,aud=%s", jwksName, sub, iss, aud), jwt, tokenCreated)
	require.NoError(t, err)
	require.True(t, authed)

	// Token expired: validating "now" is long past the token's exp claim.
	now := time.Now()
	authed, err = validateJWT(jwksConfig, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,aud=%s", jwksName, sub, iss, aud), jwt, now)
	require.Error(t, err)
	require.False(t, authed)

	// Expected sub does not match the connecting username.
	authed, err = validateJWT(jwksConfig, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,aud=%s", jwksName, "wrong-sub", iss, aud), jwt, tokenCreated)
	require.Error(t, err)
	require.False(t, authed)

	// Jwks config doesn't exist.
	authed, err = validateJWT([]JwksConfig{}, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,aud=%s", jwksName, sub, iss, aud), jwt, tokenCreated)
	require.Error(t, err)
	require.False(t, authed)

	// No token supplied.
	authed, err = validateJWT(jwksConfig, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,aud=%s", jwksName, sub, iss, aud), "", tokenCreated)
	require.Error(t, err)
	require.False(t, authed)

	// Unknown claim in identity string.
	// NOTE(review): this case also passes an empty token, so the error may
	// stem from the missing token rather than the unknown claim — confirm.
	authed, err = validateJWT(jwksConfig, sub, fmt.Sprintf("jwks=%s,sub=%s,iss=%s,unknown=%s", jwksName, sub, iss, aud), "", tokenCreated)
	require.Error(t, err)
	require.False(t, authed)
}

View File

@@ -25,6 +25,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/analyzer"
"github.com/dolthub/go-mysql-server/sql/information_schema"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/vitess/go/vt/sqlparser"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
@@ -47,13 +48,15 @@ type SqlEngine struct {
}
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
PrivFilePath string
ServerUser string
ServerPass string
Autocommit bool
Bulk bool
InitialDb string
IsReadOnly bool
IsServerLocked bool
PrivFilePath string
ServerUser string
ServerPass string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
}
// NewSqlEngine returns a SqlEngine
@@ -65,7 +68,7 @@ func NewSqlEngine(
) (*SqlEngine, error) {
if ok, _ := mrEnv.IsLocked(); ok {
config.IsReadOnly = true
config.IsServerLocked = true
}
parallelism := runtime.GOMAXPROCS(0)
@@ -85,7 +88,7 @@ func NewSqlEngine(
all := append(dsqleDBsAsSqlDBs(dbs), infoDB)
b := env.GetDefaultInitBranch(mrEnv.Config())
pro := dsqle.NewDoltDatabaseProvider(b, mrEnv.FileSystem(), all...)
pro := dsqle.NewDoltDatabaseProvider(b, mrEnv.FileSystem(), all...).WithRemoteDialer(mrEnv.RemoteDialProvider())
// Load in privileges from file, if it exists
persister := mysql_file_handler.NewPersister(config.PrivFilePath)
@@ -104,8 +107,12 @@ func NewSqlEngine(
}
// Set up engine
engine := gms.New(analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(), &gms.Config{IsReadOnly: config.IsReadOnly, TemporaryUsers: tempUsers}).WithBackgroundThreads(bThreads)
engine := gms.New(analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(), &gms.Config{IsReadOnly: config.IsReadOnly, TemporaryUsers: tempUsers, IsServerLocked: config.IsServerLocked}).WithBackgroundThreads(bThreads)
engine.Analyzer.Catalog.MySQLDb.SetPersister(persister)
engine.Analyzer.Catalog.MySQLDb.SetPlugins(map[string]mysql_db.PlaintextAuthPlugin{
"authentication_dolt_jwt": NewAuthenticateDoltJWTPlugin(config.JwksConfig),
})
// Load MySQL Db information
if err = engine.Analyzer.Catalog.MySQLDb.LoadData(sql.NewEmptyContext(), data); err != nil {
return nil, err

View File

@@ -0,0 +1 @@
{"keys":[{"use":"sig","kty":"RSA","kid":"e0606cd0-905d-4aba-920c-6e514a1cbb26","alg":"RS256","n":"3uhDGQLrA8ZJIPAixKoxzCMzWRHb5_UWNihyccCWUicUnCOuTr5caoP4pTVdvK8SudQDQJFXBcO8i3Y5zIHTafTjYZ_ofe07_E9Pz9hFI8pvfXLfL0PWaSO5Y2XGyCgC2ibLqAEc71VYIqruyVuaYqv4gqG__gmBcR3C4gULOjYkBTNs7jMT2XaqlPE0dWDWSfLC-Zz-Czxkp52fOxUVAYb1tZcuActo6DLwnYGf5cpJMU76BhEegEkJzvJqDr9DdS996cjl2vxhPtCtS2rx7z4fCk5gCnCefztu_JjageY5Lk9IG03xzW3A2TUAXm-t_KJbRKa4-DghWEfBr8zv1w","e":"AQAB"}]}

View File

@@ -143,7 +143,7 @@ func getPushOnWriteHook(ctx context.Context, dEnv *env.DoltEnv) (*doltdb.PushOnW
return nil, fmt.Errorf("%w: '%s'", env.ErrRemoteNotFound, remoteName)
}
ddb, err := rem.GetRemoteDB(ctx, types.Format_Default)
ddb, err := rem.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
return nil, err
}

View File

@@ -79,7 +79,12 @@ func (cmd FetchCmd) Exec(ctx context.Context, commandStr string, args []string,
}
updateMode := ref.UpdateMode{Force: apr.Contains(cli.ForceFlag)}
err = actions.FetchRefSpecs(ctx, dEnv.DbData(), refSpecs, r, updateMode, buildProgStarter(downloadLanguage), stopProgFuncs)
srcDB, err := r.GetRemoteDBWithoutCaching(ctx, dEnv.DbData().Ddb.ValueReadWriter().Format(), dEnv)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
err = actions.FetchRefSpecs(ctx, dEnv.DbData(), srcDB, refSpecs, r, updateMode, buildProgStarter(downloadLanguage), stopProgFuncs)
switch err {
case doltdb.ErrUpToDate:
return HandleVErrAndExitCode(nil, usage)

View File

@@ -104,7 +104,7 @@ func (cmd PullCmd) Exec(ctx context.Context, commandStr string, args []string, d
// pullHelper splits pull into fetch, prepare merge, and merge to interleave printing
func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec) error {
srcDB, err := pullSpec.Remote.GetRemoteDBWithoutCaching(ctx, dEnv.DoltDB.ValueReadWriter().Format())
srcDB, err := pullSpec.Remote.GetRemoteDBWithoutCaching(ctx, dEnv.DoltDB.ValueReadWriter().Format(), dEnv)
if err != nil {
return fmt.Errorf("failed to get remote db; %w", err)
}

View File

@@ -109,8 +109,16 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d
return HandleVErrAndExitCode(verr, usage)
}
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), dEnv)
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
err = actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err)
}
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
var verr errhand.VerboseError
err = actions.DoPush(ctx, dEnv.RepoStateReader(), dEnv.RepoStateWriter(), dEnv.DoltDB, dEnv.TempTableFilesDir(), opts, buildProgStarter(defaultLanguage), stopProgFuncs)
err = actions.DoPush(ctx, dEnv.RepoStateReader(), dEnv.RepoStateWriter(), dEnv.DoltDB, remoteDB, dEnv.TempTableFilesDir(), opts, buildProgStarter(defaultLanguage), stopProgFuncs)
if err != nil {
verr = printInfoForPushError(err, opts.Remote, opts.DestRef, opts.RemoteRef)
}

View File

@@ -67,8 +67,6 @@ const (
removeRemoteShortId = "rm"
)
var credTypes = dbfactory.AWSCredTypes
type RemoteCmd struct{}
// Name is returns the name of the Dolt cli command. This is what is used on the command line to invoke the command
@@ -93,7 +91,7 @@ func (cmd RemoteCmd) ArgParser() *argparser.ArgParser {
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"profile", "AWS profile to use."})
ap.SupportsFlag(verboseFlag, "v", "When printing the list of remotes adds additional details.")
ap.SupportsString(dbfactory.AWSRegionParam, "", "region", "")
ap.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, credTypes))
ap.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, dbfactory.AWSCredTypes))
ap.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file")
ap.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use")
return ap
@@ -170,8 +168,8 @@ func addRemote(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) errhand.Verbos
return verr
}
r := env.NewRemote(remoteName, remoteUrl, params, dEnv)
err = dEnv.AddRemote(r.Name, r.Url, r.FetchSpecs, r.Params)
r := env.NewRemote(remoteName, remoteUrl, params)
err = dEnv.AddRemote(r)
switch err {
case nil:

View File

@@ -151,8 +151,8 @@ func (cmd RootsCmd) processTableFile(ctx context.Context, path string, modified
cli.Println()
}
} else if sm, ok := value.(types.SerialMessage); ok {
if serial.GetFileID([]byte(sm)) == serial.StoreRootFileID {
msg := serial.GetRootAsStoreRoot([]byte(sm), 0)
if serial.GetFileID(sm) == serial.StoreRootFileID {
msg := serial.GetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
ambytes := msg.AddressMapBytes()
node := tree.NodeFromBytes(ambytes)
err := tree.OutputAddressMapNode(cli.OutStream, node)

View File

@@ -19,6 +19,7 @@ import (
"fmt"
"net"
"net/http"
"runtime"
"strconv"
"time"
@@ -79,11 +80,6 @@ func Serve(
}
logrus.SetFormatter(LogFormat{})
isReadOnly := false
if serverConfig.ReadOnly() {
isReadOnly = true
}
var mrEnv *env.MultiRepoEnv
var err error
fs := dEnv.FS
@@ -134,11 +130,12 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
IsReadOnly: isReadOnly,
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
}
sqlEngine, err := engine.NewSqlEngine(
ctx,
@@ -229,8 +226,8 @@ func newSessionBuilder(se *engine.SqlEngine, config ServerConfig) server.Session
userToSessionVars[curr.Name] = curr.Vars
}
return func(ctx context.Context, conn *mysql.Conn, host string) (sql.Session, error) {
mysqlSess, err := server.DefaultSessionBuilder(ctx, conn, host)
return func(ctx context.Context, conn *mysql.Conn, addr string) (sql.Session, error) {
mysqlSess, err := server.DefaultSessionBuilder(ctx, conn, addr)
if err != nil {
return nil, err
}
@@ -265,7 +262,11 @@ func newSessionBuilder(se *engine.SqlEngine, config ServerConfig) server.Session
// getConfigFromServerConfig processes ServerConfig and returns server.Config for sql-server.
func getConfigFromServerConfig(serverConfig ServerConfig) (server.Config, error, error) {
serverConf := server.Config{Protocol: "tcp"}
serverConf, err := handleProtocolAndAddress(serverConfig)
if err != nil {
return server.Config{}, err, nil
}
serverConf.DisableClientMultiStatements = serverConfig.DisableClientMultiStatements()
readTimeout := time.Duration(serverConfig.ReadTimeout()) * time.Millisecond
@@ -276,14 +277,6 @@ func getConfigFromServerConfig(serverConfig ServerConfig) (server.Config, error,
return server.Config{}, nil, err
}
portAsString := strconv.Itoa(serverConfig.Port())
hostPort := net.JoinHostPort(serverConfig.Host(), portAsString)
if portInUse(hostPort) {
portInUseError := fmt.Errorf("Port %s already in use.", portAsString)
return server.Config{}, portInUseError, nil
}
// if persist is 'load' we use currently set persisted global variable,
// else if 'ignore' we set persisted global variable to current value from serverConfig
if serverConfig.PersistenceBehavior() == loadPerisistentGlobals {
@@ -300,7 +293,6 @@ func getConfigFromServerConfig(serverConfig ServerConfig) (server.Config, error,
// Do not set the value of Version. Let it default to what go-mysql-server uses. This should be equivalent
// to the value of mysql that we support.
serverConf.Address = hostPort
serverConf.ConnReadTimeout = readTimeout
serverConf.ConnWriteTimeout = writeTimeout
serverConf.MaxConnections = serverConfig.MaxConnections()
@@ -309,3 +301,34 @@ func getConfigFromServerConfig(serverConfig ServerConfig) (server.Config, error,
return serverConf, nil, nil
}
// handleProtocolAndAddress returns new server.Config object with only Protocol and Address defined.
// It fails if the requested host:port is already bound, or if a unix socket is requested on Windows.
func handleProtocolAndAddress(serverConfig ServerConfig) (server.Config, error) {
	cfg := server.Config{Protocol: "tcp"}

	portStr := strconv.Itoa(serverConfig.Port())
	address := net.JoinHostPort(serverConfig.Host(), portStr)
	if portInUse(address) {
		return server.Config{}, fmt.Errorf("Port %s already in use.", portStr)
	}
	cfg.Address = address

	// A socket flag defined with or without a value selects a unix socket listener.
	if sock := serverConfig.Socket(); sock != "" {
		if runtime.GOOS == "windows" {
			return server.Config{}, fmt.Errorf("cannot define unix socket file on Windows")
		}
		cfg.Socket = sock
	}
	// TODO : making it an "opt in" feature (just to start) and requiring users to pass in the `--socket` flag
	// to turn them on instead of defaulting them on when host and port aren't set or host is set to `localhost`.
	return cfg, nil
}

View File

@@ -153,9 +153,9 @@ func TestServerGoodParams(t *testing.T) {
tests := []ServerConfig{
DefaultServerConfig(),
DefaultServerConfig().withHost("127.0.0.1").WithPort(15400),
DefaultServerConfig().withHost("localhost").WithPort(15401),
//DefaultServerConfig().withHost("::1").WithPort(15402), // Fails on Jenkins, assuming no IPv6 support
DefaultServerConfig().WithHost("127.0.0.1").WithPort(15400),
DefaultServerConfig().WithHost("localhost").WithPort(15401),
//DefaultServerConfig().WithHost("::1").WithPort(15402), // Fails on Jenkins, assuming no IPv6 support
DefaultServerConfig().withUser("testusername").WithPort(15403),
DefaultServerConfig().withPassword("hunter2").WithPort(15404),
DefaultServerConfig().withTimeout(0).WithPort(15405),
@@ -164,6 +164,7 @@ func TestServerGoodParams(t *testing.T) {
DefaultServerConfig().withLogLevel(LogLevel_Info).WithPort(15408),
DefaultServerConfig().withReadOnly(true).WithPort(15409),
DefaultServerConfig().withUser("testusernamE").withPassword("hunter2").withTimeout(4).WithPort(15410),
DefaultServerConfig().withAllowCleartextPasswords(true),
}
for _, test := range tests {
@@ -174,7 +175,7 @@ func TestServerGoodParams(t *testing.T) {
}(test, sc)
err := sc.WaitForStart()
require.NoError(t, err)
conn, err := dbr.Open("mysql", ConnectionString(test), nil)
conn, err := dbr.Open("mysql", ConnectionString(test, "dbname"), nil)
require.NoError(t, err)
err = conn.Close()
require.NoError(t, err)
@@ -198,7 +199,7 @@ func TestServerSelect(t *testing.T) {
require.NoError(t, err)
const dbName = "dolt"
conn, err := dbr.Open("mysql", ConnectionString(serverConfig)+dbName, nil)
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, dbName), nil)
require.NoError(t, err)
defer conn.Close()
sess := conn.NewSession(nil)
@@ -274,7 +275,7 @@ func TestServerSetDefaultBranch(t *testing.T) {
const dbName = "dolt"
conn, err := dbr.Open("mysql", ConnectionString(serverConfig)+dbName, nil)
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, dbName), nil)
require.NoError(t, err)
sess := conn.NewSession(nil)
@@ -316,7 +317,7 @@ func TestServerSetDefaultBranch(t *testing.T) {
}
conn.Close()
conn, err = dbr.Open("mysql", ConnectionString(serverConfig)+dbName, nil)
conn, err = dbr.Open("mysql", ConnectionString(serverConfig, dbName), nil)
require.NoError(t, err)
defer conn.Close()
@@ -353,7 +354,7 @@ func TestServerSetDefaultBranch(t *testing.T) {
}
conn.Close()
conn, err = dbr.Open("mysql", ConnectionString(serverConfig)+dbName, nil)
conn, err = dbr.Open("mysql", ConnectionString(serverConfig, dbName), nil)
require.NoError(t, err)
defer conn.Close()
@@ -431,7 +432,7 @@ func TestReadReplica(t *testing.T) {
multiSetup.PushToRemote(sourceDbName, "remote1", "main")
t.Run("read replica pulls multiple branches", func(t *testing.T) {
conn, err := dbr.Open("mysql", ConnectionString(serverConfig)+readReplicaDbName, nil)
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, readReplicaDbName), nil)
defer conn.Close()
require.NoError(t, err)
sess := conn.NewSession(nil)

View File

@@ -18,7 +18,9 @@ import (
"crypto/tls"
"fmt"
"net"
"path/filepath"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
)
@@ -35,22 +37,24 @@ const (
)
const (
defaultHost = "localhost"
defaultPort = 3306
defaultUser = "root"
defaultPass = ""
defaultTimeout = 8 * 60 * 60 * 1000 // 8 hours, same as MySQL
defaultReadOnly = false
defaultLogLevel = LogLevel_Info
defaultAutoCommit = true
defaultMaxConnections = 100
defaultQueryParallelism = 2
defaultPersistenceBahavior = loadPerisistentGlobals
defaultDataDir = "."
defaultCfgDir = "./.doltcfg"
defaultPrivilegeFilePath = "./.doltcfg/privileges.db"
defaultMetricsHost = ""
defaultMetricsPort = -1
defaultHost = "localhost"
defaultPort = 3306
defaultUser = "root"
defaultPass = ""
defaultTimeout = 8 * 60 * 60 * 1000 // 8 hours, same as MySQL
defaultReadOnly = false
defaultLogLevel = LogLevel_Info
defaultAutoCommit = true
defaultMaxConnections = 100
defaultQueryParallelism = 2
defaultPersistenceBahavior = loadPerisistentGlobals
defaultDataDir = "."
defaultCfgDir = ".doltcfg"
defaultPrivilegeFilePath = "privileges.db"
defaultMetricsHost = ""
defaultMetricsPort = -1
defaultAllowCleartextPasswords = false
defaultUnixSocketFilePath = "/tmp/mysql.sock"
)
const (
@@ -131,27 +135,35 @@ type ServerConfig interface {
PrivilegeFilePath() string
// UserVars is an array containing user specific session variables
UserVars() []UserSessionVars
// JwksConfig is an array containing jwks config
JwksConfig() []engine.JwksConfig
// AllowCleartextPasswords is true if the server should accept cleartext passwords.
AllowCleartextPasswords() bool
// Socket is a path to the unix socket file
Socket() string
}
// commandLineServerConfig is the ServerConfig implementation populated from
// command-line flags. Merge residue previously left both the pre- and
// post-merge field lists in this struct (duplicate field names, which do not
// compile); only the post-merge list is kept.
type commandLineServerConfig struct {
	host                    string
	port                    int
	user                    string
	password                string
	timeout                 uint64
	readOnly                bool
	logLevel                LogLevel
	dbNamesAndPaths         []env.EnvNameAndPath
	dataDir                 string
	cfgDir                  string
	autoCommit              bool
	maxConnections          uint64
	queryParallelism        int
	tlsKey                  string
	tlsCert                 string
	requireSecureTransport  bool
	persistenceBehavior     string
	privilegeFilePath       string
	allowCleartextPasswords bool
	socket                  string
}
var _ ServerConfig = (*commandLineServerConfig)(nil)
@@ -196,7 +208,7 @@ func (cfg *commandLineServerConfig) LogLevel() LogLevel {
return cfg.logLevel
}
// AutoCommit defines the value of the @@autocommit session variable used on every connection
// (merge residue had this comment duplicated with the pre-merge "Autocommit" spelling).
func (cfg *commandLineServerConfig) AutoCommit() bool {
	return cfg.autoCommit
}
@@ -216,22 +228,29 @@ func (cfg *commandLineServerConfig) PersistenceBehavior() string {
return cfg.persistenceBehavior
}
// TLSKey returns a path to the server's PEM-encoded private TLS key. "" if there is none.
func (cfg *commandLineServerConfig) TLSKey() string {
	return cfg.tlsKey
}
// TLSCert returns a path to the server's PEM-encoded TLS certificate chain. "" if there is none.
func (cfg *commandLineServerConfig) TLSCert() string {
	return cfg.tlsCert
}
// RequireSecureTransport is true if the server should reject non-TLS connections.
func (cfg *commandLineServerConfig) RequireSecureTransport() bool {
return cfg.requireSecureTransport
}
// DisableClientMultiStatements is true if we want the server to not
// process incoming ComQuery packets as if they had multiple queries in
// them, even if the client advertises support for MULTI_STATEMENTS.
// The command-line config does not expose this option, so it is fixed at false.
func (cfg *commandLineServerConfig) DisableClientMultiStatements() bool {
	return false
}
// MetricsLabels returns labels that are applied to all prometheus metrics.
// Labels are not settable from the command line, so this always returns nil.
func (cfg *commandLineServerConfig) MetricsLabels() map[string]string {
	return nil
}
@@ -244,14 +263,25 @@ func (cfg *commandLineServerConfig) MetricsPort() int {
return defaultMetricsPort
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
// JSON string.
func (cfg *commandLineServerConfig) PrivilegeFilePath() string {
return cfg.privilegeFilePath
}
// UserVars is an array containing user specific session variables.
// User session vars are not settable from the command line, so this always returns nil.
func (cfg *commandLineServerConfig) UserVars() []UserSessionVars {
	return nil
}
// JwksConfig returns the JSON Web Key Set config.
// JWKS config is not settable from the command line, so this always returns nil.
func (cfg *commandLineServerConfig) JwksConfig() []engine.JwksConfig {
	return nil
}
// AllowCleartextPasswords is true if the server should accept cleartext passwords.
func (cfg *commandLineServerConfig) AllowCleartextPasswords() bool {
	return cfg.allowCleartextPasswords
}
// DatabaseNamesAndPaths returns an array of env.EnvNameAndPathObjects corresponding to the databases to be loaded in
// a multiple db configuration. If nil is returned the server will look for a database in the current directory and
// give it a name automatically.
@@ -259,16 +289,23 @@ func (cfg *commandLineServerConfig) DatabaseNamesAndPaths() []env.EnvNameAndPath
return cfg.dbNamesAndPaths
}
// DataDir is the path to a directory to use as the data dir, both to create new databases and locate existing ones.
func (cfg *commandLineServerConfig) DataDir() string {
return cfg.dataDir
}
// CfgDir is the path to a directory to use to store the dolt configuration files.
func (cfg *commandLineServerConfig) CfgDir() string {
return cfg.cfgDir
}
// withHost updates the host and returns the called `*commandLineServerConfig`, which is useful for chaining calls.
func (cfg *commandLineServerConfig) withHost(host string) *commandLineServerConfig {
// Socket is a path to the unix socket file
func (cfg *commandLineServerConfig) Socket() string {
return cfg.socket
}
// WithHost updates the host and returns the called `*commandLineServerConfig`, which is useful for chaining calls.
func (cfg *commandLineServerConfig) WithHost(host string) *commandLineServerConfig {
cfg.host = host
return cfg
}
@@ -322,52 +359,69 @@ func (cfg *commandLineServerConfig) withQueryParallelism(queryParallelism int) *
return cfg
}
// withDBNamesAndPaths updates the dbNamesAndPaths, which is an array of env.EnvNameAndPathObjects corresponding to the databases
func (cfg *commandLineServerConfig) withDBNamesAndPaths(dbNamesAndPaths []env.EnvNameAndPath) *commandLineServerConfig {
cfg.dbNamesAndPaths = dbNamesAndPaths
return cfg
}
// withDataDir updates the path to a directory to use as the data dir.
func (cfg *commandLineServerConfig) withDataDir(dataDir string) *commandLineServerConfig {
cfg.dataDir = dataDir
return cfg
}
// withCfgDir updates the path to a directory to use to store the dolt configuration files.
func (cfg *commandLineServerConfig) withCfgDir(cfgDir string) *commandLineServerConfig {
cfg.cfgDir = cfgDir
return cfg
}
// withPersistenceBehavior updates persistence behavior of system globals on server init
func (cfg *commandLineServerConfig) withPersistenceBehavior(persistenceBehavior string) *commandLineServerConfig {
cfg.persistenceBehavior = persistenceBehavior
return cfg
}
// withPrivilegeFilePath updates the path to the file which contains all needed privilege information in the form of a JSON string
func (cfg *commandLineServerConfig) withPrivilegeFilePath(privFilePath string) *commandLineServerConfig {
cfg.privilegeFilePath = privFilePath
return cfg
}
// withAllowCleartextPasswords updates whether the server accepts cleartext passwords
// and returns the receiver, which is useful for chaining calls.
func (cfg *commandLineServerConfig) withAllowCleartextPasswords(allow bool) *commandLineServerConfig {
	cfg.allowCleartextPasswords = allow
	return cfg
}
// WithSocket updates the path to the unix socket file
func (cfg *commandLineServerConfig) WithSocket(sockFilePath string) *commandLineServerConfig {
cfg.socket = sockFilePath
return cfg
}
// DefaultServerConfig creates a `*commandLineServerConfig` that has all of the options
// set to their default values. Merge residue previously left both the pre- and
// post-merge field lists in the struct literal (duplicate keys, which do not
// compile); only the post-merge list is kept. Note the cfg dir and privilege
// file now default to paths inside the data dir.
func DefaultServerConfig() *commandLineServerConfig {
	return &commandLineServerConfig{
		host:                    defaultHost,
		port:                    defaultPort,
		user:                    defaultUser,
		password:                defaultPass,
		timeout:                 defaultTimeout,
		readOnly:                defaultReadOnly,
		logLevel:                defaultLogLevel,
		autoCommit:              defaultAutoCommit,
		maxConnections:          defaultMaxConnections,
		queryParallelism:        defaultQueryParallelism,
		persistenceBehavior:     defaultPersistenceBahavior,
		dataDir:                 defaultDataDir,
		cfgDir:                  filepath.Join(defaultDataDir, defaultCfgDir),
		privilegeFilePath:       filepath.Join(defaultDataDir, defaultCfgDir, defaultPrivilegeFilePath),
		allowCleartextPasswords: defaultAllowCleartextPasswords,
	}
}
// Validate returns an `error` if any field is not valid.
// ValidateConfig returns an `error` if any field is not valid.
func ValidateConfig(config ServerConfig) error {
if config.Host() != "localhost" {
ip := net.ParseIP(config.Host())
@@ -391,14 +445,32 @@ func ValidateConfig(config ServerConfig) error {
}
// ConnectionString returns a Data Source Name (DSN) to be used by go clients for connecting to a running server.
func ConnectionString(config ServerConfig) string {
return fmt.Sprintf("%v:%v@tcp(%v:%v)/", config.User(), config.Password(), config.Host(), config.Port())
// If unix socket file path is defined in ServerConfig, then `unix` DSN will be returned.
func ConnectionString(config ServerConfig, database string) string {
var dsn string
if config.Socket() != "" {
dsn = fmt.Sprintf("%v:%v@unix(%v)/%v", config.User(), config.Password(), config.Socket(), database)
} else {
dsn = fmt.Sprintf("%v:%v@tcp(%v:%v)/%v", config.User(), config.Password(), config.Host(), config.Port(), database)
}
if config.AllowCleartextPasswords() {
dsn += "?allowCleartextPasswords=1"
}
return dsn
}
// ConfigInfo returns a summary of some of the config which contains some of the more important information.
// Merge residue previously left the pre-merge return statement at the top of the
// body, which made the socket-aware logic below it unreachable; it is removed here.
func ConfigInfo(config ServerConfig) string {
	// Include the unix socket path in the summary only when one is configured.
	// (The previous `s == ""` fallback inside this guard was dead code: the
	// branch is only entered when Socket() is non-empty.)
	socket := ""
	if s := config.Socket(); s != "" {
		socket = fmt.Sprintf(`|S="%v"`, s)
	}
	return fmt.Sprintf(`HP="%v:%v"|T="%v"|R="%v"|L="%v"%s`, config.Host(), config.Port(),
		config.ReadTimeout(), config.ReadOnly(), config.LogLevel(), socket)
}
// LoadTLSConfig loads the certificate chain from config.TLSKey() and config.TLSCert() and returns

View File

@@ -137,7 +137,7 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
}
}
conn, err := dbr.Open("mysql", ConnectionString(serverConfig), nil)
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, ""), nil)
if err != nil {
cli.PrintErrln(err.Error())
serverController.StopServer()

View File

@@ -34,18 +34,20 @@ import (
)
// Command-line flag names for the sql-server command. Merge residue previously
// left both the pre- and post-merge constant lists in this block (duplicate
// identifiers, which do not compile); only the post-merge list is kept.
const (
	hostFlag                    = "host"
	portFlag                    = "port"
	userFlag                    = "user"
	passwordFlag                = "password"
	timeoutFlag                 = "timeout"
	readonlyFlag                = "readonly"
	logLevelFlag                = "loglevel"
	noAutoCommitFlag            = "no-auto-commit"
	configFileFlag              = "config"
	queryParallelismFlag        = "query-parallelism"
	maxConnectionsFlag          = "max-connections"
	persistenceBehaviorFlag     = "persistence-behavior"
	allowCleartextPasswordsFlag = "allow-cleartext-passwords"
	socketFlag                  = "socket"
)
func indentLines(s string) string {
@@ -145,6 +147,8 @@ func (cmd SqlServerCmd) ArgParser() *argparser.ArgParser {
ap.SupportsInt(maxConnectionsFlag, "", "max-connections", fmt.Sprintf("Set the number of connections handled by the server (default `%d`)", serverConfig.MaxConnections()))
ap.SupportsString(persistenceBehaviorFlag, "", "persistence-behavior", fmt.Sprintf("Indicate whether to `load` or `ignore` persisted global variables (default `%s`)", serverConfig.PersistenceBehavior()))
ap.SupportsString(commands.PrivsFilePathFlag, "", "privilege file", "Path to a file to load and store users and grants. Defaults to $doltcfg-dir/privileges.db")
ap.SupportsString(allowCleartextPasswordsFlag, "", "allow-cleartext-passwords", "Allows use of cleartext passwords. Defaults to false.")
ap.SupportsString(socketFlag, "", "socket file", "Path for the unix socket file. Defaults to '/tmp/mysql.sock'")
return ap
}
@@ -241,8 +245,14 @@ func GetServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (ServerC
// SetupDoltConfig updates the given server config with where to create .doltcfg directory
func SetupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config ServerConfig) error {
if _, ok := apr.GetValue(configFileFlag); ok {
if exists, _ := dEnv.FS.Exists(config.CfgDir()); !exists {
if err := dEnv.FS.MkDirs(config.CfgDir()); err != nil {
return err
}
}
return nil
}
serverConfig := config.(*commandLineServerConfig)
_, dataDirFlag1 := apr.GetValue(commands.MultiDBDirFlag)
@@ -316,8 +326,16 @@ func SetupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config S
func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (ServerConfig, error) {
serverConfig := DefaultServerConfig()
if sock, ok := apr.GetValue(socketFlag); ok {
// defined without value gets default
if sock == "" {
sock = defaultUnixSocketFilePath
}
serverConfig.WithSocket(sock)
}
if host, ok := apr.GetValue(hostFlag); ok {
serverConfig.withHost(host)
serverConfig.WithHost(host)
}
if port, ok := apr.GetInt(portFlag); ok {
@@ -396,6 +414,7 @@ func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResult
}
serverConfig.autoCommit = !apr.Contains(noAutoCommitFlag)
serverConfig.allowCleartextPasswords = apr.Contains(allowCleartextPasswordsFlag)
return serverConfig, nil
}

View File

@@ -1,19 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIBNDCB56ADAgECAhEAhVdohjG405MnAoq13fHhEDAFBgMrZXAwGDEWMBQGA1UE
ChMNRG9sdEh1YiwgSW5jLjAeFw0yMTA4MjcyMjI4MzdaFw0yMTA4MjgwMDI4Mzda
MBgxFjAUBgNVBAoTDURvbHRIdWIsIEluYy4wKjAFBgMrZXADIQC/Zq8Ls6UODL79
YzRbfgy9oahzE87ivh8FgM+lKPkBtaNGMEQwDgYDVR0PAQH/BAQDAgeAMBMGA1Ud
JQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDwYDVR0RBAgwBocEfwAAATAF
BgMrZXADQQAoT9OOXUnXkw+la93z+1unL867fVNnONbNWkH/slRs/cHXIE2MQy6+
bKq/GWJjmcE+IldXLsGefzDrV49RrucM
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBVzCCAQmgAwIBAgIRAI0WiAW7LLS1/nv1+9T/y0swBQYDK2VwMBgxFjAUBgNV
BAoTDURvbHRIdWIsIEluYy4wHhcNMjEwODI3MjIyODM3WhcNMjEwODI4MDAyODM3
WjAYMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMuMCowBQYDK2VwAyEAHb23NjQMfMsj
riWIJVCXmqWTFCuAa0ETYI/3kL7QhxqjaDBmMA4GA1UdDwEB/wQEAwIHgDATBgNV
HSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRBKiEB
oxoqqO7cTCUUCeFVzdC4iDAPBgNVHREECDAGhwR/AAABMAUGAytlcANBAEV94bXU
ELMAUs+uhcp1Px4/1oHu//x56qwRWVpoeQ1AxF8hJ7ImZao2NQ1xQkbJ5KSZzPWU
od969YFcalxnkQU=
MIIErDCCApQCCQCnSokQKR3M/zANBgkqhkiG9w0BAQUFADAYMRYwFAYDVQQKDA1E
b2x0SHViLCBJbmMuMB4XDTIyMDcyMTIwMDgzMloXDTI2MDcxOTIwMDgzMlowGDEW
MBQGA1UECgwNRG9sdEh1YiwgSW5jLjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC
AgoCggIBAMPmzHy0CmW5Xc27rbRYpJG/QKMXVAz+k2v+AkTQkUzBWKv0z8WhePB/
tDNVfVYuYQ2sBiHTaar9nn2Lokon+YkPjyMis2aMETHVuqx0DmJb9YcxniA8M27o
ZlfDrJtQO5UzIp9q2zhsFWj30Qdm6YUOhZ3rTnvYOMUYG/cIYLWXyQCg1oPqRVRr
GldzLP2GdigdrS6QQjA9AdK+Zi3dP2m2vssG4gJ+lkAWOHe7wvv2RJl/alsvWXmw
pur7Q9Z7M+tQmqGDxlyDtkDDecyqvEkxPH7mnKV1jahJjzUFHND1r44JlCN0eTmD
Q3+RldBNZCZSJWQ42yOIK+mTSp4QUvZL9wnJ1/lMb/v7atDlF/MSLeN6SDyAPod7
Oci8PR+nGhaOKacngrogM6SFQ1kF4tlY5Scrpg61IAcf6uxF3eSBP0qEaFvfLXZV
mc136E4g2G1haLt7y2prckCHLXEnxurXU4xlU/SH4cy4jB/zLZJs46tM7J9ZtCjg
QScZeNBA91kKAvHr36f/+suU3MNPAP2fmMCziH2uxh6SxTP8yzsUoV9PCTeaSnXX
rTMB077j0TOB2qsYhLF3XsLMz+B2Jo0b7ydT7c7rMS9yYvyKPA9JSE44nUrZWj3B
7ity1moIfrzwbH3AK3D5I9iUbBV0+JpuIZFPoqTIb15TUXJSusYHAgMBAAEwDQYJ
KoZIhvcNAQEFBQADggIBABGrQEUFJk5StmyFUGvaw/57H+K1ZT62rusFBq1NacMb
61dMh9xJyDMgLiUllQ8q5CS3bjYt2J2KajpU/58ugF/Ct9aoxA4vFDtfHECllYaH
zvoiK0Dkrf901xxNVeCbHDmXbvzJ0N/xTkP80kbT4o+aBOw6fxQVEBGAGg4EEz1D
k7v3/lEsZ2TkCPua1p9kXHaG8+wwE0hAWsaUYgXHTpzz0gUBJ69bOIlBpLKqO9It
HStkPD7wtYnN54pmOM68EAyXAxUC7yZ9PqncX0X04hH0VlmQGfdXFJDR89mSS6B4
P1qsi1XtnKC/hHuJlrY02uMXn7u1cVCf5uWfFm6Xs8rLL+q28gV6Tr2aXqgY0Cjl
tNtUEIP23/irWN48c5/rKOTiUIHJy2m6UofwMQO91jgKFxIyUmkgPQmos2LLNjtk
VFaPRigAaArwvombUmvfXJl6KoyH/je4H4+Gs+rRQURXU/PD1cioHgsOYNXSmYAj
AQJv/xp9QBmpzb1ExJOKeWjnUWGu0Wdv4TCTXJNvfdQqOVkT6k6ty1urgr9fNOxY
PDbHZTI6rXMtT57G108k2gAkaCE6O2R2Dm+vfW7auauqF3lNiZU9Y8IEGU2ybmE3
s2j+THPWmhuepbZKO5daQH0zlma31QgoyhGSoZ6QUWKEjufEvfx4HwGqMP6BEmaP
-----END CERTIFICATE-----

View File

@@ -1,3 +1,52 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIIMIgv9IKSQINc+lktxpphnkRQmX2P7NmzWGn2TydYGo
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDD5sx8tApluV3N
u620WKSRv0CjF1QM/pNr/gJE0JFMwVir9M/FoXjwf7QzVX1WLmENrAYh02mq/Z59
i6JKJ/mJD48jIrNmjBEx1bqsdA5iW/WHMZ4gPDNu6GZXw6ybUDuVMyKfats4bBVo
99EHZumFDoWd60572DjFGBv3CGC1l8kAoNaD6kVUaxpXcyz9hnYoHa0ukEIwPQHS
vmYt3T9ptr7LBuICfpZAFjh3u8L79kSZf2pbL1l5sKbq+0PWezPrUJqhg8Zcg7ZA
w3nMqrxJMTx+5pyldY2oSY81BRzQ9a+OCZQjdHk5g0N/kZXQTWQmUiVkONsjiCvp
k0qeEFL2S/cJydf5TG/7+2rQ5RfzEi3jekg8gD6HeznIvD0fpxoWjimnJ4K6IDOk
hUNZBeLZWOUnK6YOtSAHH+rsRd3kgT9KhGhb3y12VZnNd+hOINhtYWi7e8tqa3JA
hy1xJ8bq11OMZVP0h+HMuIwf8y2SbOOrTOyfWbQo4EEnGXjQQPdZCgLx69+n//rL
lNzDTwD9n5jAs4h9rsYeksUz/Ms7FKFfTwk3mkp1160zAdO+49EzgdqrGISxd17C
zM/gdiaNG+8nU+3O6zEvcmL8ijwPSUhOOJ1K2Vo9we4rctZqCH688Gx9wCtw+SPY
lGwVdPiabiGRT6KkyG9eU1FyUrrGBwIDAQABAoICABUIJlQNEECzkfqQd6mxCpoL
KmlYC9IJUtJ5Rs0Uh0TyTQ7JDbVuDInla/dG6lniSNEq8s2W4PVWnTllUFsdx5CL
dxaSlygfSYlMJOp220R8EvQcw5k6XVs+4B30CAf0qTDveHwdAMQh9np6gJqG1fNP
B9FYfeiV4iJm4Dm5UIiubwn+OomXETJq/Tz+RIpDcVQFO56QJkr/gb6aamXqJvC2
ie1KI+GYrZDb0dwo8FoUqnDAWS7I+pYx/PmlWDciqwRMdw14FEfCbEKvudfbTLOe
8Zu+LnslD7xNiW5ryhg1CE/7f0f/LTSbfxenDap7ZJEoqJMF96Ds8an2AkDOB9nx
XB5kVz5jMsaZ1f68Rx8S4EqEEcXxYwiRe5WoDEnnVr2+Q6QzOqh/4DaA5VuId462
IjPDWmYszSqig9QXjS11SkTMKCKxas4AqfCb8uUlcXdri4aSv0Khb7DgbO2su1KC
+hcXpiAMH9jVX1d4N8c0Q0HLOT09lRnD2mmEX6Lo2kWgb5Hpzo88Ty9WI7oiszsY
J1r6qPkXIc9Ft1YwpdVBhkBbxB024l9IG8I1UzjrLFnR/A5sRefzosNi4/ZACPW4
Kykhy7p+ZV9Kf8cjMbY11afCmi9jlXsVqWwJIMk+LxTCjF/lmbMay/G7j+ibGtSQ
hU+LNPzAOUEwBj1OqoMhAoIBAQDlo3Ecgeu5zxNILnkut6RHHDJUK2N9+5HIDwi4
frMlkM3b8NLz09/GtmX4HTKkDBur4x9QeEIsxG19tk2QWZQ4EAKs8OcEXaCL4Q9g
msZbQC5rrFjRzUC4roxCTEz4g/ANEM+huLq/3a6afUhkmUuGZzK6rf6E36dTx3na
DP4tDAx1s/DqfMtXYYmzrb3V1Nk9NUwQFRselJ8EHeIA7NEcLcv5yREia57RcYm/
EfuA90j1ER6iHZIxopPfo1Cx7I9N4eoQM4/Tjb5qu+krfGOFOQbL6hCPHeHkZlAw
0/2ECxCHS2y+Uih3MkMdnme2tfBr8AQpcfAOxSTMXu1wGDs9AoIBAQDaY+fVJ2G/
/myI3Nly7MZaJ8NT8kcQx55b1s5vqWU+IQo5YC4KGdUp32U5Uxr2war8SuA2pKW0
Cv42IJYlGQQUgpj2k+DJcDz+Qz9nqE5Ft8vNmyA3Y2gbwgTkd9dtFCTph4BNiAad
qyjXwdJ6qwB1dbORsprC/Ue8WcEVwWwvF3PGnvbEiM8qLyxv/WIXnN5B/XcvUFHS
mS3IVkJpdR8Kzp0Ctro5mHd2L6SQa/XM5tU3bye9Hzf1J3rWM/FGzVtYInC//CoO
w/sA/ebfhK1iHjYYp4MjyETBkbD1kpCl6eNdTKN9ydSkUzhWlHn3xKQQrdZ7KiiH
YbIhh1rwB+qTAoIBAFIoOnSfis2MZ3Kgpdxv+UczsFHqwAq3sX1o247eTYu4Fd6F
d4OinuICKdMt5wtIBbJmbLKmg85ubFnYmkF1uxCfscVb3tryAFlrKMxAM408Fh+R
pqlRDMHGOQoTMEqNMZoLFK3gYHf6gNhm0DqlmZ65Vy3wyCmTttLDgDXiBiHpuJ93
xE6wXTOjAtgU5eEV6K78XX03f99d/tJDOrNoBpxVSi/Qnt+4rzZxr317moaWcjSz
bklD2SUG7G7LiDhP0SllFQ+80s02XhTjq9VSCG0GbQcRc+EwKLxFWpVNktrl9oDh
HEOvMykKA3caUDLPPvfvBB4r1F4EbFjt8Xb0RGUCggEAO0PrcRvr2gd4ere8RwTc
WzD5P/m6sWIKpo+nnAPTVsXumV1xgQo7n85hEOptodMyzJ6hNBMAaNim3hd/x3d/
dPVv/1JoKSJNWw7y0PWKsD7NjvFvD7jpUscXPs0K6C4USk+cUO3+JaGCRvLxZJqt
WDLl1T8r4oiLhCCzVm0UJ79sitUu0Gz0E1WT8JxJl3DZm/zl8DAS1Fz/YKOQCEBh
eTRSxZ7C8MhgevE47nxtyvpFmHKQzTEApYXePuz/qCAojsVh5afP3gvvPPiqQ7Qk
vUDHm28yFm7Nwd4AsNPibzQGoJYgtA0mqKVw34YRh1yUzXXvg6MQNpUbmx+5XPQ5
AwKCAQEA5Iye1s7RVxZP5iJ3vgy67OU+zza5yEIoJEabhF4bOBDsCwNcx2EwsQll
X/Su5qqiIVnrRmkdYVhTnZv8bigq/8Hu+BBenMLxkAwZ5ep6gKq9wdiPQArjNBlS
5KkGuj+7LNCsmmldXVXjjg2BNWBDdVv33hhhqsi/Tzau+qAufdNGdBTS4ZTWEH0z
X5EBtOphJbBPeMUrm1PFOXKUDDwPfqX86rg1NHr1l5iB7uqShZak1s1ovoyFO6s7
I9d8chi4/qwwYk8cHczB4C9EwBvWEvcAf1xa6I1Mp8y3tDhWPVIpq5P8i9vQFYIJ
LWLCd/YowgxkNl5j6a5QMFoZvjLi5A==
-----END PRIVATE KEY-----

View File

@@ -1,10 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIBVzCCAQmgAwIBAgIRAI0WiAW7LLS1/nv1+9T/y0swBQYDK2VwMBgxFjAUBgNV
BAoTDURvbHRIdWIsIEluYy4wHhcNMjEwODI3MjIyODM3WhcNMjEwODI4MDAyODM3
WjAYMRYwFAYDVQQKEw1Eb2x0SHViLCBJbmMuMCowBQYDK2VwAyEAHb23NjQMfMsj
riWIJVCXmqWTFCuAa0ETYI/3kL7QhxqjaDBmMA4GA1UdDwEB/wQEAwIHgDATBgNV
HSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRBKiEB
oxoqqO7cTCUUCeFVzdC4iDAPBgNVHREECDAGhwR/AAABMAUGAytlcANBAEV94bXU
ELMAUs+uhcp1Px4/1oHu//x56qwRWVpoeQ1AxF8hJ7ImZao2NQ1xQkbJ5KSZzPWU
od969YFcalxnkQU=
MIIErDCCApQCCQCP9IKGyBYVUDANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQKDA1E
b2x0SHViLCBJbmMuMB4XDTIyMDcyMTE5NTUyN1oXDTI2MDcyMDE5NTUyN1owGDEW
MBQGA1UECgwNRG9sdEh1YiwgSW5jLjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC
AgoCggIBAMRNMquL4n2bAMQmoedUFcT/zI42ThW5o9qxh9U8MhsDqKBet7JL7ruh
3FRj/yaU+3ax2lGgapcTnSJHWHPUX6MzUpqkwrLzKxqMrKQYnU2F96n3GOflllsB
8ISKBy0TIipLWKupndc893qk+j6HUQ9OrtFHwroZ6/Tg32h4LcJvhkwILb8EHZpI
fSSzi637msnXWqln1WYmX+v4ARUVVSwl5kkPGZ4PxMDQRX3ioqy2d27GbHYSo01K
tSScQvnFJeddu09l7hNTSFixRz+XU+F+jYna8xUtqHMawEpfGw8Gsu8sMV9PUfUx
6WrMb1tcyWEDvHwH6RUlhNa085eCRFznHLDZNxrgBwrslII9d9xW/JaRq43d4M0q
e7F+BazB3rImMo2QHFvtvq55N7mvp+LprMjoEneBboiz1I6M0rq7Sn58FxazgQvz
mCZmNYIrznx+S1mlJjxtC7xgDPZn5Z/68TsmT26j5UNLou7i/yb7XFdMVY6kAnoZ
BAf4+rAUEDV85OLz0kWZSJj+DdlPOl/gIhTgrgJev2Cc7THUTnuk+nGETG49DITS
ySWYLsMYFHweSTm3rG6sMyN7mdbRrUSM1bqbARm46mt89+0HGsFX1QLe3v4yda67
ic93J5mkfNPZ96K9Hth3SBbZt43DvB56ZqsFQlzBZeQ3hkk7ooulAgMBAAEwDQYJ
KoZIhvcNAQELBQADggIBAJgopHEYmETWhH06T72EpgLr3xqckCP9QZ6/UBN8eDt0
rRqMCfM5H33qpe2wojjKwFDkR8XpwF/80VflfFBt9hc1c1fuKmyQSES9gUw10uSL
Z54MCPOa+c8+hslkmJR8Na0QyWN5unnozVHf4XIChsgL1/FblXcOgQLPZygzMNM2
IjdT9XpEHiZTDZDp1NmM7rkRkcpgF2J8G2dcRjo/OGpnhH7wHgxm7hS6yWLW8xPP
8M2/8LPYA7H6HMGYeyrYuDPeVzfaHrECTft+4cjHLu/jYnVLukMMSuI9v5FjtxtX
PYtxnLv8hnbParjSnzK8cOlGdfJDRPMUw9/tvZ4bTeyTtJQYgl/jtdaU4mbdWlge
XMzkZGH3kKpsV2rPZXKJuqRq3vzfr51cQhEcnbJ5H8BsDUQADS3ind37guqoKhuZ
6vFUBTKLeYK5VZ4J18ztXhEynAf7kdROKP6XbE53qtH8qQujmOMWliSFQFdidYsj
eItzGQ4M/ZqI84UnPRL3WPdfPkWqa0x1k6PHFRcFJPp8nhl3O5V4ZdyVC3pKhzUJ
Y8sMit5RH1K8ZTYUVtEKwMX9wRMEkbfE4u/P+yItKw7QgYRdKerlDfCGZP8JY9+k
wqYmF56EVGQFaJdJ1ublVEHQkAVHOBowzccwWOV/OPi1sL+cf4RxAaY7gJpuNEIk
-----END CERTIFICATE-----

View File

@@ -1,3 +1,52 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIAO+Hiat4mkUIYXB0Ix+CycrwhgxvshVlLLfkOu8Vyk1
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDETTKri+J9mwDE
JqHnVBXE/8yONk4VuaPasYfVPDIbA6igXreyS+67odxUY/8mlPt2sdpRoGqXE50i
R1hz1F+jM1KapMKy8ysajKykGJ1Nhfep9xjn5ZZbAfCEigctEyIqS1irqZ3XPPd6
pPo+h1EPTq7RR8K6Gev04N9oeC3Cb4ZMCC2/BB2aSH0ks4ut+5rJ11qpZ9VmJl/r
+AEVFVUsJeZJDxmeD8TA0EV94qKstnduxmx2EqNNSrUknEL5xSXnXbtPZe4TU0hY
sUc/l1Phfo2J2vMVLahzGsBKXxsPBrLvLDFfT1H1MelqzG9bXMlhA7x8B+kVJYTW
tPOXgkRc5xyw2Tca4AcK7JSCPXfcVvyWkauN3eDNKnuxfgWswd6yJjKNkBxb7b6u
eTe5r6fi6azI6BJ3gW6Is9SOjNK6u0p+fBcWs4EL85gmZjWCK858fktZpSY8bQu8
YAz2Z+Wf+vE7Jk9uo+VDS6Lu4v8m+1xXTFWOpAJ6GQQH+PqwFBA1fOTi89JFmUiY
/g3ZTzpf4CIU4K4CXr9gnO0x1E57pPpxhExuPQyE0sklmC7DGBR8Hkk5t6xurDMj
e5nW0a1EjNW6mwEZuOprfPftBxrBV9UC3t7+MnWuu4nPdyeZpHzT2feivR7Yd0gW
2beNw7weemarBUJcwWXkN4ZJO6KLpQIDAQABAoICAHIKz1cuK2UBeg56yzCTfxo1
6ebs0ax5byIMZXeSQyHCnGKe5GWnC4jiXhiBB6iogPbSGJ23bnVapb1WaaLRTMaJ
eIHzGlHQR8hi1aF301tIazvJHCUNEq7Ij6zQa57aMM0VfOwt3E9BUh1kXyWYg5U7
BwD6ibyIdraLNf+BYkRFemYNklYY1AHf/yQlUw6+z4xXmoo0kpuHy85RBH/1JshB
NGpZZW6Yhpvl45lg41UnpHcsu7JU3Z53uokMZzSoPn8Ny1YzR37esXcldtkQ12B3
n07pbrNtFSHZ9sC+RAAUyjt9Fynh8SFb39l173PKkgvUmdoM1nK8m1IJSkNJIOE6
LFZS1j97rQbKkC7SLDfCdV7Rw4BrkSgUOHRjbkhI6I98S5kVLhiUYg0J7cachqUk
fMDRJbh7WqpVe9ZMOMJmFNA1Zvp2apwjwP0LXS5NAz3pkm5xlAY5WX/1PDzpyFXO
g4No/iPqStpuCpYfM1BbwLtZiISFDwn44BUdlR9ltpKlEBW6ExP3X8RGQ1WnAq7A
jdDdh6x9hfGFiI4WpnPWdTJYZ2FPXJz78Z0P7nGgcVe6DYm513JvUeDtNueOM3UU
a5BcVQrOt3SlSAUB8Woeg1CN04Sf2zV+bWwziyEWf0iQ7sJ626sMu3gH7zLVkjpQ
2IUUVDFBR3whO8ZZSXO5AoIBAQD6UcXCcN50fXoKPvpx+rWjPYcl8crJprwV0jmw
GmlPAtpVz6S/vaLblmD7Wb3bCX7ljsl+6sBr6kX5s2yV3Mm2yG7UkU9n3Y9ryYJQ
vN6hRZxUjqjLta2/yiV8L1w0zvj2y8g0iM56CMm9d+8pKr727qj6DHlnOxAJsNmw
fmG6czOC1iQZi0GZ/FU9WXxYHD9Ffde3KQrjYcjoCzJDKFtPqNGRFCzrYFoK3taL
/mxqQIzeqVZEq860WFbfrFDF9LqmXTN9k0G/Ycfcbl26W7vYRphoMMxyIAxkiLBm
rW3N/rN2niurYVnJO2O7y5ICBbR1RsEP0W6m9U8SVL/ogo1PAoIBAQDIwZv7nrmh
Vl5BzgYvfa0UE5wInYxF9WEY9isdM3AnUYhqCysvgMzt/izy6J33CS3zFHRkJihi
q7DY/YMAXenRSrWU48p1roXqFl1SHjF2+L04kT3p3o8m0szWJdrQjN9C6QsZdmRK
17/5T4/Dcs6uiC21MVWWJ2wogwAKGd0hRLxm4h8U9PVuOmUyTFV5BKw/fZg/GVil
PPmztl0c9XvBtlBzf0zaHIFTRIVyH8mmLuRtZ6ApgQUMQJ8+S0Y+USVM5WS6++U1
bVhUYTOup5GRYMgGCzG3FXCuQk+CWuo+VO3/tUL9XcNCaZAd96Mh7BjpM7j6Lfbf
5dlHZ5I/KCLLAoIBAQDRZLIPMyeDPqtmAsSxr81dnkx9e0PtZ2KSxmanX5CUHYjS
m33vPw0Kr0K1P57HqavTD5ySZIFORI0AkgzVV/oMwqGjg2JvOjGNMuWl8Dgzo+1f
9m5Q6ctMUicFOQDi0/gDSvhQqdg+0TchHUCcqTtRiNclRGYR6qBB2wRe1Xme5FtE
qSlNjOX1j9UmGsMfWZG76ccXWmfXSacsJKGI+CtZ+ZhEyiHBS7pGuZ2zQcMjJpgw
cmrNywKAbh1NwfFXhp7UJ8a41wP6uirbxB73k2ERTAyVq6x6E0EKoCUf3xepZ9Rr
92gEVs0qvllxcJrUwjzwlZ1ORB1R4IaiiO536y2VAoIBAEsa04Yw/XV0YFLyBrJh
rAykwW0fs8jAYhD6l2qXQdAT2psBjqh44THwM1S03dP7pSsZbenBtL4lSUYEoavT
dpQMBR6skaOxJPxMXaFJFmxR5khxXd5OmvOFTYiYJOJ8sVHQ6YwfFKpDSNi2gSw3
mUcGP0NYL5K7MOV/DNa6klXN50X+Nm6are8M/arxj9B0hRRDol+I1fcLdsda5D7f
P+taj4KGD3RR0bgbHGlzpvb6+A5OBEdCs2bADlM5yg+qP/Aiqaqibj+spqz6qGEg
436l3G8WZQT/imZG/IPiC1xCXb+aSnOLTm9cGsR7TpZ0Q2WLKhq+c2uUC9OA1d+2
3j0CggEAGjJAE6KV36HL+YBcQwMcwrI+hXYm7twBdzyhWtEOFfi0jam/pYzAVVKx
4NUHfShBNqIcvCNKseri6EqZkjXl1JXnaxcsw/VWq1cbBHGFS4Tkr+n/mO/oRPIp
EuQ8NTDeNk6UGN+g3TqvsoThuVf6PErrsz89zGFbp5nJI40yfMndBfz3Ocy0slIq
B6yyoLMYTtt+MrjjbJk+eklS83qMFBpU0RkQUTvbbALjPRL0C5PIM1jwAhBLUA3F
OKwi0XXAHvMZlzMiwsklNBaqSXciJXx1VIni8HL4AQINMcYgwvNzGUQFV885yFMq
+MjyTS/rmN7YbJRkCrgg/4kGlldmYg==
-----END PRIVATE KEY-----

View File

@@ -15,12 +15,14 @@
package sqlserver
import (
"path/filepath"
"strings"
"unicode"
"unicode/utf8"
"gopkg.in/yaml.v2"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
)
@@ -94,6 +96,10 @@ type ListenerYAMLConfig struct {
TLSCert *string `yaml:"tls_cert"`
// RequireSecureTransport can enable a mode where non-TLS connections are turned away.
RequireSecureTransport *bool `yaml:"require_secure_transport"`
// AllowCleartextPasswords enables use of cleartext passwords.
AllowCleartextPasswords *bool `yaml:"allow_cleartext_passwords"`
// Socket is unix socket file path
Socket *string `yaml:"socket"`
}
// PerformanceYAMLConfig contains configuration parameters for performance tweaking
@@ -112,13 +118,6 @@ type UserSessionVars struct {
Vars map[string]string `yaml:"vars"`
}
type JwksYAMLConfig struct {
Name string `yaml:"name"`
LocationUrl string `yaml:"location_url"`
Claims map[string]string `yaml:"claims"`
FieldsToLog []string `yaml:"fields_to_log"`
}
// YAMLConfig is a ServerConfig implementation which is read from a yaml file
type YAMLConfig struct {
LogLevelStr *string `yaml:"log_level"`
@@ -132,7 +131,7 @@ type YAMLConfig struct {
MetricsConfig MetricsYAMLConfig `yaml:"metrics"`
PrivilegeFile *string `yaml:"privilege_file"`
Vars []UserSessionVars `yaml:"user_session_vars"`
Jwks []JwksYAMLConfig `yaml:"jwks"`
Jwks []engine.JwksConfig `yaml:"jwks"`
}
var _ ServerConfig = YAMLConfig{}
@@ -162,6 +161,8 @@ func serverConfigAsYAMLConfig(cfg ServerConfig) YAMLConfig {
nillableStrPtr(cfg.TLSKey()),
nillableStrPtr(cfg.TLSCert()),
nillableBoolPtr(cfg.RequireSecureTransport()),
nillableBoolPtr(cfg.AllowCleartextPasswords()),
nillableStrPtr(cfg.Socket()),
},
DatabaseConfig: nil,
}
@@ -262,7 +263,7 @@ func (cfg YAMLConfig) ReadOnly() bool {
return *cfg.BehaviorConfig.ReadOnly
}
// Autocommit defines the value of the @@autocommit session variable used on every connection
// AutoCommit defines the value of the @@autocommit session variable used on every connection
func (cfg YAMLConfig) AutoCommit() bool {
if cfg.BehaviorConfig.AutoCommit == nil {
return defaultAutoCommit
@@ -312,6 +313,7 @@ func (cfg YAMLConfig) DisableClientMultiStatements() bool {
return *cfg.BehaviorConfig.DisableClientMultiStatements
}
// MetricsLabels returns labels that are applied to all prometheus metrics.
// May be nil when no labels were configured in the YAML.
func (cfg YAMLConfig) MetricsLabels() map[string]string {
	return cfg.MetricsConfig.Labels
}
@@ -332,13 +334,16 @@ func (cfg YAMLConfig) MetricsPort() int {
return *cfg.MetricsConfig.Port
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
// JSON string.
func (cfg YAMLConfig) PrivilegeFilePath() string {
if cfg.PrivilegeFile != nil {
return *cfg.PrivilegeFile
}
return ""
return filepath.Join(cfg.CfgDir(), defaultPrivilegeFilePath)
}
// UserVars is an array containing user specific session variables
func (cfg YAMLConfig) UserVars() []UserSessionVars {
if cfg.Vars != nil {
return cfg.Vars
@@ -347,13 +352,21 @@ func (cfg YAMLConfig) UserVars() []UserSessionVars {
return nil
}
func (cfg YAMLConfig) JwksConfig() []JwksYAMLConfig {
// JwksConfig is JSON Web Key Set config, and used to validate a user authed with a jwt (JSON Web Token).
func (cfg YAMLConfig) JwksConfig() []engine.JwksConfig {
if cfg.Jwks != nil {
return cfg.Jwks
}
return nil
}
// AllowCleartextPasswords reports whether clients may authenticate using
// cleartext passwords. A nil YAML value (key omitted) falls back to
// defaultAllowCleartextPasswords.
func (cfg YAMLConfig) AllowCleartextPasswords() bool {
	if cfg.ListenerConfig.AllowCleartextPasswords == nil {
		return defaultAllowCleartextPasswords
	}
	return *cfg.ListenerConfig.AllowCleartextPasswords
}
// QueryParallelism returns the parallelism that should be used by the go-mysql-server analyzer
func (cfg YAMLConfig) QueryParallelism() int {
if cfg.PerformanceConfig.QueryParallelism == nil {
@@ -363,6 +376,7 @@ func (cfg YAMLConfig) QueryParallelism() int {
return *cfg.PerformanceConfig.QueryParallelism
}
// TLSKey returns a path to the servers PEM-encoded private TLS key. "" if there is none.
func (cfg YAMLConfig) TLSKey() string {
if cfg.ListenerConfig.TLSKey == nil {
return ""
@@ -370,6 +384,7 @@ func (cfg YAMLConfig) TLSKey() string {
return *cfg.ListenerConfig.TLSKey
}
// TLSCert returns a path to the servers PEM-encoded TLS certificate chain. "" if there is none.
func (cfg YAMLConfig) TLSCert() string {
if cfg.ListenerConfig.TLSCert == nil {
return ""
@@ -377,6 +392,7 @@ func (cfg YAMLConfig) TLSCert() string {
return *cfg.ListenerConfig.TLSCert
}
// RequireSecureTransport is true if the server should reject non-TLS connections.
func (cfg YAMLConfig) RequireSecureTransport() bool {
if cfg.ListenerConfig.RequireSecureTransport == nil {
return false
@@ -384,6 +400,7 @@ func (cfg YAMLConfig) RequireSecureTransport() bool {
return *cfg.ListenerConfig.RequireSecureTransport
}
// PersistenceBehavior is "load" if we include persisted system globals on server init
func (cfg YAMLConfig) PersistenceBehavior() string {
if cfg.BehaviorConfig.PersistenceBehavior == nil {
return loadPerisistentGlobals
@@ -391,16 +408,30 @@ func (cfg YAMLConfig) PersistenceBehavior() string {
return *cfg.BehaviorConfig.PersistenceBehavior
}
// DataDir is the path to a directory to use as the data dir, both to create new databases and locate existing ones.
func (cfg YAMLConfig) DataDir() string {
if cfg.DataDirStr != nil {
return *cfg.DataDirStr
}
return ""
return defaultDataDir
}
// CfgDir is the path to a directory to use to store the dolt configuration files.
func (cfg YAMLConfig) CfgDir() string {
if cfg.CfgDirStr != nil {
return *cfg.DataDirStr
return *cfg.CfgDirStr
}
return ""
return filepath.Join(cfg.DataDir(), defaultCfgDir)
}
// Socket is a path to the unix socket file. Returns "" when the key is
// absent from the YAML (presumably meaning no unix socket listener —
// confirm with the caller), and the default socket path when the key is
// present but empty.
func (cfg YAMLConfig) Socket() string {
	// nil pointer means the `socket` key was omitted entirely
	if cfg.ListenerConfig.Socket == nil {
		return ""
	}
	// if defined but empty -> default
	if *cfg.ListenerConfig.Socket == "" {
		return defaultUnixSocketFilePath
	}
	return *cfg.ListenerConfig.Socket
}

View File

@@ -18,9 +18,10 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
)
func TestUnmarshall(t *testing.T) {
@@ -125,7 +126,7 @@ jwks:
},
},
}
expected.Jwks = []JwksYAMLConfig{
expected.Jwks = []engine.JwksConfig{
{
Name: "jwks_name",
LocationUrl: "https://website.com",
@@ -198,10 +199,12 @@ func TestYAMLConfigDefaults(t *testing.T) {
assert.Equal(t, "", cfg.TLSKey())
assert.Equal(t, "", cfg.TLSCert())
assert.Equal(t, false, cfg.RequireSecureTransport())
assert.Equal(t, false, cfg.AllowCleartextPasswords())
assert.Equal(t, false, cfg.DisableClientMultiStatements())
assert.Equal(t, defaultMetricsHost, cfg.MetricsHost())
assert.Equal(t, defaultMetricsPort, cfg.MetricsPort())
assert.Nil(t, cfg.MetricsConfig.Labels)
assert.Equal(t, defaultAllowCleartextPasswords, cfg.AllowCleartextPasswords())
c, err := LoadTLSConfig(cfg)
assert.NoError(t, err)
@@ -234,7 +237,7 @@ listener:
assert.NoError(t, err)
assert.NotNil(t, c)
assert.Len(t, c.Certificates, 1)
assert.Len(t, c.Certificates[0].Certificate, 2)
assert.Len(t, c.Certificates[0].Certificate, 1)
cfg = YAMLConfig{}
err = yaml.Unmarshal([]byte(`

View File

@@ -544,7 +544,7 @@ func move(ctx context.Context, rd table.SqlRowReader, wr *mvdata.SqlEngineTableW
r := pipeline.GetTransFailureSqlRow(trf)
if r != nil {
cli.PrintErr(sql.FormatRow(r))
cli.PrintErr(sql.FormatRow(r), "\n")
}
return false

View File

@@ -56,7 +56,7 @@ import (
)
const (
Version = "0.40.17"
Version = "0.40.20"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -355,6 +355,33 @@ func runMain() int {
defer tempfiles.MovableTempFileProvider.Clean()
// Find all database names and add global variables for them. This needs to
// occur before a call to dsess.InitPersistedSystemVars. Otherwise, database
// specific persisted system vars will fail to load.
//
// In general, there is a lot of work TODO in this area. System global
// variables are persisted to the Dolt local config if found and if not
// found the Dolt global config (typically ~/.dolt/config_global.json).
// Depending on what directory a dolt sql-server is started in, users may
// see different variables values. For example, start a dolt sql-server in
// the dolt database folder and persist some system variable.
// If dolt sql-server is started outside that folder, those system variables
// will be lost. This is particularly confusing for database specific system
// variables like `${db_name}_default_branch` (maybe these should not be
// part of Dolt config in the first place!).
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
cli.PrintErrln("failed to load database names")
return 1
}
_ = mrEnv.Iter(func(dbName string, dEnv *env.DoltEnv) (stop bool, err error) {
dsess.DefineSystemVariablesForDB(dbName)
return false, nil
})
err = dsess.InitPersistedSystemVars(dEnv)
if err != nil {
cli.Printf("error: failed to load persisted global variables: %s\n", err.Error())

View File

@@ -15,7 +15,10 @@
package serial
import (
"encoding/binary"
"unsafe"
fb "github.com/google/flatbuffers/go"
)
// KEEP THESE IN SYNC WITH .fbs FILES!
@@ -33,11 +36,44 @@ const TableSchemaFileID = "DSCH"
const ForeignKeyCollectionFileID = "DFKC"
const MergeArtifactsFileID = "ARTM"
const MessageTypesKind int = 27
const MessagePrefixSz = 4
type Message []byte
func GetFileID(bs []byte) string {
if len(bs) < 8 {
if len(bs) < 8+MessagePrefixSz {
return ""
}
return byteSliceToString(bs[4:8])
return byteSliceToString(bs[MessagePrefixSz+4 : MessagePrefixSz+8])
}
// FinishMessage finalizes the flatbuffer being built in |b|, rooted at |off|
// and tagged with the 4-byte |fileID|, and returns the finished bytes with a
// |MessagePrefixSz|-byte prefix prepended. Panics if the encoded message is
// too large for the 3-byte size field.
func FinishMessage(b *fb.Builder, off fb.UOffsetT, fileID []byte) Message {
	// We finish the buffer by prefixing it with:
	// 1) 1 byte NomsKind == SerialMessage.
	// 2) big endian 3 byte uint representing the size of the message, not
	// including the kind or size prefix bytes.
	//
	// This allows chunks we serialize here to be read by types binary
	// codec.
	//
	// All accessors in this package expect this prefix to be on the front
	// of the message bytes as well. See |MessagePrefixSz|.
	b.Prep(1, fb.SizeInt32+4+MessagePrefixSz)
	b.FinishWithFileIdentifier(off, fileID)

	var size [4]byte
	binary.BigEndian.PutUint32(size[:], uint32(len(b.Bytes)-int(b.Head())))
	// Only 3 bytes of the prefix carry the length (size[1:] is copied
	// below), so the high byte of the 32-bit length must be zero.
	if size[0] != 0 {
		panic("message is too large to be encoded")
	}

	bytes := b.Bytes[b.Head()-MessagePrefixSz:]
	bytes[0] = byte(MessageTypesKind)
	copy(bytes[1:], size[1:])
	return bytes
}
// byteSliceToString converts a []byte to string without a heap allocation.

View File

@@ -127,42 +127,8 @@ func (rcv *RootValue) MutateForeignKeyAddr(j int, n byte) bool {
return false
}
func (rcv *RootValue) SuperSchemasAddr(j int) byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
}
return 0
}
func (rcv *RootValue) SuperSchemasAddrLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
func (rcv *RootValue) SuperSchemasAddrBytes() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *RootValue) MutateSuperSchemasAddr(j int, n byte) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
}
return false
}
func RootValueStart(builder *flatbuffers.Builder) {
builder.StartObject(4)
builder.StartObject(3)
}
func RootValueAddFeatureVersion(builder *flatbuffers.Builder, featureVersion int64) {
builder.PrependInt64Slot(0, featureVersion, 0)
@@ -179,12 +145,6 @@ func RootValueAddForeignKeyAddr(builder *flatbuffers.Builder, foreignKeyAddr fla
func RootValueStartForeignKeyAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
func RootValueAddSuperSchemasAddr(builder *flatbuffers.Builder, superSchemasAddr flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(superSchemasAddr), 0)
}
func RootValueStartSuperSchemasAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
func RootValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}

View File

@@ -17,7 +17,7 @@ require (
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20220628181817-dae896d89d02
github.com/dolthub/vitess v0.0.0-20220720203453-81dc23415170
github.com/dustin/go-humanize v1.0.0
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -52,13 +52,13 @@ require (
google.golang.org/api v0.32.0
google.golang.org/grpc v1.37.0
google.golang.org/protobuf v1.27.1
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/square/go-jose.v2 v2.6.0
gopkg.in/src-d/go-errors.v1 v1.0.0
gopkg.in/yaml.v2 v2.3.0
)
require (
github.com/dolthub/go-mysql-server v0.12.1-0.20220715220455-74cc955d9dfc
github.com/dolthub/go-mysql-server v0.12.1-0.20220726021412-60dcca3a24be
github.com/google/flatbuffers v2.0.6+incompatible
github.com/gosuri/uilive v0.0.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
@@ -112,6 +112,7 @@ require (
github.com/pierrec/lz4/v4 v4.1.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
@@ -139,4 +140,8 @@ replace (
github.com/oliveagle/jsonpath => github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474
)
// replace github.com/dolthub/vitess => /Users/taylor/go/src/github.com/dolthub/vitess
// replace github.com/dolthub/go-mysql-server => /Users/taylor/go/src/github.com/dolthub/go-mysql-server
go 1.18

View File

@@ -173,8 +173,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220715220455-74cc955d9dfc h1:ndD9xtll3pNiyCinZQzj5hJS++umCJI1nn34JoOSpDA=
github.com/dolthub/go-mysql-server v0.12.1-0.20220715220455-74cc955d9dfc/go.mod h1:fhyVDvV0K59cdk9N7TQsPjr2Hp/Qseej8+R9tVqPDCg=
github.com/dolthub/go-mysql-server v0.12.1-0.20220726021412-60dcca3a24be h1:0nkOM5NJjD7GGVSJm+EtWghopFRKURsy0ahLv8nA84Q=
github.com/dolthub/go-mysql-server v0.12.1-0.20220726021412-60dcca3a24be/go.mod h1:JgB3WpY0RMgyAda3YG5VHVncH2B8i1N9Mx9LOp41lIs=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
@@ -183,8 +183,8 @@ github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxP
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20220628181817-dae896d89d02 h1:Z0bwRtgl3Z/D+ReaAUM+OeSthpAIbGeeXj/pl5xeVxk=
github.com/dolthub/vitess v0.0.0-20220628181817-dae896d89d02/go.mod h1:5xfuFfpljoMYespuUmyl5zrHoK0Rl7Bm6yAsnJJJzuY=
github.com/dolthub/vitess v0.0.0-20220720203453-81dc23415170 h1:cBafmAGEyfMH4M/iw2dB6+h5MhMY4k3FnGYixMOPE6s=
github.com/dolthub/vitess v0.0.0-20220720203453-81dc23415170/go.mod h1:5xfuFfpljoMYespuUmyl5zrHoK0Rl7Bm6yAsnJJJzuY=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -596,6 +596,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -1199,8 +1201,8 @@ gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mN
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-errors.v1 v1.0.0 h1:cooGdZnCjYbeS1zb1s6pVAAimTdKceRrpn7aKOnNIfc=
gopkg.in/src-d/go-errors.v1 v1.0.0/go.mod h1:q1cBlomlw2FnDBDNGlnh6X0jPihy+QxZfMMNxPCbdYg=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=

View File

@@ -28,11 +28,11 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
func NewRowDiffer(ctx context.Context, fromSch, toSch schema.Schema, buf int) RowDiffer {
func NewRowDiffer(ctx context.Context, format *types.NomsBinFormat, fromSch, toSch schema.Schema, buf int) RowDiffer {
ad := NewAsyncDiffer(buf)
// Returns an EmptyRowDiffer if the two schemas are not diffable.
if !schema.ArePrimaryKeySetsDiffable(fromSch, toSch) {
if !schema.ArePrimaryKeySetsDiffable(format, fromSch, toSch) {
return &EmptyRowDiffer{}
}

View File

@@ -66,7 +66,7 @@ func SummaryForTableDelta(ctx context.Context, ch chan DiffSummaryProgress, td T
return errhand.BuildDError("cannot retrieve schema for table %s", td.ToName).AddCause(err).Build()
}
if !schema.ArePrimaryKeySetsDiffable(fromSch, toSch) {
if !schema.ArePrimaryKeySetsDiffable(td.Format(), fromSch, toSch) {
return errhand.BuildDError("diff summary will not compute due to primary key set change with table %s", td.CurName()).Build()
}

View File

@@ -284,7 +284,7 @@ func (td TableDelta) HasSchemaChanged(ctx context.Context) (bool, error) {
}
func (td TableDelta) HasPrimaryKeySetChanged() bool {
return !schema.ArePrimaryKeySetsDiffable(td.FromSch, td.ToSch)
return !schema.ArePrimaryKeySetsDiffable(td.Format(), td.FromSch, td.ToSch)
}
func (td TableDelta) HasChanges() (bool, error) {

View File

@@ -250,7 +250,7 @@ func (i prollyIndex) Format() *types.NomsBinFormat {
// bytes implements Index.
func (i prollyIndex) bytes() ([]byte, error) {
return []byte(shim.ValueFromMap(i.index).(types.TupleRowStorage)), nil
return []byte(shim.ValueFromMap(i.index).(types.SerialMessage)), nil
}
var _ Index = prollyIndex{}

View File

@@ -194,11 +194,12 @@ func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
err = errors.New("table ref is unexpected noms value; not SerialMessage")
return nil, err
}
if serial.GetFileID([]byte(sm)) != serial.TableFileID {
err = errors.New("table ref is unexpected noms value; GetFileID == " + serial.GetFileID([]byte(sm)))
id := serial.GetFileID(sm)
if id != serial.TableFileID {
err = errors.New("table ref is unexpected noms value; GetFileID == " + id)
return nil, err
}
return doltDevTable{vrw, ns, serial.GetRootAsTable([]byte(sm), 0)}, nil
return doltDevTable{vrw, ns, serial.GetRootAsTable([]byte(sm), serial.MessagePrefixSz)}, nil
}
}
@@ -745,7 +746,7 @@ func (fields serialTableFields) write() *serial.Table {
builder := flatbuffers.NewBuilder(1024)
indexesam := fields.indexes
indexesbytes := []byte(tree.ValueFromNode(indexesam.Node()).(types.TupleRowStorage))
indexesbytes := []byte(tree.ValueFromNode(indexesam.Node()).(types.SerialMessage))
schemaoff := builder.CreateByteVector(fields.schema)
rowsoff := builder.CreateByteVector(fields.rows)
@@ -772,8 +773,8 @@ func (fields serialTableFields) write() *serial.Table {
serial.TableAddConflicts(builder, conflictsoff)
serial.TableAddViolations(builder, violationsoff)
serial.TableAddArtifacts(builder, artifactsoff)
builder.FinishWithFileIdentifier(serial.TableEnd(builder), []byte(serial.TableFileID))
return serial.GetRootAsTable(builder.FinishedBytes(), 0)
bs := serial.FinishMessage(builder, serial.TableEnd(builder), []byte(serial.TableFileID))
return serial.GetRootAsTable(bs, serial.MessagePrefixSz)
}
func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
@@ -871,7 +872,7 @@ func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
if err != nil {
return nil, err
}
m := shim.MapFromValue(types.TupleRowStorage(rowbytes), sch, t.ns)
m := shim.MapFromValue(types.SerialMessage(rowbytes), sch, t.ns)
return IndexFromProllyMap(m), nil
}
}

View File

@@ -88,7 +88,7 @@ func deserializeFlatbufferForeignKeys(msg types.SerialMessage) (*ForeignKeyColle
return nil, fmt.Errorf("expect Serial Message with ForeignKeyCollectionFileID")
}
c := serial.GetRootAsForeignKeyCollection(msg, 0)
c := serial.GetRootAsForeignKeyCollection(msg, serial.MessagePrefixSz)
collection := &ForeignKeyCollection{
foreignKeys: make(map[string]ForeignKey, c.ForeignKeysLength()),
}
@@ -203,8 +203,7 @@ func serializeFlatbufferForeignKeys(fkc *ForeignKeyCollection) types.SerialMessa
serial.ForeignKeyCollectionStart(b)
serial.ForeignKeyCollectionAddForeignKeys(b, vec)
o := serial.ForeignKeyCollectionEnd(b)
b.FinishWithFileIdentifier(o, []byte(serial.ForeignKeyCollectionFileID))
return types.SerialMessage(b.FinishedBytes())
return []byte(serial.FinishMessage(b, o, []byte(serial.ForeignKeyCollectionFileID)))
}
func serializeStringVector(b *fb.Builder, s []string) fb.UOffsetT {
@@ -231,6 +230,6 @@ func emptyForeignKeyCollection(msg types.SerialMessage) bool {
if serial.GetFileID(msg) != serial.ForeignKeyCollectionFileID {
return false
}
c := serial.GetRootAsForeignKeyCollection(msg, 0)
c := serial.GetRootAsForeignKeyCollection(msg, serial.MessagePrefixSz)
return c.ForeignKeysLength() == 0
}

View File

@@ -26,8 +26,6 @@ import (
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/encoding"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly"
@@ -39,10 +37,12 @@ import (
const (
ddbRootStructName = "dolt_db_root"
tablesKey = "tables"
tablesKey = "tables"
foreignKeyKey = "foreign_key"
featureVersKey = "feature_ver"
// deprecated
superSchemasKey = "super_schemas"
foreignKeyKey = "foreign_key"
featureVersKey = "feature_ver"
)
type FeatureVersion int64
@@ -83,10 +83,8 @@ type rvStorage interface {
GetFeatureVersion() (FeatureVersion, bool, error)
GetTablesMap(ctx context.Context, vr types.ValueReadWriter, ns tree.NodeStore) (tableMap, error)
GetSuperSchemaMap(ctx context.Context, vr types.ValueReader) (types.Map, bool, error)
GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error)
SetSuperSchemaMap(ctx context.Context, vrw types.ValueReadWriter, m types.Map) (rvStorage, error)
SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, m types.Value) (rvStorage, error)
SetFeatureVersion(v FeatureVersion) (rvStorage, error)
@@ -255,7 +253,7 @@ func newRootValue(vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value) (
var storage rvStorage
if vrw.Format().UsesFlatbuffers() {
srv := serial.GetRootAsRootValue([]byte(v.(types.SerialMessage)), 0)
srv := serial.GetRootAsRootValue([]byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
storage = fbRvStorage{srv}
} else {
st, ok := v.(types.Struct)
@@ -306,7 +304,7 @@ func decodeRootNomsValue(vrw types.ValueReadWriter, ns tree.NodeStore, val types
func isRootValue(nbf *types.NomsBinFormat, val types.Value) bool {
if nbf.UsesFlatbuffers() {
if sm, ok := val.(types.SerialMessage); ok {
return string(serial.GetFileID([]byte(sm))) == serial.RootValueFileID
return string(serial.GetFileID(sm)) == serial.RootValueFileID
}
} else {
if st, ok := val.(types.Struct); ok {
@@ -321,19 +319,16 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter, ns tree.Node
builder := flatbuffers.NewBuilder(80)
emptyam := prolly.NewEmptyAddressMap(ns)
ambytes := []byte(tree.ValueFromNode(emptyam.Node()).(types.TupleRowStorage))
ambytes := []byte(tree.ValueFromNode(emptyam.Node()).(types.SerialMessage))
tablesoff := builder.CreateByteVector(ambytes)
var empty hash.Hash
fkoff := builder.CreateByteVector(empty[:])
ssoff := builder.CreateByteVector(empty[:])
serial.RootValueStart(builder)
serial.RootValueAddFeatureVersion(builder, int64(DoltFeatureVersion))
serial.RootValueAddTables(builder, tablesoff)
serial.RootValueAddForeignKeyAddr(builder, fkoff)
serial.RootValueAddSuperSchemasAddr(builder, ssoff)
builder.FinishWithFileIdentifier(serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
bs := builder.FinishedBytes()
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
return newRootValue(vrw, ns, types.SerialMessage(bs))
}
@@ -390,43 +385,6 @@ func (root *RootValue) HasTable(ctx context.Context, tName string) (bool, error)
return !a.IsEmpty(), nil
}
// GetSuperSchema returns the SuperSchema for the table name specified if that table exists.
func (root *RootValue) GetSuperSchema(ctx context.Context, tName string) (*schema.SuperSchema, bool, error) {
// SuperSchema is only persisted on Commit()
ss, found, err := root.getSuperSchemaAtLastCommit(ctx, tName)
if err != nil {
return nil, false, err
}
if !found {
ss, _ = schema.NewSuperSchema()
}
t, tblFound, err := root.GetTable(ctx, tName)
if err != nil {
return nil, false, err
}
if !found && !tblFound {
// table doesn't exist in current commit or in history
return nil, false, nil
}
if tblFound {
sch, err := t.GetSchema(ctx)
if err != nil {
return nil, false, err
}
err = ss.AddSchemas(sch)
if err != nil {
return nil, false, err
}
}
return ss, true, err
}
func (root *RootValue) GenerateTagsForNewColColl(ctx context.Context, tableName string, cc *schema.ColCollection) (*schema.ColCollection, error) {
newColNames := make([]string, 0, cc.Size())
newColKinds := make([]types.NomsKind, 0, cc.Size())
@@ -519,12 +477,11 @@ func (root *RootValue) GenerateTagsForNewColumns(
existingColKinds = append(existingColKinds, col.Kind)
}
rootSuperSchema, err := GetRootValueSuperSchema(ctx, root)
existingTags, err := GetAllTagsForRoot(ctx, root)
if err != nil {
return nil, err
}
existingTags := set.NewUint64Set(rootSuperSchema.AllTags())
for i := range newTags {
if newTags[i] > 0 {
continue
@@ -532,59 +489,12 @@ func (root *RootValue) GenerateTagsForNewColumns(
newTags[i] = schema.AutoGenerateTag(existingTags, tableName, existingColKinds, newColNames[i], newColKinds[i])
existingColKinds = append(existingColKinds, newColKinds[i])
existingTags.Add(newTags[i])
existingTags.Add(newTags[i], tableName)
}
return newTags, nil
}
// GerSuperSchemaMap returns the Noms map that tracks SuperSchemas, used to create new RootValues on checkout branch.
func (root *RootValue) GetSuperSchemaMap(ctx context.Context) (types.Map, error) {
return root.getOrCreateSuperSchemaMap(ctx)
}
// SuperSchemas are only persisted on commit.
func (root *RootValue) getSuperSchemaAtLastCommit(ctx context.Context, tName string) (*schema.SuperSchema, bool, error) {
ssm, err := root.getOrCreateSuperSchemaMap(ctx)
if err != nil {
return nil, false, err
}
v, found, err := ssm.MaybeGet(ctx, types.String(tName))
if err != nil {
return nil, false, err
}
if !found {
// Super Schema doesn't exist for new or nonexistent table
return nil, false, nil
}
ssValRef := v.(types.Ref)
ssVal, err := ssValRef.TargetValue(ctx, root.vrw)
if err != nil {
return nil, false, err
}
ss, err := encoding.UnmarshalSuperSchemaNomsValue(ctx, root.vrw.Format(), ssVal)
if err != nil {
return nil, false, err
}
return ss, true, nil
}
func (root *RootValue) getOrCreateSuperSchemaMap(ctx context.Context) (types.Map, error) {
m, found, err := root.st.GetSuperSchemaMap(ctx, root.vrw)
if err != nil {
return types.Map{}, err
}
if found {
return m, nil
}
return types.NewMap(ctx, root.vrw)
}
func (root *RootValue) GetAllSchemas(ctx context.Context) (map[string]schema.Schema, error) {
m := make(map[string]schema.Schema)
err := root.IterTables(ctx, func(name string, table *Table, sch schema.Schema) (stop bool, err error) {
@@ -699,8 +609,7 @@ func (root *RootValue) GetTableInsensitive(ctx context.Context, tName string) (*
return tbl, resolvedName, ok, nil
}
// GetTableByColTag looks for the table containing the given column tag. It returns false if no table exists in the history.
// If the table containing the given tag previously existed and was deleted, it will return its name and a nil pointer.
// GetTableByColTag looks for the table containing the given column tag.
func (root *RootValue) GetTableByColTag(ctx context.Context, tag uint64) (tbl *Table, name string, found bool, err error) {
err = root.IterTables(ctx, func(tn string, t *Table, s schema.Schema) (bool, error) {
_, found = s.GetAllCols().GetByTag(tag)
@@ -715,18 +624,6 @@ func (root *RootValue) GetTableByColTag(ctx context.Context, tag uint64) (tbl *T
return nil, "", false, err
}
err = root.iterSuperSchemas(ctx, func(tn string, ss *schema.SuperSchema) (bool, error) {
_, found = ss.GetByTag(tag)
if found {
name = tn
}
return found, nil
})
if err != nil {
return nil, "", false, err
}
return tbl, name, found, nil
}
@@ -846,59 +743,6 @@ func (root *RootValue) IterTables(ctx context.Context, cb func(name string, tabl
})
}
func (root *RootValue) iterSuperSchemas(ctx context.Context, cb func(name string, ss *schema.SuperSchema) (stop bool, err error)) error {
m, err := root.getOrCreateSuperSchemaMap(ctx)
if err != nil {
return err
}
return m.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
name := string(key.(types.String))
// use GetSuperSchema() to pickup uncommitted SuperSchemas
ss, _, err := root.GetSuperSchema(ctx, name)
if err != nil {
return false, err
}
return cb(name, ss)
})
}
// PutSuperSchema writes a new map entry for the table name and super schema supplied, it will overwrite an existing entry.
func (root *RootValue) PutSuperSchema(ctx context.Context, tName string, ss *schema.SuperSchema) (*RootValue, error) {
ssm, err := root.getOrCreateSuperSchemaMap(ctx)
if err != nil {
return nil, err
}
ssVal, err := encoding.MarshalSuperSchemaAsNomsValue(ctx, root.VRW(), ss)
if err != nil {
return nil, err
}
ssRef, err := WriteValAndGetRef(ctx, root.VRW(), ssVal)
if err != nil {
return nil, err
}
m, err := ssm.Edit().Set(types.String(tName), ssRef).Map(ctx)
if err != nil {
return nil, err
}
newStorage, err := root.st.SetSuperSchemaMap(ctx, root.vrw, m)
if err != nil {
return nil, err
}
return root.withStorage(newStorage), nil
}
func (root *RootValue) withStorage(st rvStorage) *RootValue {
return &RootValue{root.vrw, root.ns, st, nil}
}
@@ -970,108 +814,13 @@ func (root *RootValue) HashOf() (hash.Hash, error) {
return root.st.nomsValue().Hash(root.vrw.Format())
}
// UpdateSuperSchemasFromOther updates SuperSchemas of tblNames using SuperSchemas from other.
func (root *RootValue) UpdateSuperSchemasFromOther(ctx context.Context, tblNames []string, other *RootValue) (*RootValue, error) {
ssm, err := root.getOrCreateSuperSchemaMap(ctx)
if err != nil {
return nil, err
}
sse := ssm.Edit()
for _, tn := range tblNames {
ss, found, err := root.GetSuperSchema(ctx, tn)
if err != nil {
return nil, err
}
oss, foundOther, err := other.GetSuperSchema(ctx, tn)
if err != nil {
return nil, err
}
var newSS *schema.SuperSchema
if found && foundOther {
newSS, err = schema.SuperSchemaUnion(ss, oss)
} else if found {
newSS = ss
} else if foundOther {
newSS = oss
} else {
h, _ := root.HashOf()
oh, _ := other.HashOf()
return nil, errors.New(fmt.Sprintf("table %s does not exist in root %s or root %s", tn, h.String(), oh.String()))
}
if err != nil {
return nil, err
}
ssVal, err := encoding.MarshalSuperSchemaAsNomsValue(ctx, root.VRW(), newSS)
if err != nil {
return nil, err
}
ssRef, err := WriteValAndGetRef(ctx, root.VRW(), ssVal)
if err != nil {
return nil, err
}
sse = sse.Set(types.String(tn), ssRef)
}
m, err := sse.Map(ctx)
if err != nil {
return nil, err
}
newStorage, err := root.st.SetSuperSchemaMap(ctx, root.vrw, m)
if err != nil {
return nil, err
}
return root.withStorage(newStorage), nil
}
// RenameTable renames a table by changing its string key in the RootValue's table map. In order to preserve
// column tag information, use this method instead of a table drop + add.
func (root *RootValue) RenameTable(ctx context.Context, oldName, newName string) (*RootValue, error) {
	// Re-key the table entry in storage first.
	newStorage, err := root.st.EditTablesMap(ctx, root.vrw, root.ns, []tableEdit{{old_name: oldName, name: newName}})
	if err != nil {
		return nil, err
	}

	// Carry any super schema entry over to the new table name as well.
	superSchemas, err := root.getOrCreateSuperSchemaMap(ctx)
	if err != nil {
		return nil, err
	}

	ssValue, found, err := superSchemas.MaybeGet(ctx, types.String(oldName))
	if err != nil {
		return nil, err
	}

	if found {
		editor := superSchemas.Edit().Remove(types.String(oldName)).Set(types.String(newName), ssValue)
		superSchemas, err = editor.Map(ctx)
		if err != nil {
			return nil, err
		}

		newStorage, err = newStorage.SetSuperSchemaMap(ctx, root.vrw, superSchemas)
		if err != nil {
			return nil, err
		}
	}

	return root.withStorage(newStorage), nil
}
@@ -1210,55 +959,16 @@ func (root *RootValue) ValidateForeignKeysOnSchemas(ctx context.Context) (*RootV
return root.PutForeignKeyCollection(ctx, fkCollection)
}
// GetRootValueSuperSchema creates a SuperSchema with every column in history of root.
func GetRootValueSuperSchema(ctx context.Context, root *RootValue) (*schema.SuperSchema, error) {
ssMap, err := root.getOrCreateSuperSchemaMap(ctx)
if err != nil {
return nil, err
}
var sss []*schema.SuperSchema
err = ssMap.Iter(ctx, func(key, value types.Value) (stop bool, err error) {
ssValRef := value.(types.Ref)
ssVal, err := ssValRef.TargetValue(ctx, root.vrw)
if err != nil {
return true, err
// GetAllTagsForRoot gets all tags for root
func GetAllTagsForRoot(ctx context.Context, root *RootValue) (tags schema.TagMapping, err error) {
tags = make(schema.TagMapping)
err = root.IterTables(ctx, func(tblName string, _ *Table, sch schema.Schema) (stop bool, err error) {
for _, t := range sch.GetAllCols().Tags {
tags.Add(t, tblName)
}
ss, err := encoding.UnmarshalSuperSchemaNomsValue(ctx, root.vrw.Format(), ssVal)
if err != nil {
return true, err
}
sss = append(sss, ss) // go get -f parseltongue
return false, nil
return
})
if err != nil {
return nil, err
}
rootSuperSchema, err := schema.SuperSchemaUnion(sss...)
if err != nil {
return nil, err
}
// super schemas are only persisted on commit, so add in working schemas
err = root.IterTables(ctx, func(name string, table *Table, sch schema.Schema) (stop bool, err error) {
err = rootSuperSchema.AddSchemas(sch)
if err != nil {
return true, err
}
return false, nil
})
if err != nil {
return nil, err
}
return rootSuperSchema, nil
return
}
// UnionTableNames returns an array of all table names in all roots passed as params.
@@ -1310,29 +1020,22 @@ func validateTagUniqueness(ctx context.Context, root *RootValue, tableName strin
return err
}
var ee []string
err = root.iterSuperSchemas(ctx, func(tn string, ss *schema.SuperSchema) (stop bool, err error) {
if tn == tableName {
return false, nil
}
err = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
_, ok := ss.GetByTag(tag)
if ok {
ee = append(ee, schema.ErrTagPrevUsed(tag, col.Name, tn).Error())
}
return false, nil
})
return false, err
})
existing, err := GetAllTagsForRoot(ctx, root)
if err != nil {
return err
}
var ee []string
err = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
name, ok := existing.Get(tag)
if ok && name != tableName {
ee = append(ee, schema.ErrTagPrevUsed(tag, col.Name, name).Error())
}
return false, nil
})
if len(ee) > 0 {
return fmt.Errorf(strings.Join(ee, "\n"))
}
return nil
}
@@ -1393,7 +1096,7 @@ func (r fbRvStorage) GetFeatureVersion() (FeatureVersion, bool, error) {
// getAddressMap reconstructs the root's table address map from its serialized bytes.
func (r fbRvStorage) getAddressMap(vrw types.ValueReadWriter, ns tree.NodeStore) prolly.AddressMap {
	tbytes := r.srv.TablesBytes()
	// Merge-conflict resolution: both the old TupleRowStorage and new SerialMessage
	// declarations were present (duplicate `node :=`); keep the SerialMessage form.
	node := shim.NodeFromValue(types.SerialMessage(tbytes))
	return prolly.NewAddressMap(node, ns)
}
@@ -1422,18 +1125,6 @@ func (m fbTableMap) Iter(ctx context.Context, cb func(string, hash.Hash) (bool,
})
}
// GetSuperSchemaMap reads the super schema map from storage. The boolean result
// is false when no super schema map has been recorded for this root.
func (r fbRvStorage) GetSuperSchemaMap(ctx context.Context, vr types.ValueReader) (types.Map, bool, error) {
	// An all-zero address means no map has ever been written.
	addr := hash.New(r.srv.SuperSchemasAddrBytes())
	if addr.IsEmpty() {
		return types.Map{}, false, nil
	}

	val, err := vr.ReadValue(ctx, addr)
	if err != nil {
		return types.Map{}, false, err
	}

	return val.(types.Map), true, nil
}
func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error) {
addr := hash.New(r.srv.ForeignKeyAddrBytes())
if addr.IsEmpty() {
@@ -1446,20 +1137,6 @@ func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (
return v.(types.SerialMessage), true, nil
}
// SetSuperSchemaMap writes m to the value store and records its address in a
// cloned storage value. An empty map is recorded as the zero hash and not written.
func (r fbRvStorage) SetSuperSchemaMap(ctx context.Context, vrw types.ValueReadWriter, m types.Map) (rvStorage, error) {
	var addr hash.Hash
	if !m.Empty() {
		ref, err := vrw.WriteValue(ctx, m)
		if err != nil {
			return nil, err
		}
		addr = ref.TargetHash()
	}

	ret := r.clone()
	copy(ret.srv.SuperSchemasAddrBytes(), addr[:])
	return ret, nil
}
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
builder := flatbuffers.NewBuilder(80)
@@ -1508,19 +1185,17 @@ func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWrite
return nil, err
}
ambytes := []byte(tree.ValueFromNode(am.Node()).(types.TupleRowStorage))
ambytes := []byte(tree.ValueFromNode(am.Node()).(types.SerialMessage))
tablesoff := builder.CreateByteVector(ambytes)
fkoff := builder.CreateByteVector(r.srv.ForeignKeyAddrBytes())
ssoff := builder.CreateByteVector(r.srv.SuperSchemasAddrBytes())
serial.RootValueStart(builder)
serial.RootValueAddFeatureVersion(builder, r.srv.FeatureVersion())
serial.RootValueAddTables(builder, tablesoff)
serial.RootValueAddForeignKeyAddr(builder, fkoff)
serial.RootValueAddSuperSchemasAddr(builder, ssoff)
builder.FinishWithFileIdentifier(serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
bs := builder.FinishedBytes()
return fbRvStorage{serial.GetRootAsRootValue(bs, 0)}, nil
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
return fbRvStorage{serial.GetRootAsRootValue(bs, serial.MessagePrefixSz)}, nil
}
func (r fbRvStorage) SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (rvStorage, error) {
@@ -1552,11 +1227,10 @@ func (r fbRvStorage) clone() fbRvStorage {
}
func (r fbRvStorage) DebugString(ctx context.Context) string {
return fmt.Sprintf("fbRvStorage[%d, %s, %s, %s]",
return fmt.Sprintf("fbRvStorage[%d, %s, %s]",
r.srv.FeatureVersion(),
"...", // TODO: Print out tables map
hash.New(r.srv.ForeignKeyAddrBytes()).String(),
hash.New(r.srv.SuperSchemasAddrBytes()).String())
hash.New(r.srv.ForeignKeyAddrBytes()).String())
}
func (r fbRvStorage) nomsValue() types.Value {

View File

@@ -20,13 +20,13 @@ import (
"os"
"path/filepath"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
@@ -135,8 +135,7 @@ func (mr *MultiRepoTestSetup) NewRemote(remoteName string) {
os.Mkdir(remote, os.ModePerm)
remotePath := fmt.Sprintf("file:///%s", remote)
dEnv := mr.MrEnv.GetEnv(mr.DbNames[0])
rem := env.NewRemote(remoteName, remotePath, nil, dEnv)
rem := env.NewRemote(remoteName, remotePath, nil)
mr.MrEnv.Iter(func(name string, dEnv *env.DoltEnv) (stop bool, err error) {
dEnv.RepoState.AddRemote(rem)
@@ -168,7 +167,7 @@ func (mr *MultiRepoTestSetup) CloneDB(fromRemote, dbName string) {
cloneDir := filepath.Join(mr.Root, dbName)
r := mr.GetRemote(fromRemote)
srcDB, err := r.GetRemoteDB(ctx, types.Format_Default)
srcDB, err := r.GetRemoteDB(ctx, types.Format_Default, mr.MrEnv.GetEnv(dbName))
if err != nil {
mr.Errhand(err)
}
@@ -320,7 +319,16 @@ func (mr *MultiRepoTestSetup) PushToRemote(dbName, remoteName, branchName string
if err != nil {
mr.Errhand(fmt.Sprintf("Failed to push remote: %s", err.Error()))
}
err = actions.DoPush(ctx, dEnv.RepoStateReader(), dEnv.RepoStateWriter(), dEnv.DoltDB, dEnv.TempTableFilesDir(), opts, actions.NoopRunProgFuncs, actions.NoopStopProgFuncs)
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), mr.MrEnv.GetEnv(dbName))
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
mr.Errhand(actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err))
}
mr.Errhand(fmt.Sprintf("Failed to get remote database: %s", err.Error()))
}
err = actions.DoPush(ctx, dEnv.RepoStateReader(), dEnv.RepoStateWriter(), dEnv.DoltDB, remoteDB, dEnv.TempTableFilesDir(), opts, actions.NoopRunProgFuncs, actions.NoopStopProgFuncs)
if err != nil {
mr.Errhand(fmt.Sprintf("Failed to push remote: %s", err.Error()))
}

View File

@@ -18,7 +18,6 @@ import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"sync"
@@ -56,6 +55,7 @@ var ErrUserNotFound = errors.New("could not determine user name. run dolt config
var ErrEmailNotFound = errors.New("could not determine email. run dolt config --global --add user.email")
var ErrCloneFailed = errors.New("clone failed")
// EnvForClone creates a new DoltEnv and configures it with repo state from the specified remote. The returned DoltEnv is ready for content to be cloned into it. The directory used for the new DoltEnv is determined by resolving the specified dir against the specified Filesys.
func EnvForClone(ctx context.Context, nbf *types.NomsBinFormat, r env.Remote, dir string, fs filesys.Filesys, version string, homeProvider env.HomeDirProvider) (*env.DoltEnv, error) {
exists, _ := fs.Exists(filepath.Join(dir, dbfactory.DoltDir))
@@ -64,20 +64,17 @@ func EnvForClone(ctx context.Context, nbf *types.NomsBinFormat, r env.Remote, di
}
err := fs.MkDirs(dir)
if err != nil {
return nil, fmt.Errorf("%w: %s; %s", ErrFailedToCreateDirectory, dir, err.Error())
}
err = os.Chdir(dir)
newFs, err := fs.WithWorkingDir(dir)
if err != nil {
return nil, fmt.Errorf("%w: %s; %s", ErrFailedToAccessDir, dir, err.Error())
}
dEnv := env.Load(ctx, homeProvider, fs, doltdb.LocalDirDoltDB, version)
dEnv := env.Load(ctx, homeProvider, newFs, doltdb.LocalDirDoltDB, version)
err = dEnv.InitRepoWithNoData(ctx, nbf)
if err != nil {
return nil, fmt.Errorf("%w; %s", ErrFailedToInitRepo, err.Error())
}
@@ -85,7 +82,6 @@ func EnvForClone(ctx context.Context, nbf *types.NomsBinFormat, r env.Remote, di
dEnv.RSLoadErr = nil
if !env.IsEmptyRemote(r) {
dEnv.RepoState, err = env.CloneRepoState(dEnv.FS, r)
if err != nil {
return nil, fmt.Errorf("%w: %s; %s", ErrFailedToCreateRepoStateWithRemote, r.Name, err.Error())
}

View File

@@ -78,7 +78,7 @@ func CommitStaged(ctx context.Context, roots doltdb.Roots, mergeActive bool, mer
}
}
stagedRoot, err := roots.Staged.UpdateSuperSchemasFromOther(ctx, stagedTblNames, roots.Staged)
stagedRoot := roots.Staged
if err != nil {
return nil, err
}
@@ -96,7 +96,7 @@ func CommitStaged(ctx context.Context, roots doltdb.Roots, mergeActive bool, mer
return nil, err
}
workingRoot, err := roots.Working.UpdateSuperSchemasFromOther(ctx, stagedTblNames, stagedRoot)
workingRoot := roots.Working
if err != nil {
return nil, err
}
@@ -182,11 +182,6 @@ func GetCommitStaged(
}
}
roots.Staged, err = roots.Staged.UpdateSuperSchemasFromOther(ctx, stagedTblNames, roots.Staged)
if err != nil {
return nil, err
}
if !props.Force {
roots.Staged, err = roots.Staged.ValidateForeignKeysOnSchemas(ctx)
if err != nil {
@@ -194,11 +189,6 @@ func GetCommitStaged(
}
}
roots.Working, err = roots.Working.UpdateSuperSchemasFromOther(ctx, stagedTblNames, roots.Staged)
if err != nil {
return nil, err
}
meta, err := datas.NewCommitMetaWithUserTS(props.Name, props.Email, props.Message, props.Date)
if err != nil {
return nil, err

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/events"
"github.com/dolthub/dolt/go/libraries/utils/earl"
"github.com/dolthub/dolt/go/store/datas"
@@ -91,22 +90,8 @@ func Push(ctx context.Context, tempTableDir string, mode ref.UpdateMode, destRef
return err
}
func DoPush(ctx context.Context, rsr env.RepoStateReader, rsw env.RepoStateWriter, srcDB *doltdb.DoltDB, tempTableDir string, opts *env.PushOpts, progStarter ProgStarter, progStopper ProgStopper) error {
destDB, err := opts.Remote.GetRemoteDB(ctx, srcDB.ValueReadWriter().Format())
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
urlObj, _ := earl.Parse(opts.Remote.Url)
path := urlObj.Path
if path[0] == '/' {
path = path[1:]
}
var detail = fmt.Sprintf("the remote: %s %s '%s' should be in the format 'organization/repo'", opts.Remote.Name, opts.Remote.Url, path)
return fmt.Errorf("%w; %s; %s", ErrFailedToGetRemoteDb, detail, err.Error())
}
return err
}
func DoPush(ctx context.Context, rsr env.RepoStateReader, rsw env.RepoStateWriter, srcDB, destDB *doltdb.DoltDB, tempTableDir string, opts *env.PushOpts, progStarter ProgStarter, progStopper ProgStopper) error {
var err error
switch opts.SrcRef.GetType() {
case ref.BranchRefType:
@@ -375,12 +360,9 @@ func FetchRemoteBranch(
}
// FetchRefSpecs is the common SQL and CLI entrypoint for fetching branches, tags, and heads from a remote.
func FetchRefSpecs(ctx context.Context, dbData env.DbData, refSpecs []ref.RemoteRefSpec, remote env.Remote, mode ref.UpdateMode, progStarter ProgStarter, progStopper ProgStopper) error {
srcDB, err := remote.GetRemoteDBWithoutCaching(ctx, dbData.Ddb.ValueReadWriter().Format())
if err != nil {
return err
}
// This function takes dbData which is a env.DbData object for handling repoState read and write, and srcDB is
// a remote *doltdb.DoltDB object that is used to fetch remote branches from.
func FetchRefSpecs(ctx context.Context, dbData env.DbData, srcDB *doltdb.DoltDB, refSpecs []ref.RemoteRefSpec, remote env.Remote, mode ref.UpdateMode, progStarter ProgStarter, progStopper ProgStopper) error {
branchRefs, err := srcDB.GetHeadRefs(ctx)
if err != nil {
return env.ErrFailedToReadDb
@@ -480,3 +462,14 @@ func SyncRoots(ctx context.Context, srcDb, destDb *doltdb.DoltDB, tempTableDir s
return nil
}
// HandleInvalidDoltSpecPathErr wraps err with an explanation of the expected
// 'organization/repo' remote path format for the named remote and url.
func HandleInvalidDoltSpecPathErr(name, url string, err error) error {
	urlObj, _ := earl.Parse(url)
	path := urlObj.Path
	// Guard the index: earl.Parse can yield an empty path (e.g. malformed url),
	// and path[0] would panic on "".
	if len(path) > 0 && path[0] == '/' {
		path = path[1:]
	}
	detail := fmt.Sprintf("the remote: %s %s '%s' should be in the format 'organization/repo'", name, url, path)
	return fmt.Errorf("%w; %s; %s", ErrFailedToGetRemoteDb, detail, err.Error())
}

View File

@@ -76,16 +76,6 @@ func MoveTablesBetweenRoots(ctx context.Context, tbls []string, src, dest *doltd
if err != nil {
return nil, err
}
ss, _, err := src.GetSuperSchema(ctx, td.ToName)
if err != nil {
return nil, err
}
dest, err = dest.PutSuperSchema(ctx, td.ToName, ss)
if err != nil {
return nil, err
}
}
}

View File

@@ -98,7 +98,7 @@ type DoltEnv struct {
IgnoreLockFile bool
}
// Load loads the DoltEnv for the current directory of the cli
// Load loads the DoltEnv for the .dolt directory determined by resolving the specified urlStr with the specified Filesys.
func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr, version string) *DoltEnv {
config, cfgErr := LoadDoltCliConfig(hdp, fs)
repoState, rsErr := LoadRepoState(fs)
@@ -120,14 +120,12 @@ func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr,
if dEnv.RepoState != nil {
remotes := make(map[string]Remote, len(dEnv.RepoState.Remotes))
for n, r := range dEnv.RepoState.Remotes {
r.dialer = dEnv
remotes[n] = r
}
dEnv.RepoState.Remotes = remotes
backups := make(map[string]Remote, len(dEnv.RepoState.Backups))
for n, r := range dEnv.RepoState.Backups {
r.dialer = dEnv
backups[n] = r
}
dEnv.RepoState.Backups = backups
@@ -363,7 +361,7 @@ func (dEnv *DoltEnv) InitRepoWithNoData(ctx context.Context, nbf *types.NomsBinF
return err
}
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, filesys.LocalFS)
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, dEnv.FS)
return err
}
@@ -641,12 +639,12 @@ func (r *repoStateWriter) SetCWBHeadRef(ctx context.Context, marshalableRef ref.
return nil
}
func (r *repoStateWriter) AddRemote(name string, url string, fetchSpecs []string, params map[string]string) error {
return r.DoltEnv.AddRemote(name, url, fetchSpecs, params)
func (r *repoStateWriter) AddRemote(remote Remote) error {
return r.DoltEnv.AddRemote(remote)
}
func (r *repoStateWriter) AddBackup(name string, url string, fetchSpecs []string, params map[string]string) error {
return r.DoltEnv.AddBackup(name, url, fetchSpecs, params)
func (r *repoStateWriter) AddBackup(remote Remote) error {
return r.DoltEnv.AddBackup(remote)
}
func (r *repoStateWriter) RemoveRemote(ctx context.Context, name string) error {
@@ -908,32 +906,28 @@ func checkRemoteAddressConflict(url string, remotes, backups map[string]Remote)
return NoRemote, false
}
func (dEnv *DoltEnv) AddRemote(name string, url string, fetchSpecs []string, params map[string]string) error {
if _, ok := dEnv.RepoState.Remotes[name]; ok {
func (dEnv *DoltEnv) AddRemote(r Remote) error {
if _, ok := dEnv.RepoState.Remotes[r.Name]; ok {
return ErrRemoteAlreadyExists
}
if strings.IndexAny(name, " \t\n\r./\\!@#$%^&*(){}[],.<>'\"?=+|") != -1 {
if strings.IndexAny(r.Name, " \t\n\r./\\!@#$%^&*(){}[],.<>'\"?=+|") != -1 {
return ErrInvalidRemoteName
}
_, absRemoteUrl, err := GetAbsRemoteUrl(dEnv.FS, dEnv.Config, url)
_, absRemoteUrl, err := GetAbsRemoteUrl(dEnv.FS, dEnv.Config, r.Url)
if err != nil {
return fmt.Errorf("%w; %s", ErrInvalidRemoteURL, err.Error())
}
// can have multiple remotes with the same address, but no conflicting backups
if r, found := checkRemoteAddressConflict(absRemoteUrl, nil, dEnv.RepoState.Backups); found {
return fmt.Errorf("%w: '%s' -> %s", ErrRemoteAddressConflict, r.Name, r.Url)
if rem, found := checkRemoteAddressConflict(absRemoteUrl, nil, dEnv.RepoState.Backups); found {
return fmt.Errorf("%w: '%s' -> %s", ErrRemoteAddressConflict, rem.Name, rem.Url)
}
r := Remote{name, absRemoteUrl, fetchSpecs, params, dEnv}
r.Url = absRemoteUrl
dEnv.RepoState.AddRemote(r)
err = dEnv.RepoState.Save(dEnv.FS)
if err != nil {
return err
}
return nil
return dEnv.RepoState.Save(dEnv.FS)
}
func (dEnv *DoltEnv) GetBackups() (map[string]Remote, error) {
@@ -944,32 +938,28 @@ func (dEnv *DoltEnv) GetBackups() (map[string]Remote, error) {
return dEnv.RepoState.Backups, nil
}
func (dEnv *DoltEnv) AddBackup(name string, url string, fetchSpecs []string, params map[string]string) error {
if _, ok := dEnv.RepoState.Backups[name]; ok {
func (dEnv *DoltEnv) AddBackup(r Remote) error {
if _, ok := dEnv.RepoState.Backups[r.Name]; ok {
return ErrBackupAlreadyExists
}
if strings.IndexAny(name, " \t\n\r./\\!@#$%^&*(){}[],.<>'\"?=+|") != -1 {
if strings.IndexAny(r.Name, " \t\n\r./\\!@#$%^&*(){}[],.<>'\"?=+|") != -1 {
return ErrInvalidBackupName
}
_, absRemoteUrl, err := GetAbsRemoteUrl(dEnv.FS, dEnv.Config, url)
_, absRemoteUrl, err := GetAbsRemoteUrl(dEnv.FS, dEnv.Config, r.Url)
if err != nil {
return fmt.Errorf("%w; %s", ErrInvalidBackupURL, err.Error())
}
// no conflicting remote or backup addresses
if r, found := checkRemoteAddressConflict(absRemoteUrl, dEnv.RepoState.Remotes, dEnv.RepoState.Backups); found {
return fmt.Errorf("%w: '%s' -> %s", ErrRemoteAddressConflict, r.Name, r.Url)
if rem, found := checkRemoteAddressConflict(absRemoteUrl, dEnv.RepoState.Remotes, dEnv.RepoState.Backups); found {
return fmt.Errorf("%w: '%s' -> %s", ErrRemoteAddressConflict, rem.Name, rem.Url)
}
r := Remote{name, absRemoteUrl, fetchSpecs, params, dEnv}
r.Url = absRemoteUrl
dEnv.RepoState.AddBackup(r)
err = dEnv.RepoState.Save(dEnv.FS)
if err != nil {
return err
}
return nil
return dEnv.RepoState.Save(dEnv.FS)
}
func (dEnv *DoltEnv) RemoveRemote(ctx context.Context, name string) error {

View File

@@ -195,7 +195,7 @@ func (m MemoryRepoState) GetRemotes() (map[string]Remote, error) {
return make(map[string]Remote), nil
}
func (m MemoryRepoState) AddRemote(name string, url string, fetchSpecs []string, params map[string]string) error {
func (m MemoryRepoState) AddRemote(r Remote) error {
return fmt.Errorf("cannot insert a remote in a memory database")
}
@@ -219,7 +219,7 @@ func (m MemoryRepoState) GetBackups() (map[string]Remote, error) {
panic("cannot get backups on in memory database")
}
func (m MemoryRepoState) AddBackup(name string, url string, fetchSpecs []string, params map[string]string) error {
func (m MemoryRepoState) AddBackup(r Remote) error {
panic("cannot add backup to in memory database")
}

View File

@@ -58,6 +58,13 @@ func (mrEnv *MultiRepoEnv) FileSystem() filesys.Filesys {
return mrEnv.fs
}
// RemoteDialProvider returns a GRPCDialProvider taken from one of the envs in
// this MultiRepoEnv, or nil when it contains no envs. Map iteration order is
// unspecified, so with multiple envs an arbitrary one supplies the provider —
// presumably any env's provider is equivalent; TODO confirm.
func (mrEnv *MultiRepoEnv) RemoteDialProvider() dbfactory.GRPCDialProvider {
	for _, env := range mrEnv.envs {
		return env.env
	}
	return nil
}
func (mrEnv *MultiRepoEnv) Config() config.ReadWriteConfig {
return mrEnv.cfg
}

View File

@@ -57,11 +57,10 @@ type Remote struct {
Url string `json:"url"`
FetchSpecs []string `json:"fetch_specs"`
Params map[string]string `json:"params"`
dialer dbfactory.GRPCDialProvider
}
func NewRemote(name, url string, params map[string]string, dialer dbfactory.GRPCDialProvider) Remote {
return Remote{name, url, []string{"refs/heads/*:refs/remotes/" + name + "/*"}, params, dialer}
func NewRemote(name, url string, params map[string]string) Remote {
return Remote{name, url, []string{"refs/heads/*:refs/remotes/" + name + "/*"}, params}
}
func (r *Remote) GetParam(pName string) (string, bool) {
@@ -79,26 +78,25 @@ func (r *Remote) GetParamOrDefault(pName, defVal string) string {
return val
}
func (r *Remote) GetRemoteDB(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
func (r *Remote) GetRemoteDB(ctx context.Context, nbf *types.NomsBinFormat, dialer dbfactory.GRPCDialProvider) (*doltdb.DoltDB, error) {
params := make(map[string]interface{})
for k, v := range r.Params {
params[k] = v
}
if r.dialer != nil {
params[dbfactory.GRPCDialProviderParam] = r.dialer
}
params[dbfactory.GRPCDialProviderParam] = dialer
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, params)
}
func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsBinFormat, dialer dbfactory.GRPCDialProvider) (*doltdb.DoltDB, error) {
params := make(map[string]interface{})
for k, v := range r.Params {
params[k] = v
}
params[dbfactory.NoCachingParameter] = "true"
if r.dialer != nil {
params[dbfactory.GRPCDialProviderParam] = r.dialer
}
params[dbfactory.GRPCDialProviderParam] = dialer
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, params)
}
@@ -412,7 +410,7 @@ func GetAbsRemoteUrl(fs filesys2.Filesys, cfg config.ReadableConfig, urlArg stri
return "", "", err
}
if u.Scheme != "" {
if u.Scheme != "" && fs != nil {
if u.Scheme == dbfactory.FileScheme || u.Scheme == dbfactory.LocalBSScheme {
absUrl, err := getAbsFileRemoteUrl(u.Host+u.Path, u.Scheme, fs)

View File

@@ -38,8 +38,8 @@ type RepoStateWriter interface {
// TODO: get rid of this
UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error
SetCWBHeadRef(context.Context, ref.MarshalableRef) error
AddRemote(name string, url string, fetchSpecs []string, params map[string]string) error
AddBackup(name string, url string, fetchSpecs []string, params map[string]string) error
AddRemote(r Remote) error
AddBackup(r Remote) error
RemoveRemote(ctx context.Context, name string) error
RemoveBackup(ctx context.Context, name string) error
TempTableFilesDir() string

View File

@@ -1,410 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envtestutils
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
tc "github.com/dolthub/dolt/go/libraries/doltcore/dtestutils/testcommands"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
)
const (
pkTag = 16191
c0Tag = 8734
c1Tag = 15903
c11Tag = 15001
)
// SuperSchemaTest describes one scripted scenario that runs Commands and then
// verifies the resulting schema and super schema of TableName.
type SuperSchemaTest struct {
	// The name of this test. Names should be unique and descriptive.
	Name string
	// Name of the table to be verified
	TableName string
	// The modifying queries to run
	Commands []tc.Command
	// Expected branch checked out after Commands have run
	ExpectedBranch string
	// The schema of the result of the query, nil if an error is expected
	ExpectedSchema schema.Schema
	// The super schema expected for TableName after Commands run, nil if an error is expected
	ExpectedSuperSchema *schema.SuperSchema
	// An expected error string
	ExpectedErrStr string
}
// testableDef is the DDL used by these tests to create the 'testable' table.
// fmt.Sprintf with no format arguments was a no-op (staticcheck S1039); use the literal directly.
var testableDef = "create table testable (pk int not null primary key);"
var SuperSchemaTests = []SuperSchemaTest{
{
Name: "can create super schema",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.CommitAll{Message: "created table testable"},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
},
{
Name: "get super schema without commit",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
},
{
Name: "add column",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.CommitAll{Message: "created table testable"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "drop column",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int")},
tc.CommitAll{Message: "created table testable"},
tc.Query{Query: "alter table testable drop column c0"},
tc.CommitAll{Message: "dropped column c0"},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "modify column",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.CommitAll{Message: "created table testable"},
tc.Query{Query: "alter table testable drop column c0"},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "drop column from working set",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.Query{Query: "alter table testable drop column c0"},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
},
{
Name: "staged column persisted on commit, not working column",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.CommitAll{Message: "created table testable"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.StageAll{},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
tc.CommitStaged{Message: "adding staged column c0"},
tc.ResetHard{},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "super schema on branch main",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.CommitAll{Message: "created table testable"},
tc.Branch{BranchName: "other"},
tc.Checkout{BranchName: "other"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c11 int;")},
tc.CommitAll{Message: "added column c11 on branch other"},
tc.Checkout{BranchName: env.DefaultInitBranch},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "super schema on branch other",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.CommitAll{Message: "created table testable"},
tc.Branch{BranchName: "other"},
tc.Checkout{BranchName: "other"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c11 int;")},
tc.CommitAll{Message: "added column c11 on branch other"},
tc.Checkout{BranchName: env.DefaultInitBranch},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
tc.CommitAll{Message: "added column c1 on branch main"},
tc.Checkout{BranchName: "other"},
},
ExpectedBranch: "other",
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
)),
},
// https://github.com/dolthub/dolt/issues/773
/*{
Name: "super schema merge",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.CommitAll{Message: "created table testable"},
tc.Branch{BranchName: "other"},
tc.Checkout{BranchName: "other"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c11 int;")},
tc.CommitAll{Message: "added column c11 on branch other"},
tc.Checkout{BranchName: "main"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
tc.CommitAll{Message: "added column c1 on branch main"},
tc.Merge{BranchName: "other"},
},
ExpectedBranch: "main",
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
)),
},
{
Name: "super schema merge with drops",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.CommitAll{Message: "created table testable"},
tc.Branch{BranchName: "other"},
tc.Checkout{BranchName: "other"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c11 int;")},
tc.Query{Query: fmt.Sprintf("alter table testable add column c12 int;")},
tc.CommitAll{Message: "added columns c11 and c12 on branch other"},
tc.Query{Query: "alter table testable drop column c12;"},
tc.CommitAll{Message: "dropped column c12 on branch other"},
tc.Checkout{BranchName: "main"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
tc.CommitAll{Message: "added column c1 on branch main"},
tc.Merge{BranchName: "other"},
tc.CommitAll{Message: "Merged other into main"},
},
ExpectedBranch: "main",
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
newColTypeInfo("c11", c11Tag, typeinfo.Int32Type, false),
newColTypeInfo("c12", c12Tag, typeinfo.Int32Type, false),
)),
},*/
{
Name: "super schema with table add/drops",
TableName: "testable",
Commands: []tc.Command{
tc.Query{Query: testableDef},
tc.Query{Query: fmt.Sprintf("alter table testable add column c0 int;")},
tc.Query{Query: "create table foo (pk int not null primary key);"},
tc.CommitAll{Message: "created tables testable and foo"},
tc.Query{Query: fmt.Sprintf("alter table testable add column c1 int;")},
tc.Query{Query: "create table qux (pk int not null primary key);"},
tc.Query{Query: "drop table foo;"},
tc.CommitAll{Message: "added column c1 on branch main, created table qux, dropped table foo"},
},
ExpectedBranch: env.DefaultInitBranch,
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
newColTypeInfo("c0", c0Tag, typeinfo.Int32Type, false),
newColTypeInfo("c1", c1Tag, typeinfo.Int32Type, false),
)),
},
{
// This test corresponds to @test "diff sql reconciles DROP TABLE" in sql_diff.bats
Name: "sql diff bats test",
TableName: "testable",
Commands: []tc.Command{
tc.Branch{BranchName: "first"},
tc.Checkout{BranchName: "first"},
tc.Query{Query: testableDef},
tc.Query{Query: "insert into testable values (1);"},
tc.CommitAll{Message: "setup table"},
tc.Branch{BranchName: "other"},
tc.Checkout{BranchName: "other"},
tc.Query{Query: "drop table testable;"},
tc.CommitAll{Message: "removed table"},
tc.Checkout{BranchName: "first"},
},
ExpectedBranch: "first",
ExpectedSchema: schema.MustSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
ExpectedSuperSchema: superSchemaFromCols(columnCollection(
newColTypeInfo("pk", pkTag, typeinfo.Int32Type, true, schema.NotNullConstraint{}),
)),
},
}
// TestSuperSchema runs every case in SuperSchemaTests as an independently
// named subtest, so a single failing scenario can be re-run in isolation
// with `go test -run TestSuperSchema/<name>`.
func TestSuperSchema(t *testing.T) {
	for i := range SuperSchemaTests {
		testCase := SuperSchemaTests[i]
		t.Run(testCase.Name, func(t *testing.T) {
			testSuperSchema(t, testCase)
		})
	}
}
// testSuperSchema executes the commands of a single SuperSchemaTest against a
// fresh in-memory repo environment, then verifies the resulting checked-out
// branch, table schema, and super schema — or, if ExpectedErrStr is set,
// verifies that the final command failed with a matching error.
func testSuperSchema(t *testing.T, test SuperSchemaTest) {
	dEnv := dtestutils.CreateTestEnv()

	var ee error
	for idx, cmd := range test.Commands {
		// Only the final command is permitted to fail (and only when the
		// test declares ExpectedErrStr); any earlier failure aborts here.
		require.NoError(t, ee)
		fmt.Printf("%d: %s: %s\n", idx, cmd.CommandString(), cmd)
		ee = cmd.Exec(t, dEnv)
	}

	if test.ExpectedErrStr != "" {
		// NOTE: require.Error treats trailing args as a failure *message*,
		// not an expected value — assert the error content explicitly.
		require.Error(t, ee)
		require.Contains(t, ee.Error(), test.ExpectedErrStr)
		return
	}

	// The last command must also have succeeded before we inspect state.
	require.NoError(t, ee)

	spec := dEnv.RepoStateReader().CWBHeadRef()
	require.Equal(t, "refs/heads/"+test.ExpectedBranch, spec.String())

	r, err := dEnv.WorkingRoot(context.Background())
	require.NoError(t, err)

	tbl, ok, err := r.GetTable(context.Background(), test.TableName)
	require.NoError(t, err)
	require.True(t, ok)

	ss, found, err := r.GetSuperSchema(context.Background(), test.TableName)
	require.NoError(t, err)
	require.True(t, found)
	assert.Equal(t, test.ExpectedSuperSchema, ss)

	sch, err := tbl.GetSchema(context.Background())
	require.NoError(t, err)
	assert.Equal(t, test.ExpectedSchema, sch)
}
// superSchemaFromCols builds the SuperSchema for a schema consisting of the
// given column collection. It panics on failure, which is acceptable for a
// test-fixture helper and matches the sibling helper newColTypeInfo — the
// original silently discarded the error from schema.NewSuperSchema.
func superSchemaFromCols(cols *schema.ColCollection) *schema.SuperSchema {
	sch := schema.MustSchemaFromCols(cols)
	ss, err := schema.NewSuperSchema(sch)
	if err != nil {
		panic("could not create super schema: " + err.Error())
	}
	return ss
}
// columnCollection is a variadic test convenience wrapper around
// schema.NewColCollection.
func columnCollection(cols ...schema.Column) *schema.ColCollection {
	collection := schema.NewColCollection(cols...)
	return collection
}
// newColTypeInfo constructs a schema.Column with the given name, tag, type
// info, primary-key membership, and constraints. It panics on failure since
// it is only used to build static test fixtures; unlike the original, the
// panic now surfaces the column name and the underlying error instead of
// swallowing them.
func newColTypeInfo(name string, tag uint64, typeInfo typeinfo.TypeInfo, partOfPK bool, constraints ...schema.ColConstraint) schema.Column {
	c, err := schema.NewColumnWithTypeInfo(name, tag, typeInfo, partOfPK, "", false, "", constraints...)
	if err != nil {
		panic(fmt.Sprintf("could not create column %q: %v", name, err))
	}
	return c
}

View File

@@ -178,11 +178,6 @@ func MergeRoots(
return nil, nil, err
}
mergedRoot, err = mergedRoot.UpdateSuperSchemasFromOther(ctx, tblNames, theirRoot)
if err != nil {
return nil, nil, err
}
h, err := merger.rightSrc.HashOf()
if err != nil {
return nil, nil, err

View File

@@ -120,7 +120,7 @@ func (rm *RootMerger) MergeTable(ctx context.Context, tblName string, opts edito
return nil, nil, errors.New(fmt.Sprintf("schema changes not supported: %s table schema does not match in current HEAD and cherry-pick commit.", tblName))
}
mergeSch, schConflicts, err := SchemaMerge(tm.leftSch, tm.rightSch, tm.ancSch, tblName)
mergeSch, schConflicts, err := SchemaMerge(tm.vrw.Format(), tm.leftSch, tm.rightSch, tm.ancSch, tblName)
if err != nil {
return nil, nil, err
}

View File

@@ -25,6 +25,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
)
type conflictKind byte
@@ -120,7 +121,7 @@ func (c ChkConflict) String() string {
var ErrMergeWithDifferentPkSets = errors.New("error: cannot merge two tables with different primary key sets")
// SchemaMerge performs a three-way merge of ourSch, theirSch, and ancSch.
func SchemaMerge(ourSch, theirSch, ancSch schema.Schema, tblName string) (sch schema.Schema, sc SchemaConflict, err error) {
func SchemaMerge(format *types.NomsBinFormat, ourSch, theirSch, ancSch schema.Schema, tblName string) (sch schema.Schema, sc SchemaConflict, err error) {
// (sch - ancSch) (mergeSch - ancSch) (sch ∩ mergeSch)
sc = SchemaConflict{
TableName: tblName,
@@ -128,7 +129,7 @@ func SchemaMerge(ourSch, theirSch, ancSch schema.Schema, tblName string) (sch sc
// TODO: We'll remove this once it's possible to get diff and merge on different primary key sets
// TODO: decide how to merge different orders of PKS
if !schema.ArePrimaryKeySetsDiffable(ourSch, theirSch) {
if !schema.ArePrimaryKeySetsDiffable(format, ourSch, theirSch) {
return nil, SchemaConflict{}, ErrMergeWithDifferentPkSets
}

View File

@@ -31,6 +31,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/types"
)
type testCommand struct {
@@ -611,7 +612,7 @@ func testMergeSchemasWithConflicts(t *testing.T, test mergeSchemaConflictTest) {
otherSch := getSchema(t, dEnv)
_, actConflicts, err := merge.SchemaMerge(mainSch, otherSch, ancSch, "test")
_, actConflicts, err := merge.SchemaMerge(types.Format_Default, mainSch, otherSch, ancSch, "test")
if test.expectedErr != nil {
assert.True(t, errors.Is(err, test.expectedErr))
return

View File

@@ -212,7 +212,7 @@ func nomsParentFkConstraintViolations(
return nil, false, err
}
differ := diff.NewRowDiffer(ctx, preParentSch, postParent.Schema, 1024)
differ := diff.NewRowDiffer(ctx, preParentRowData.Format(), preParentSch, postParent.Schema, 1024)
defer differ.Close()
differ.Start(ctx, preParentRowData, durable.NomsMapFromIndex(postParent.RowData))
for {
@@ -379,7 +379,7 @@ func nomsChildFkConstraintViolations(
return nil, false, err
}
differ := diff.NewRowDiffer(ctx, preChildSch, postChild.Schema, 1024)
differ := diff.NewRowDiffer(ctx, preChildRowData.Format(), preChildSch, postChild.Schema, 1024)
defer differ.Close()
differ.Start(ctx, preChildRowData, durable.NomsMapFromIndex(postChild.RowData))
for {

View File

@@ -65,6 +65,10 @@ type SqlEngineTableWriter struct {
}
func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTableSchema, rowOperationSchema schema.Schema, options *MoverOptions, statsCB noms.StatsCB) (*SqlEngineTableWriter, error) {
if dEnv.IsLocked() {
return nil, env.ErrActiveServerLock.New(dEnv.LockFile())
}
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
return nil, err

View File

@@ -430,82 +430,3 @@ func UnmarshalSchemaNomsValue(ctx context.Context, nbf *types.NomsBinFormat, sch
return sch, nil
}
type superSchemaData struct {
Columns []encodedColumn `noms:"columns" json:"columns"`
TagNames map[uint64][]string `noms:"col_constraints" json:"col_constraints"`
}
func toSuperSchemaData(ss *schema.SuperSchema) (superSchemaData, error) {
encCols := make([]encodedColumn, ss.Size())
tn := make(map[uint64][]string)
i := 0
err := ss.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
encCols[i] = encodeColumn(col)
tn[tag] = ss.AllColumnNames(tag)
i++
return false, nil
})
if err != nil {
return superSchemaData{}, err
}
return superSchemaData{encCols, tn}, nil
}
func (ssd superSchemaData) decodeSuperSchema() (*schema.SuperSchema, error) {
numCols := len(ssd.Columns)
cols := make([]schema.Column, numCols)
for i, col := range ssd.Columns {
c, err := col.decodeColumn()
if err != nil {
return nil, err
}
cols[i] = c
}
colColl := schema.NewColCollection(cols...)
if ssd.TagNames == nil {
ssd.TagNames = make(map[uint64][]string)
}
return schema.UnmarshalSuperSchema(colColl, ssd.TagNames), nil
}
// MarshalSuperSchemaAsNomsValue creates a Noms value from a SuperSchema to be written to a RootValue.
func MarshalSuperSchemaAsNomsValue(ctx context.Context, vrw types.ValueReadWriter, ss *schema.SuperSchema) (types.Value, error) {
ssd, err := toSuperSchemaData(ss)
if err != nil {
return types.EmptyStruct(vrw.Format()), err
}
val, err := marshal.Marshal(ctx, vrw, ssd)
if err != nil {
return types.EmptyStruct(vrw.Format()), err
}
if _, ok := val.(types.Struct); ok {
return val, nil
}
return types.EmptyStruct(vrw.Format()), errors.New("Table Super Schema could not be converted to types.Struct")
}
// UnmarshalSuperSchemaNomsValue takes a Noms value read from a RootValue and constructs a SuperSchema from it.
func UnmarshalSuperSchemaNomsValue(ctx context.Context, nbf *types.NomsBinFormat, ssVal types.Value) (*schema.SuperSchema, error) {
var ssd superSchemaData
err := marshal.Unmarshal(ctx, nbf, ssVal, &ssd)
if err != nil {
return nil, err
}
return ssd.decodeSuperSchema()
}

View File

@@ -83,20 +83,6 @@ func TestNomsMarshalling(t *testing.T) {
t.Error("Value different after marshalling and unmarshalling.")
}
}
tSuperSchema, err := schema.NewSuperSchema(tSchema)
require.NoError(t, err)
ssVal, err := MarshalSuperSchemaAsNomsValue(context.Background(), vrw, tSuperSchema)
require.NoError(t, err)
unMarshalledSS, err := UnmarshalSuperSchemaNomsValue(context.Background(), types.Format_Default, ssVal)
require.NoError(t, err)
if !reflect.DeepEqual(tSuperSchema, unMarshalledSS) {
t.Error("Value different after marshalling and unmarshalling.")
}
}
func getSqlTypes() []sql.Type {

View File

@@ -62,8 +62,8 @@ func serializeSchemaAsFlatbuffer(sch schema.Schema) ([]byte, error) {
serial.TableSchemaAddSecondaryIndexes(b, indexes)
serial.TableSchemaAddChecks(b, checks)
root := serial.TableSchemaEnd(b)
b.FinishWithFileIdentifier(root, []byte(serial.TableSchemaFileID))
return b.FinishedBytes(), nil
bs := serial.FinishMessage(b, root, []byte(serial.TableSchemaFileID))
return bs, nil
}
// DeserializeSchema deserializes a schema.Schema from a serial.Message.
@@ -76,7 +76,7 @@ func DeserializeSchema(ctx context.Context, nbf *types.NomsBinFormat, v types.Va
func deserializeSchemaFromFlatbuffer(ctx context.Context, buf []byte) (schema.Schema, error) {
assertTrue(serial.GetFileID(buf) == serial.TableSchemaFileID)
s := serial.GetRootAsTableSchema(buf, 0)
s := serial.GetRootAsTableSchema(buf, serial.MessagePrefixSz)
cols, err := deserializeColumns(ctx, s)
if err != nil {

View File

@@ -25,7 +25,8 @@ import (
// Schema is an interface for retrieving the columns that make up a schema
type Schema interface {
// GetPKCols gets the collection of columns which make the primary key.
// GetPKCols gets the collection of columns which make the primary key. They
// are always returned in ordinal order.
GetPKCols() *ColCollection
// GetNonPKCols gets the collection of columns which are not part of the primary key.
@@ -165,9 +166,10 @@ func GetSharedCols(schema Schema, cmpNames []string, cmpKinds []types.NomsKind)
return shared
}
// ArePrimaryKeySetsDiffable checks if two schemas are diffable. Assumes the passed in schema are from the same table
// between commits.
func ArePrimaryKeySetsDiffable(fromSch, toSch Schema) bool {
// ArePrimaryKeySetsDiffable checks if two schemas are diffable. Assumes the
// passed in schema are from the same table between commits. If __DOLT_1__, then
// it also checks if the underlying SQL types of the columns are equal.
func ArePrimaryKeySetsDiffable(format *types.NomsBinFormat, fromSch, toSch Schema) bool {
if fromSch == nil && toSch == nil {
return false
// Empty case
@@ -194,6 +196,9 @@ func ArePrimaryKeySetsDiffable(fromSch, toSch Schema) bool {
if (c1.Tag != c2.Tag) || (c1.IsPartOfPK != c2.IsPartOfPK) {
return false
}
if types.IsFormat_DOLT_1(format) && !c1.TypeInfo.ToSqlType().Equals(c2.TypeInfo.ToSqlType()) {
return false
}
}
ords1 := fromSch.GetPkOrdinals()

View File

@@ -103,6 +103,47 @@ func TestGetSharedCols(t *testing.T) {
assert.Equal(t, expected, res)
}
func TestSetPkOrder(t *testing.T) {
// GetPkCols() should always return columns in ordinal order
// GetAllCols() should always return columns in the defined schema's order
t.Run("returns the correct GetPkCols() order", func(t *testing.T) {
allColColl := NewColCollection(allCols...)
pkColColl := NewColCollection(pkCols...)
sch, err := SchemaFromCols(allColColl)
require.NoError(t, err)
require.Equal(t, allColColl, sch.GetAllCols())
require.Equal(t, pkColColl, sch.GetPKCols())
err = sch.SetPkOrdinals([]int{1, 0})
require.NoError(t, err)
expectedPkColColl := NewColCollection(pkCols[1], pkCols[0])
require.Equal(t, expectedPkColColl, sch.GetPKCols())
require.Equal(t, allColColl, sch.GetAllCols())
})
t.Run("Can round-trip", func(t *testing.T) {
allColColl := NewColCollection(allCols...)
pkColColl := NewColCollection(pkCols...)
sch, err := SchemaFromCols(allColColl)
require.NoError(t, err)
require.Equal(t, allColColl, sch.GetAllCols())
require.Equal(t, pkColColl, sch.GetPKCols())
err = sch.SetPkOrdinals([]int{1, 0})
require.NoError(t, err)
err = sch.SetPkOrdinals([]int{0, 1})
require.NoError(t, err)
require.Equal(t, allColColl, sch.GetAllCols())
require.Equal(t, pkColColl, sch.GetPKCols())
})
}
func mustGetCol(collection *ColCollection, name string) Column {
col, ok := collection.GetByName(name)
if !ok {

Some files were not shown because too many files have changed in this diff Show More