[no-release-notes] /.github/{scripts,workflows}: update existing benchmarking to use new format args
@@ -1,100 +0,0 @@
#!/bin/sh

set -e

if [ "$#" -lt 10 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat>"
exit 1
fi

jobName="$1"
fromServer="$2"
fromVersion="$3"
toServer="$4"
toVersion="$5"
timePrefix="$6"
actorPrefix="$7"
format="$8"
issueNumber="$9"
initBigRepo="${10}"
nomsBinFormat="${11}"
tpccRegex="tpcc%"
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan')"
medianLatencyChangeReadsQuery="select f.test_name as read_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
writeTests="('oltp_read_write', 'oltp_update_index', 'oltp_update_non_index', 'oltp_insert', 'bulk_insert', 'oltp_write_only', 'oltp_delete')"
medianLatencyChangeWritesQuery="select f.test_name as write_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $writeTests group by f.test_name;"
tpccLatencyQuery="select f.test_name as test_name, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.25 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.25 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name LIKE '$tpccRegex' group by f.test_name;"
tpccTpsQuery="select f.test_name as test_name, f.server_name, f.server_version, avg(f.sql_transactions_per_second) as tps, t.test_name as test_name, t.server_name, t.server_version, avg(t.sql_transactions_per_second) as tps, case when ((avg(t.sql_transactions_per_second) - avg(f.sql_transactions_per_second)) / (avg(f.sql_transactions_per_second) + .0000001)) < -0.5 then 1 when ((avg(t.sql_transactions_per_second) - avg(f.sql_transactions_per_second)) / (avg(f.sql_transactions_per_second) + .0000001)) > 0.5 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name LIKE 'tpcc%' group by f.test_name;"
echo '
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "'$jobName'",
"namespace": "performance-benchmarking"
},
"spec": {
"backoffLimit": 1,
"template": {
"spec": {
"serviceAccountName": "performance-benchmarking",
"containers": [
{
"name": "performance-benchmarking",
"image": "407903926827.dkr.ecr.us-west-2.amazonaws.com/liquidata/performance-benchmarking:andy",
"resources": {
"limits": {
"cpu": "7000m"
}
},
"env": [
{ "name": "GOMAXPROCS", "value": "7" },
{ "name": "ACTOR", "value": "'$ACTOR'" },
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'" },
{ "name": "REPO_ACCESS_TOKEN", "value": "'$REPO_ACCESS_TOKEN'" }
],
"imagePullPolicy": "Always",
"args": [
"--schema=/schema.sql",
"--useDoltHubLuaScriptsRepo",
"--output='$format'",
"--from-server='$fromServer'",
"--from-version='$fromVersion'",
"--to-server='$toServer'",
"--to-version='$toVersion'",
"--bucket=performance-benchmarking-github-actions-results",
"--region=us-west-2",
"--issue-number='$issueNumber'",
"--results-dir='$timePrefix'",
"--results-prefix='$actorPrefix'",
"--withTpcc=true",
"--sysbenchQueries='"$medianLatencyChangeReadsQuery"'",
"--sysbenchQueries='"$medianLatencyChangeWritesQuery"'",
"--tpccQueries='"$tpccLatencyQuery"'",
"--tpccQueries='"$tpccTpsQuery"'",
"--init-big-repo='"$initBigRepo"'",
"--noms-bin-format='"$nomsBinFormat"'"
]
}
],
"restartPolicy": "Never",
"nodeSelector": {
"performance-benchmarking-worker": "true"
},
"tolerations": [
{
"effect": "NoSchedule",
"key": "dedicated",
"operator": "Equal",
"value": "performance-benchmarking-worker"
}
]
}
}
}
}
'
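The deleted script, like the templates that remain, builds the manifest by splicing shell variables into a single-quoted echo, so nothing ever validates the resulting JSON. A quick client-side check is to render a job with placeholder arguments and dry-run it through kubectl (a sketch, not part of this commit; every value below is a stand-in):

./get-job-json.sh test-job dolt "$FROM_VERSION" dolt "$TO_VERSION" \
  "$(date +%Y/%m/%d)" test-actor "$FORMAT" 1234 "" "" \
  | kubectl create --dry-run=client -f -   # parses the Job without submitting it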
@@ -2,7 +2,7 @@
set -e

if [ "$#" -lt 10 ]; then
if [ "$#" -lt 9 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat>"
exit 1
fi
@@ -20,6 +20,14 @@ initBigRepo="${10}"
nomsBinFormat="${11}"
tpccRegex="tpcc%"

if [ -n "$initBigRepo" ]; then
initBigRepo="\"--init-big-repo=$initBigRepo\","
fi

if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan')"
medianLatencyChangeReadsQuery="select f.test_name as read_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
@@ -72,12 +80,12 @@ echo '
"--results-dir='$timePrefix'",
"--results-prefix='$actorPrefix'",
"--withTpcc=true",
'"$initBigRepo"'
'"$nomsBinFormat"'
"--sysbenchQueries='"$medianLatencyChangeReadsQuery"'",
"--sysbenchQueries='"$medianLatencyChangeWritesQuery"'",
"--tpccQueries='"$tpccLatencyQuery"'",
"--tpccQueries='"$tpccTpsQuery"'",
"--init-big-repo='"$initBigRepo"'",
"--noms-bin-format='"$nomsBinFormat"'"
"--tpccQueries='"$tpccTpsQuery"'"
]
}
],
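The pattern introduced here turns the optional arguments into self-contained JSON fragments: when initBigRepo or nomsBinFormat is empty, the spliced '"$initBigRepo"' expands to nothing and the flag simply vanishes from the args array; when set, the fragment carries its own quotes and trailing comma. A minimal standalone sketch of the technique (hypothetical flag, not part of the commit):

#!/bin/sh
# Optional-flag splice: an empty value contributes nothing to the JSON array.
flag=""
if [ -n "$1" ]; then
flag="\"--init-big-repo=$1\","
fi
echo '
"args": [
'"$flag"'
"--withTpcc=true"
]
'

Run with no argument it prints an args array holding only --withTpcc=true; run with a value it adds the extra flag with valid JSON punctuation.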
@@ -3,7 +3,7 @@
set -e

if [ "$#" -lt 8 ]; then
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format>"
echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format> <initBigRepo> <nomsBinFormat>"
exit 1
fi
@@ -15,9 +15,19 @@ toVersion="$5"
timeprefix="$6"
actorprefix="$7"
format="$8"
initBigRepo="$9"
nomsBinFormat="${10}"
precision="1"
tpccRegex="tpcc%"

if [ -n "$initBigRepo" ]; then
initBigRepo="\"--init-big-repo=$initBigRepo\","
fi

if [ -n "$nomsBinFormat" ]; then
nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan')"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"
@@ -75,6 +85,8 @@ echo '
"--results-dir='$timeprefix'",
"--results-prefix='$actorprefix'",
"--withTpcc=true",
'"$initBigRepo"'
'"$nomsBinFormat"'
"--sysbenchQueries='"$medianLatencyMultiplierReadsQuery"'",
"--sysbenchQueries='"$meanMultiplierReadsQuery"'",
"--sysbenchQueries='"$medianLatencyMultiplierWritesQuery"'",
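The queries above compare per-test latency percentiles between a from_results and a to_results table. Assuming those tables have been loaded into a local database from the benchmark output (an assumption; the loading happens inside the benchmarking image), a single query can be spot-checked with dolt sql:

# Hypothetical local spot check, not part of the commit: requires a database
# containing from_results and to_results tables with the columns referenced above.
dolt sql -q "$medianLatencyMultiplierReadsQuery"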
@@ -40,7 +40,7 @@ actorShort="$lowered-$short"
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s

timesuffix=`date +%s%N`
jobname="$actorShort_$timesuffix"
jobname="$actorShort-$timesuffix"

timeprefix=$(date +%Y/%m/%d)
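The one-character rename above matters because the underscore is a valid identifier character in shell: "$actorShort_$timesuffix" expands the unset variable actorShort_, so the generated name degenerated to the bare timestamp. The hyphen ends the variable name and is also legal in Kubernetes object names, where underscores are not. A quick illustration with hypothetical values:

actorShort="someactor-1a2b3c"
timesuffix=1650000000000000000
echo "$actorShort_$timesuffix"   # actorShort_ is unset: prints only 1650000000000000000
echo "$actorShort-$timesuffix"   # prints someactor-1a2b3c-1650000000000000000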
@@ -1,109 +0,0 @@
name: Benchmark Pull Requests (andy test)

on:
  pull_request:
    types: [ opened ]
  issue_comment:
    types: [ created ]

jobs:
  validate-commentor:
    runs-on: ubuntu-18.04
    outputs:
      valid: ${{ steps.set_valid.outputs.valid }}
    steps:
      - uses: actions/checkout@v2
      - name: Validate Commentor
        id: set_valid
        run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
        env:
          ACTOR: ${{ github.actor }}

  check-comments:
    runs-on: ubuntu-18.04
    needs: validate-commentor
    if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
    outputs:
      benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
      comment-body: ${{ steps.set_body.outputs.body }}
    steps:
      - name: Check for Deploy Trigger
        uses: dolthub/pull-request-comment-trigger@master
        id: check
        with:
          trigger: '#benchmarkandy'
          reaction: rocket
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Set Benchmark
        if: ${{ steps.check.outputs.triggered == 'true' }}
        id: set_benchmark
        run: |
          echo "::set-output name=benchmark::true"

  performance:
    strategy:
      matrix:
        biginit: ["true", "false"]
        nbf: ["__LD_1__", "__DOLT_1__"]
        exclude:
          - biginit: "true"
            nbf: "__DOLT_1__"
          - biginit: "false"
            nbf: "__LD_1__"
    runs-on: ubuntu-18.04
    needs: [validate-commentor, check-comments]
    if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
    name: Benchmark Performance
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: azure/setup-kubectl@v2.0
        with:
          version: 'v1.23.6'
      - name: Install aws-iam-authenticator
        run: |
          curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
          chmod +x ./aws-iam-authenticator && \
          sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
          aws-iam-authenticator version
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      - uses: xt0rted/pull-request-comment-branch@v1
        id: comment-branch
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Create and Auth kubeconfig
        run: |
          echo "$CONFIG" > kubeconfig
          KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
          KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
          KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
        env:
          CONFIG: ${{ secrets.CORP_KUBECONFIG }}
      - name: Get pull number
        uses: actions/github-script@v3
        id: get_pull_number
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
      - name: Run benchmarks
        id: run-benchmarks
        run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
        env:
          FROM_SERVER: 'dolt'
          FROM_VERSION: ${{ github.sha }}
          TO_SERVER: 'dolt'
          TO_VERSION: ${{ steps.comment-branch.outputs.head_sha }}
          MODE: 'pullRequest'
          ISSUE_NUMBER: ${{ steps.get_pull_number.outputs.pull_number }}
          ACTOR: ${{ github.actor }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: ${{ matrix.biginit }}
          NOMS_BIN_FORMAT: ${{ matrix.nbf }}
          TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-andy-dolt-dolt-json.sh"
@@ -35,6 +35,15 @@ jobs:
          ACTOR_EMAIL: ${{ github.event.inputs.email }}

  benchmark-release-mysql:
    strategy:
      matrix:
        biginit: [ "true", "false" ]
        nbf: [ "__LD_1__", "__DOLT_1__" ]
        exclude:
          - biginit: "true"
            nbf: "__DOLT_1__"
          - biginit: "false"
            nbf: "__LD_1__"
    runs-on: ubuntu-18.04
    needs: set-version-actor
    name: Benchmark Dolt Release vs MySQL 8
@@ -76,4 +85,6 @@ jobs:
          ACTOR_EMAIL: ${{ needs.set-version-actor.outputs.actor_email }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: ${{ matrix.biginit }}
          NOMS_BIN_FORMAT: ${{ matrix.nbf }}
          TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"
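The matrix values reach the job templates through run-benchmarks.sh, which receives them as INIT_BIG_REPO and NOMS_BIN_FORMAT alongside TEMPLATE_SCRIPT. Presumably it forwards them as the new optional trailing arguments; a simplified sketch of that hand-off (hypothetical, not shown in this diff):

# Inside run-benchmarks.sh (hypothetical): the matrix env vars become the
# optional trailing positional args the template scripts now accept, and the
# rendered manifest goes straight to the cluster.
"$TEMPLATE_SCRIPT" "$jobname" "$FROM_SERVER" "$FROM_VERSION" "$TO_SERVER" "$TO_VERSION" \
  "$timeprefix" "$actorprefix" "$FORMAT" "$INIT_BIG_REPO" "$NOMS_BIN_FORMAT" \
  | KUBECONFIG="$KUBECONFIG" kubectl create -f -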