Merge remote-tracking branch 'origin/main' into taylor/log-ancestor-spec

This commit is contained in:
Taylor Bantle
2022-10-24 14:54:57 -07:00
43 changed files with 389 additions and 107 deletions
@@ -7,10 +7,10 @@ if [[ $# -ne 1 ]]; then
exit 1
fi
validcommentors="coffeegoddd andrew-wm-arthur bheni Hydrocharged reltuk tbantle22 timsehn VinaiRachakonda zachmu max-hoffman"
validcommentors="coffeegoddd andrew-wm-arthur bheni Hydrocharged reltuk tbantle22 timsehn zachmu max-hoffman"
contains() {
[[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && echo "::set-output name=valid::true" || exit 0
[[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && echo "valid=true" >> $GITHUB_OUTPUT || exit 0
}
contains "$validcommentors" "$1"
+1 -1
View File
@@ -19,7 +19,7 @@ Workflows prefixed with `cd-` are used for releasing Dolt. Some of these workflo
## Benchmarking Workflows
Benchmarking workflows are used as an interface for deploying benchmarking jobs to one of our Kubernetes Clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered with `repository_dispatch` events. Notice that benchmarking workflows, like `workflows/performance-benchmarks-email-report.yaml` for example, trigger these events using the `peter-evans/repository-dispatch@v1` Action.
Benchmarking workflows are used as an interface for deploying benchmarking jobs to one of our Kubernetes Clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered with `repository_dispatch` events. Notice that benchmarking workflows, like `workflows/performance-benchmarks-email-report.yaml` for example, trigger these events using the `peter-evans/repository-dispatch@v2.0.0` Action.
These Kubernetes Jobs do not run on GitHub Actions Hosted Runners, so the workflow logs do not contain any information about the deployed Kubernetes Job or any errors it might have encountered. The workflow logs can only tell you if a Job was created successfully or not. To investigate an error or issue with a Job in our Kubernetes Cluster, see the debugging guide [here](https://github.com/dolthub/ld/blob/main/k8s/README.md#debug-performance-benchmarks-and-sql-correctness-jobs).
+4 -4
View File
@@ -18,7 +18,7 @@ jobs:
run: |
if [ "$REPO" == "go-mysql-server" ]
then
echo "::set-output name=label::gms-bump"
echo "label=gms-bump" >> $GITHUB_OUTPUT
else
echo "$REPO is unsupported"
exit 1
@@ -109,16 +109,16 @@ jobs:
run: |
if [ "${{ github.event.client_payload.assignee }}" == "zachmu" ]
then
echo "::set-output name=reviewer::Hydrocharged"
echo "reviewer=Hydrocharged" >> $GITHUB_OUTPUT
else
echo "::set-output name=reviewer::zachmu"
echo "reviewer=zachmu" >> $GITHUB_OUTPUT
fi
- name: Get short hash
id: short-sha
run: |
commit=${{ github.event.client_payload.head_commit_sha }}
short=${commit:0:8}
echo "::set-output name=short::$short"
echo "short=$short" >> $GITHUB_OUTPUT
- name: Create and Push new branch
run: |
git config --global --add user.name "${{ github.event.client_payload.assignee }}"
+2 -2
View File
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Create Homebrew PR
uses: mislav/bump-homebrew-formula-action@v1
uses: mislav/bump-homebrew-formula-action@v2.1
if: ${{ github.event_name == 'repository_dispatch' }}
with:
formula-name: dolt
@@ -30,7 +30,7 @@ jobs:
env:
COMMITTER_TOKEN: ${{secrets.REPO_ACCESS_TOKEN}}
- name: Create Homebrew PR
uses: mislav/bump-homebrew-formula-action@v1
uses: mislav/bump-homebrew-formula-action@v2.1
if: ${{ github.event_name == 'workflow_dispatch' }}
with:
formula-name: dolt
+1 -1
View File
@@ -28,7 +28,7 @@ jobs:
version="${{ github.event.inputs.version }}"
fi
echo "::set-output name=version::$version"
echo "version=$version" >> $GITHUB_OUTPUT
winget-bump:
needs: get-version
@@ -19,11 +19,11 @@ jobs:
if [ "$EVENT_NAME" == "workflow_dispatch" ]
then
release_id=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/dolthub/dolt/releases/tags/v${{ github.event.inputs.version }} | jq '.id')
echo "::set-output name=version::${{ github.event.inputs.version }}"
echo "::set-output name=release_id::$release_id"
echo "version=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT
echo "release_id=$release_id" >> $GITHUB_OUTPUT
else
echo "::set-output name=version::${{ github.event.client_payload.version }}"
echo "::set-output name=release_id::${{ github.event.client_payload.release_id }}"
echo "version=${{ github.event.client_payload.version }}" >> $GITHUB_OUTPUT
echo "release_id=${{ github.event.client_payload.release_id }}" >> $GITHUB_OUTPUT
fi
env:
EVENT_NAME: ${{ github.event_name }}
+12 -9
View File
@@ -21,7 +21,7 @@ jobs:
then
version="${version:1}"
fi
echo "::set-output name=version::$version"
echo "version=$version" >> $GITHUB_OUTPUT
create-release:
needs: format-version
@@ -38,21 +38,24 @@ jobs:
FILE: ${{ format('{0}/go/cmd/dolt/dolt.go', github.workspace) }}
NEW_VERSION: ${{ needs.format-version.outputs.version }}
- name: Update Dockerfile
run: sed -i -e 's/ARG DOLT_VERSION=.*/ARG DOLT_VERSION='"$NEW_VERSION"'/' "$AMD64" "$ARM64"
run: sed -i -e 's/ARG DOLT_VERSION=.*/ARG DOLT_VERSION='"$NEW_VERSION"'/' "$AMD64" "$ARM64" "$SERVERAMD64" "$SERVERARM64"
env:
AMD64: ${{ format('{0}/docker/Dockerfile', github.workspace) }}
ARM64: ${{ format('{0}/docker/Dockerfile.arm64', github.workspace) }}
SERVERAMD64: ${{ format('{0}/docker/serverDockerfile', github.workspace) }}
SERVERARM64: ${{ format('{0}/docker/serverDockerfile.arm64', github.workspace) }}
NEW_VERSION: ${{ needs.format-version.outputs.version }}
- uses: EndBug/add-and-commit@v7
- uses: EndBug/add-and-commit@v9.1.1
with:
message: ${{ format('[ga-bump-release] Update Dolt version to {0} and release v{0}', needs.format-version.outputs.version) }}
add: ${{ format('[{0}/go/cmd/dolt/dolt.go,{0}/docker/Dockerfile,{0}/docker/Dockerfile.arm64]', github.workspace) }}
add: ${{ format('[{0}/go/cmd/dolt/dolt.go,{0}/docker/Dockerfile,{0}/docker/Dockerfile.arm64,{0}/docker/serverDockerfile,{0}/docker/serverDockerfile.arm64]', github.workspace) }}
cwd: "."
pull: "--ff"
- name: Build Binaries
id: build_binaries
run: |
latest=$(git rev-parse HEAD)
echo "::set-output name=commitish::$latest"
echo "commitish=$latest" >> $GITHUB_OUTPUT
GO_BUILD_VERSION=1.19 go/utils/publishrelease/buildbinaries.sh
- name: Create Release
id: create_release
@@ -131,7 +134,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Upload MSI
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: upload-msi
@@ -143,7 +146,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Release Notes
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: release-notes
@@ -154,7 +157,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Bump Homebrew
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: bump-homebrew
@@ -165,7 +168,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Performance Benchmarks
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: release-dolt
+2 -2
View File
@@ -40,7 +40,7 @@ jobs:
fi
- name: Configure AWS Credentials
if: ${{ env.use_credentials == 'true' }}
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -108,7 +108,7 @@ jobs:
cd parquet-mr/parquet-cli
mvn clean install -DskipTests
runtime_jar="$(pwd)"/target/parquet-cli-1.13.0-SNAPSHOT-runtime.jar
echo "::set-output name=runtime_jar::$runtime_jar"
echo "runtime_jar=$runtime_jar" >> $GITHUB_OUTPUT
- name: Check expect
run: expect -v
- name: Test all Unix
+3 -3
View File
@@ -32,7 +32,7 @@ jobs:
IFS=$'\n'
file_arr=($files)
echo "::set-output name=files::${file_arr[@]}"
echo "files=${file_arr[@]}" >> $GITHUB_OUTPUT
IFS=$SAVEIFS
working-directory: ./integration-tests/bats
@@ -85,7 +85,7 @@ jobs:
fi
- name: Configure AWS Credentials
if: ${{ env.use_credentials == 'true' }}
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -159,7 +159,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -44,8 +44,8 @@ jobs:
go build -mod=readonly -o ../.ci_bin/dolt ./cmd/dolt/.
- name: Setup Dolt Config
run: |
dolt config --global --add user.name 'Liquidata Actions'
dolt config --global --add user.email 'actions@liquidata.co'
dolt config --global --add user.name 'DoltHub Actions'
dolt config --global --add user.email 'actions@dolthub.com'
- name: Test all
run: ./runner.sh
working-directory: ./integration-tests/compatibility
+3 -2
View File
@@ -40,11 +40,12 @@ jobs:
run: |
changes=$(git status --porcelain)
if [ ! -z "$changes" ]; then
echo "::set-output name=has-changes::true"
echo "has-changes=true" >> $GITHUB_OUTPUT
fi
- uses: EndBug/add-and-commit@v7
- uses: EndBug/add-and-commit@v9.1.1
if: ${{ steps.detect-changes.outputs.has-changes == 'true' }}
with:
message: "[ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh"
add: "."
cwd: "."
pull: "--ff"
+2 -2
View File
@@ -11,7 +11,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -29,7 +29,7 @@ jobs:
if [ ! -z "$RECIPIENT" ]; then
addresses="[\"$RECIPIENT\"]"
fi
echo "::set-output name=addresses::$addresses"
echo "addresses=$addresses" >> $GITHUB_OUTPUT
env:
RECIPIENT: ${{ github.event.client_payload.email_recipient }}
TEAM: '["${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}"]'
@@ -39,7 +39,7 @@ jobs:
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
echo "benchmark=true" >> $GITHUB_OUTPUT
performance:
runs-on: ubuntu-22.04
@@ -47,7 +47,7 @@ jobs:
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Trigger Benchmark Import K8s Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
- uses: dolthub/pull-request-comment-branch@v3
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
@@ -57,7 +57,7 @@ jobs:
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
+2 -2
View File
@@ -14,7 +14,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: azure/setup-kubectl@v2.0
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
@@ -24,7 +24,7 @@ jobs:
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+2 -2
View File
@@ -14,7 +14,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: azure/setup-kubectl@v2.0
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
@@ -24,7 +24,7 @@ jobs:
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+2 -2
View File
@@ -14,11 +14,11 @@ jobs:
name: Run Fuzzer
steps:
- uses: actions/checkout@v3
- uses: azure/setup-kubectl@v2.0
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+2 -2
View File
@@ -13,11 +13,11 @@ jobs:
dolt_fmt: [ "__LD_1__", "__DOLT__" ]
steps:
- uses: actions/checkout@v3
- uses: azure/setup-kubectl@v2.0
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -10,17 +10,17 @@ jobs:
runs-on: ubuntu-22.04
name: Trigger Benchmark Latency, Benchmark Import, and SQL Correctness K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: sql-correctness
client-payload: '{"to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "actor_email": "max@dolthub.com", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
@@ -26,9 +26,9 @@ jobs:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
echo "::set-output name=actor_email::$ACTOR_EMAIL"
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "actor=$ACTOR" >> $GITHUB_OUTPUT
echo "actor_email=$ACTOR_EMAIL" >> $GITHUB_OUTPUT
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
@@ -39,12 +39,12 @@ jobs:
needs: set-version-actor
name: Trigger Benchmark Latency and Benchmark Import K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
@@ -39,7 +39,7 @@ jobs:
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
echo "benchmark=true" >> $GITHUB_OUTPUT
performance:
runs-on: ubuntu-22.04
@@ -47,7 +47,7 @@ jobs:
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Trigger Benchmark Latency K8s Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
- uses: dolthub/pull-request-comment-branch@v3
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
@@ -57,7 +57,7 @@ jobs:
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
+1 -1
View File
@@ -13,7 +13,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+3 -3
View File
@@ -25,8 +25,8 @@ jobs:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "actor=$ACTOR" >> $GITHUB_OUTPUT
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
@@ -36,7 +36,7 @@ jobs:
needs: set-version-actor
name: Trigger SQL Correctness K8s Workflow
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: sql-correctness
+1 -1
View File
@@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1.3-labs
FROM --platform=linux/amd64 ubuntu:22.04
ARG DOLT_VERSION=0.50.6
ARG DOLT_VERSION=0.50.8
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-amd64.tar.gz dolt-linux-amd64.tar.gz
RUN tar zxvf dolt-linux-amd64.tar.gz && \
+1 -1
View File
@@ -2,7 +2,7 @@
FROM --platform=linux/arm64 ubuntu:22.04
COPY docker/qemu-aarch64-static /usr/bin/
ARG DOLT_VERSION=0.50.6
ARG DOLT_VERSION=0.50.8
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-arm64.tar.gz dolt-linux-arm64.tar.gz
RUN tar zxvf dolt-linux-arm64.tar.gz && \
+15 -2
View File
@@ -32,5 +32,18 @@ For example, we support `linux/amd64` and `linux/arm64`, so we need build for ta
`0.50.4-arm64`,
if the current release version is '0.50.4'.
-- `COPY docker/qemu-aarch64-static /usr/bin/` is required for building (non x86 architecture image) in x86 host
before any RUN commands.
-- `COPY docker/qemu-aarch64-static /usr/bin/` is required for building (non x86 architecture image) in x86 host
before any RUN commands.
--- WHY WE HAVE FIXED HOST=0.0.0.0 AND PORT=3306 ---
Setting the host to either `localhost` or `127.0.0.1` does not allow connections from outside the container.
According to MySQL ( https://dev.mysql.com/blog-archive/the-bind-address-option-now-supports-multiple-addresses/ ),
`Wildcard address values here means one of the following string values: '*', '::' and '0.0.0.0'`
The default bind-address is `::`, which Dolt does not currently support. It is equivalent to host `0.0.0.0`, which Dolt
does support. This means that remote connections from any host will be allowed to reach the server in the container.
Users need to use port mapping to expose a port from the container, e.g. `-p 3307:3306`, which maps port 3306
inside the container to port 3307 on the host system.
+162
View File
@@ -0,0 +1,162 @@
#!/bin/bash
set -eo pipefail
# logging functions
# mysql_log SEVERITY [MSG...] — timestamped entrypoint logging.
# The first argument is the severity label; the message is either the
# remaining arguments or, when none are given, read from stdin.
mysql_log() {
    local severity="$1"; shift
    local message="$*"
    if [ "$#" -eq 0 ]; then
        message="$(cat)"
    fi
    local stamp
    stamp="$(date --rfc-3339=seconds)"
    printf '%s [%s] [Entrypoint]: %s\n' "$stamp" "$severity" "$message"
}
# mysql_note MSG... — informational message to stdout.
mysql_note() {
mysql_log Note "$@"
}
# mysql_warn MSG... — warning message, routed to stderr.
mysql_warn() {
mysql_log Warn "$@" >&2
}
# mysql_error MSG... — error message to stderr, then terminate the script.
mysql_error() {
mysql_log ERROR "$@" >&2
exit 1
}
# Fixed locations inside the container image:
CONTAINER_DATA_DIR="/var/lib/dolt"
DOLT_CONFIG_DIR="/etc/dolt/doltcfg.d"
SERVER_CONFIG_DIR="/etc/dolt/servercfg.d"
DOLT_ROOT_PATH="/.dolt"
# _create_dir PATH — create every missing directory in PATH (mkdir -p wrapper).
_create_dir() {
local path="$1"
mkdir -p "$path"
}
# Verify that an executable `dolt` binary is available on PATH; abort
# via mysql_error otherwise.
check_for_dolt() {
    mysql_log "Verifying dolt executable..."
    # Declare and assign separately so the lookup's exit status is not
    # masked by `local` (shellcheck SC2155); `|| true` keeps `set -e`
    # from exiting before we can emit a useful error message.
    local dolt_bin
    dolt_bin="$(which dolt)" || true
    if [ ! -x "$dolt_bin" ]; then
        mysql_error "dolt binary executable not found"
    fi
}
# Scan the argument list for a help option that would make the server
# print usage and stop (-?, -h, or --help). Returns 0 if one is found,
# 1 otherwise.
_mysql_want_help() {
    local candidate
    for candidate in "$@"; do
        case "$candidate" in
            -'?' | -h | --help)
                return 0
                ;;
        esac
    done
    return 1
}
# get_config_file_path_if_exists DIR EXT
# Sets the global CONFIG_PROVIDED to the single "*.EXT" file found in
# DIR, or to empty when DIR is missing, empty, or holds more than one
# matching file.
# Fixes vs. original: the count used `find .$CONFIG_DIR` (a leading dot
# made the search relative to the cwd) while the selection used an
# unrelated `ls $CONFIG_DIR/*$FILE_TYPE` glob — the two could disagree.
# Both now use the same absolute `find` expression, and all expansions
# are quoted so paths with spaces work.
get_config_file_path_if_exists() {
    CONFIG_PROVIDED=
    CONFIG_DIR="$1"
    FILE_TYPE="$2"
    if [ -d "$CONFIG_DIR" ]; then
        mysql_log "Checking for config provided in $CONFIG_DIR"
        local number_of_files_found
        number_of_files_found="$(find "$CONFIG_DIR" -type f -name "*.$FILE_TYPE" | wc -l)"
        if [ "$number_of_files_found" -gt 1 ]; then
            CONFIG_PROVIDED=
            mysql_warn "multiple config file found in $CONFIG_DIR, using default config"
        elif [ "$number_of_files_found" -eq 1 ]; then
            local files_found
            files_found="$(find "$CONFIG_DIR" -type f -name "*.$FILE_TYPE")"
            mysql_log "$files_found file is found"
            CONFIG_PROVIDED="$files_found"
        else
            CONFIG_PROVIDED=
        fi
    fi
}
# taken from https://github.com/docker-library/mysql/blob/master/8.0/docker-entrypoint.sh
# Runs every file passed in (typically /docker-entrypoint-initdb.d/*)
# AFTER the server is started, dispatching on extension:
#   *.sh        executed if executable, otherwise sourced into this shell
#   *.sql[.gz|.bz2|.xz|.zst]  decompressed if needed and piped to docker_process_sql
#   anything else            ignored with a warning
# usage: docker_process_init_files [file [file [...]]]
# ie: docker_process_init_files /always-initdb.d/*
# NOTE(review): docker_process_sql is not defined anywhere in this
# script — *.sql* files will fail with "command not found" unless the
# function is provided elsewhere; confirm.
docker_process_init_files() {
echo
local f
for f; do
case "$f" in
*.sh)
# Non-executable scripts are sourced so they can mutate this shell's
# environment; see:
# https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
# https://github.com/docker-library/postgres/pull/452
if [ -x "$f" ]; then
mysql_note "$0: running $f"
"$f"
else
mysql_note "$0: sourcing $f"
. "$f"
fi
;;
*.sql) mysql_note "$0: running $f"; docker_process_sql < "$f"; echo ;;
*.sql.bz2) mysql_note "$0: running $f"; bunzip2 -c "$f" | docker_process_sql; echo ;;
*.sql.gz) mysql_note "$0: running $f"; gunzip -c "$f" | docker_process_sql; echo ;;
*.sql.xz) mysql_note "$0: running $f"; xzcat "$f" | docker_process_sql; echo ;;
*.sql.zst) mysql_note "$0: running $f"; zstd -dc "$f" | docker_process_sql; echo ;;
*) mysql_warn "$0: ignoring $f" ;;
esac
echo
done
}
# Run the given command from the fixed data directory /var/lib/dolt.
# NOTE(review): _main invokes this with no arguments, in which case
# "$@" expands to nothing and only the cd takes effect — confirm that
# is intentional (the server itself is launched by the final exec).
start_server() {
    # Quoted so a data dir containing spaces does not word-split.
    cd "$CONTAINER_DATA_DIR"
    "$@"
}
# If exactly one .json config file is provided in /etc/dolt/doltcfg.d,
# overwrite $HOME/.dolt/config_global.json with it. Sets/consumes the
# global CONFIG_PROVIDED via get_config_file_path_if_exists.
set_dolt_config_if_defined() {
    get_config_file_path_if_exists "$DOLT_CONFIG_DIR" "json"
    # [ -n "…" ] replaces the original unquoted [ ! -z $CONFIG_PROVIDED ],
    # which word-split (and mis-evaluated) paths containing whitespace;
    # the cp arguments are quoted for the same reason.
    if [ -n "$CONFIG_PROVIDED" ]; then
        /bin/cp -rf "$CONFIG_PROVIDED" "$HOME/$DOLT_ROOT_PATH/config_global.json"
    fi
}
# Entrypoint driver: normalizes the container command line, applies any
# mounted dolt/server config, then execs the final command.
_main() {
    # Fail fast if the dolt binary is missing from the image.
    check_for_dolt

    if [ "${1:0:1}" = '-' ]; then
        # Arguments only (e.g. `docker run img --flag`): prepend the
        # default command. The fixed host=0.0.0.0 / port=3306 choice is
        # explained in the README.
        set -- dolt sql-server --host=0.0.0.0 --port=3306 "$@"
    fi

    if [ "$1" = 'dolt' ] && [ "$2" = 'sql-server' ] && ! _mysql_want_help "$@"; then
        # Declare and assign separately so the pipeline's exit status is
        # not masked by `local` (shellcheck SC2155).
        local dolt_version
        dolt_version="$(dolt version | grep 'dolt version' | cut -f3 -d " ")"
        mysql_note "Entrypoint script for Dolt Server $dolt_version starting."

        declare -g CONFIG_PROVIDED

        # A single json file in /etc/dolt/doltcfg.d overwrites
        # $HOME/.dolt/config_global.json.
        set_dolt_config_if_defined

        # A single yaml file in /etc/dolt/servercfg.d is passed to the
        # server via --config. Quoted test/expansion replaces the
        # original unquoted `[ ! -z $CONFIG_PROVIDED ]` / bare
        # `--config=$CONFIG_PROVIDED`, which broke on paths with spaces.
        get_config_file_path_if_exists "$SERVER_CONFIG_DIR" "yaml"
        if [ -n "$CONFIG_PROVIDED" ]; then
            set -- "$@" --config="$CONFIG_PROVIDED"
        fi

        # NOTE(review): start_server is invoked WITHOUT arguments, so it
        # only cd's into the data directory; the server is actually
        # launched by the exec below, AFTER the init files are processed.
        # Confirm whether init files that need a live server can work here.
        start_server
        docker_process_init_files /docker-entrypoint-initdb.d/*
        mysql_note "Dolt Server $dolt_version is started."
    fi
    exec "$@"
}

_main "$@"
+3
View File
@@ -7,6 +7,9 @@ BUILD_ARCH=$(echo "${DOCKERFILE_PATH}" | cut -d '.' -f 2)
[ "${BUILD_ARCH}" == "docker/Dockerfile" ] && \
{ echo 'qemu-user-static: Download not required for current arch'; exit 0; }
[ "${BUILD_ARCH}" == "docker/serverDockerfile" ] && \
{ echo 'qemu-user-static: Download not required for current arch'; exit 0; }
case ${BUILD_ARCH} in
amd64 ) QEMU_ARCH="x86_64" ;;
arm64 ) QEMU_ARCH="aarch64" ;
+3
View File
@@ -6,4 +6,7 @@ BUILD_ARCH=$(echo "${DOCKERFILE_PATH}" | cut -d '.' -f 2)
[ "${BUILD_ARCH}" == "docker/Dockerfile" ] && \
{ echo 'qemu-user-static: Registration not required for current arch'; exit 0; }
[ "${BUILD_ARCH}" == "docker/serverDockerfile" ] && \
{ echo 'qemu-user-static: Registration not required for current arch'; exit 0; }
docker run --rm --privileged multiarch/qemu-user-static:register --reset
+22
View File
@@ -0,0 +1,22 @@
# syntax=docker/dockerfile:1.3-labs
# Stage 1: download the amd64 dolt release tarball and install the binary.
FROM --platform=linux/amd64 ubuntu:22.04 as builder
ARG DOLT_VERSION=0.50.8
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-amd64.tar.gz dolt-linux-amd64.tar.gz
# Fix: the original left a trailing `\` after the `rm -rf` line, which
# continued the RUN instruction onto the following `FROM` line and
# corrupted the build (the arm64 variant has no such continuation).
RUN tar zxvf dolt-linux-amd64.tar.gz && \
    cp dolt-linux-amd64/bin/dolt /usr/local/bin && \
    rm -rf dolt-linux-amd64 dolt-linux-amd64.tar.gz

# Stage 2: runtime image — init-file hook dir, data volume, entrypoint.
FROM --platform=linux/amd64 builder
RUN mkdir /docker-entrypoint-initdb.d
VOLUME /var/lib/dolt
COPY docker/docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
EXPOSE 3306 33060
CMD [ "dolt", "sql-server", "--host=0.0.0.0" , "--port=3306" ]
+23
View File
@@ -0,0 +1,23 @@
# syntax=docker/dockerfile:1.3-labs
FROM --platform=linux/arm64 ubuntu:22.04 as builder
COPY docker/qemu-aarch64-static /usr/bin/
ARG DOLT_VERSION=0.50.8
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-arm64.tar.gz dolt-linux-arm64.tar.gz
RUN tar zxvf dolt-linux-arm64.tar.gz && \
cp dolt-linux-arm64/bin/dolt /usr/local/bin && \
rm -rf dolt-linux-arm64 dolt-linux-arm64.tar.gz
FROM --platform=linux/arm64 builder
RUN mkdir /docker-entrypoint-initdb.d
VOLUME /var/lib/dolt
COPY docker/docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
EXPOSE 3306 33060
CMD [ "dolt", "sql-server", "--host=0.0.0.0" , "--port=3306" ]
+1 -1
View File
@@ -57,7 +57,7 @@ import (
)
const (
Version = "0.50.6"
Version = "0.50.8"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+1 -1
View File
@@ -57,7 +57,7 @@ require (
require (
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
github.com/cenkalti/backoff/v4 v4.1.3
github.com/dolthub/go-mysql-server v0.12.1-0.20221019203727-b99ff94e4329
github.com/dolthub/go-mysql-server v0.12.1-0.20221021223414-ba22c01e96ea
github.com/google/flatbuffers v2.0.6+incompatible
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0
+2 -2
View File
@@ -178,8 +178,8 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20221019203727-b99ff94e4329 h1:/8NXayusmfEwmL4Dgfy1t1n9w73/t5NUDCt+n8su6M0=
github.com/dolthub/go-mysql-server v0.12.1-0.20221019203727-b99ff94e4329/go.mod h1:9Q9FhWO82GrV4he13V2ZuDE0T/eDZbPVMOWLcZluOvg=
github.com/dolthub/go-mysql-server v0.12.1-0.20221021223414-ba22c01e96ea h1:uC4TLBdMJekObaLmYp9Pqc/mZSBnOJh2rT4BrMWbyzA=
github.com/dolthub/go-mysql-server v0.12.1-0.20221021223414-ba22c01e96ea/go.mod h1:9Q9FhWO82GrV4he13V2ZuDE0T/eDZbPVMOWLcZluOvg=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
+28 -16
View File
@@ -408,7 +408,7 @@ func (root *RootValue) GenerateTagsForNewColumns(
return nil, fmt.Errorf("error generating tags, newColNames and newColKinds must be of equal length")
}
newTags := make([]uint64, len(newColNames))
newTags := make([]*uint64, len(newColNames))
// Get existing columns from the current root, or the head root if the table doesn't exist in the current root. The
// latter case is to support reusing table tags in the case of drop / create in the same session, which is common
@@ -420,12 +420,13 @@ func (root *RootValue) GenerateTagsForNewColumns(
// If we found any existing columns set them in the newTags list.
for _, col := range existingCols {
col := col
for i := range newColNames {
// Only re-use tags if the noms kind didn't change
// TODO: revisit this when new storage format is further along
if strings.ToLower(newColNames[i]) == strings.ToLower(col.Name) &&
newColKinds[i] == col.TypeInfo.NomsKind() {
newTags[i] = col.Tag
newTags[i] = &col.Tag
break
}
}
@@ -436,22 +437,24 @@ func (root *RootValue) GenerateTagsForNewColumns(
existingColKinds = append(existingColKinds, col.Kind)
}
existingTags, err := GetAllTagsForRoot(ctx, root)
existingTags, err := GetAllTagsForRoots(ctx, headRoot, root)
if err != nil {
return nil, err
}
outputTags := make([]uint64, len(newTags))
for i := range newTags {
if newTags[i] > 0 {
if newTags[i] != nil {
outputTags[i] = *newTags[i]
continue
}
newTags[i] = schema.AutoGenerateTag(existingTags, tableName, existingColKinds, newColNames[i], newColKinds[i])
outputTags[i] = schema.AutoGenerateTag(existingTags, tableName, existingColKinds, newColNames[i], newColKinds[i])
existingColKinds = append(existingColKinds, newColKinds[i])
existingTags.Add(newTags[i], tableName)
existingTags.Add(outputTags[i], tableName)
}
return newTags, nil
return outputTags, nil
}
func getExistingColumns(
@@ -459,7 +462,8 @@ func getExistingColumns(
root, headRoot *RootValue,
tableName string,
newColNames []string,
newColKinds []types.NomsKind) ([]schema.Column, error) {
newColKinds []types.NomsKind,
) ([]schema.Column, error) {
var existingCols []schema.Column
tbl, found, err := root.GetTable(ctx, tableName)
@@ -969,15 +973,23 @@ func (root *RootValue) ValidateForeignKeysOnSchemas(ctx context.Context) (*RootV
return root.PutForeignKeyCollection(ctx, fkCollection)
}
// GetAllTagsForRoot gets all tags for root
func GetAllTagsForRoot(ctx context.Context, root *RootValue) (tags schema.TagMapping, err error) {
// GetAllTagsForRoots gets all tags for |roots|.
func GetAllTagsForRoots(ctx context.Context, roots ...*RootValue) (tags schema.TagMapping, err error) {
tags = make(schema.TagMapping)
err = root.IterTables(ctx, func(tblName string, _ *Table, sch schema.Schema) (stop bool, err error) {
for _, t := range sch.GetAllCols().Tags {
tags.Add(t, tblName)
for _, root := range roots {
if root == nil {
continue
}
return
})
err = root.IterTables(ctx, func(tblName string, _ *Table, sch schema.Schema) (stop bool, err error) {
for _, t := range sch.GetAllCols().Tags {
tags.Add(t, tblName)
}
return
})
if err != nil {
break
}
}
return
}
@@ -1030,7 +1042,7 @@ func validateTagUniqueness(ctx context.Context, root *RootValue, tableName strin
return err
}
existing, err := GetAllTagsForRoot(ctx, root)
existing, err := GetAllTagsForRoots(ctx, root)
if err != nil {
return err
}
@@ -46,7 +46,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 80
const SkipPreparedsCount = 81
const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
@@ -1254,6 +1254,10 @@ func TestAddDropPks(t *testing.T) {
enginetest.TestAddDropPks(t, newDoltHarness(t))
}
func TestAddAutoIncrementColumn(t *testing.T) {
enginetest.TestAddAutoIncrementColumn(t, newDoltHarness(t))
}
func TestNullRanges(t *testing.T) {
enginetest.TestNullRanges(t, newDoltHarness(t))
}
+6 -3
View File
@@ -112,7 +112,8 @@ make_it() {
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT * FROM test;"
# Trying to checkout a new branch throws an error, but doesn't panic
server_query "dolt_repo_$$/main" 1 dolt "" "SELECT DOLT_CHECKOUT('to_keep');" "" "Could not load database dolt_repo_$$"
run server_query "dolt_repo_$$/main" 1 dolt "" "CALL DOLT_CHECKOUT('to_keep');" "" 1
[[ "$output" =~ "branch not found" ]] || false
}
@test "deleted-branches: calling DOLT_CHECKOUT on SQL connection with existing branch revision specifier set to existing branch when default branch is deleted does not panic" {
@@ -128,7 +129,9 @@ make_it() {
server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT * FROM test;"
# Trying to checkout a new branch throws an error, but doesn't panic
server_query "dolt_repo_$$/to_keep" 1 dolt "" "SELECT DOLT_CHECKOUT('to_checkout');" "" "Could not load database dolt_repo_$$"
run server_query "dolt_repo_$$/to_keep" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" "" 1
[[ "$output" =~ "branch not found" ]] || false
}
@test "deleted-branches: can DOLT_CHECKOUT on SQL connection with dolt_default_branch set to existing branch when checked out branch is deleted" {
@@ -144,5 +147,5 @@ make_it() {
server_query "dolt_repo_$$" 1 dolt "" "SELECT * FROM test" ""
server_query "dolt_repo_$$" 1 dolt "" "DOLT_CHECKOUT('to_checkout');" ""
server_query "dolt_repo_$$" 1 dolt "" "CALL DOLT_CHECKOUT('to_checkout');" ""
}
+33
View File
@@ -397,3 +397,36 @@ SQL
[ "$status" -eq 1 ]
[[ "$output" =~ "table not found: test" ]] || false
}
@test "drop-create: regression test for 0 value tags" {
dolt sql -q "CREATE TABLE clan_home_level (level INTEGER NOT NULL, price_teleport JSON NOT NULL);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "clan_home_level | price_teleport | 0" ]] || false
dolt commit -Am "add table"
dolt sql -q "DROP TABLE clan_home_level;"
dolt sql -q "CREATE TABLE clan_home_level (level INTEGER NOT NULL, price_teleport JSON NOT NULL);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "clan_home_level | price_teleport | 0" ]] || false
}
@test "drop-create: ensure no tag collisions" {
dolt sql -q "CREATE TABLE my_table (pk int primary key)"
dolt commit -Am "added my_table"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "my_table | pk | 2803" ]] || false
dolt sql -q "DROP TABLE my_table"
dolt sql -q "CREATE TABLE mytable (pk int primary key)"
dolt sql -q "CREATE TABLE my_table (pk int primary key)"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "my_table | pk | 2803" ]] || false
[[ $output =~ "mytable | pk | 11671" ]] || false
}
@@ -74,7 +74,7 @@ for i in range(len(queries)):
print('expected exception: ', expected_exception, '\n got: ', str(e))
sys.exit(1)
else:
sys.exit(0)
sys.exit(1)
if expected[i] is not None and expected[i] != '':
print('Raw Expected: ', expected[i])
+1 -1
View File
@@ -31,7 +31,7 @@ teardown() {
cd dbs1
start_multi_db_server repo1
server_query repo1 1 dolt "" "create database new; use new; call dcheckout('-b', 'feat'); create table t (x int); call dolt_add('.'); call dcommit('-am', 'cm'); set @@global.new_default_branch='feat'"
server_query repo1 1 "use repo1"
server_query repo1 1 dolt "" "use repo1"
}
@test "multidb: incompatible BIN FORMATs" {
@@ -45,13 +45,13 @@ teardown() {
dolt checkout -b other
start_sql_server repo1
run server_query repo1 1 dolt "" "select dolt_push() as p" "p\n0" "" 1
run server_query repo1 1 dolt "" "call dolt_push()" "" "" 1
[[ "$output" =~ "the current branch has no upstream branch" ]] || false
server_query repo1 1 "select dolt_push('--set-upstream', 'origin', 'other') as p" "p\n1"
server_query repo1 1 dolt "" "call dolt_push('--set-upstream', 'origin', 'other') " ""
skip "In-memory branch doesn't track upstream"
server_query repo1 1 dolt "" "select dolt_push() as p" "p\n1"
server_query repo1 1 dolt "" "call dolt_push()" ""
}
@test "remotes-sql-server: push on sql-session commit" {
+2 -2
View File
@@ -375,8 +375,8 @@ EOF
[[ "$output" =~ "$EXPECTED" ]] || false
}
# We passed nil where a sql ctx was expected in merge. When we added collations,
# the sql ctx became required and merge started to panic.
# We passed nil where a sql ctx was expected in merge. When we added
# collations, the sql ctx became required and merge started to panic.
@test "schema-changes: regression test for merging check constraints with TEXT type panicking due to a nil sql ctx" {
dolt sql -q "create table t (pk int primary key, col1 text);"
dolt commit -Am "initial"
+1 -1
View File
@@ -73,7 +73,7 @@ teardown() {
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
server_query test_db 1 root "select user from mysql.user order by user" "" 1
server_query test_db 1 root "" "select user from mysql.user order by user" "" 1
}
@test "sql-privs: starting server with empty config works" {
+11 -11
View File
@@ -41,7 +41,7 @@ teardown() {
# start the server and ensure there are no databases yet
cd $tempDir/empty_server
start_sql_server
server_query "" 1 "show databases" "Database\ninformation_schema\nmysql"
server_query "" 1 dolt "" "show databases" "Database\ninformation_schema\nmysql"
# verify that dolt_clone works
# TODO: Once dolt_clone can be called without a selected database, this can be removed
@@ -172,7 +172,7 @@ SQL
echo '2,2,2' >> import.csv
run dolt table import -u one_pk import.csv
[ "$status" -eq 1 ]
server_query repo1 1 "SELECT * FROM one_pk ORDER by pk" ""
server_query repo1 1 dolt "" "SELECT * FROM one_pk ORDER by pk" ""
}
@test "sql-server: test dolt sql interface works properly with autocommit" {
@@ -198,7 +198,8 @@ SQL
[[ "$output" =~ "No tables in working set" ]] || false
# check that dolt_commit throws an error when there are no changes to commit
server_query repo1 0 dolt "" "CALL DOLT_COMMIT('-a', '-m', 'Commit1')" 1
run server_query repo1 0 dolt "" "CALL DOLT_COMMIT('-a', '-m', 'Commit1')" "" 1
[[ "$output" =~ "nothing to commit" ]] || false
run dolt ls
[ "$status" -eq 0 ]
@@ -403,7 +404,7 @@ SQL
SELECT DOLT_CHECKOUT('-b', 'feature-branch');
"
server_query repo1 1 dolt "" "SELECT * FROM testorder by pk" "pk\n0\n1\n2"
server_query repo1 1 dolt "" "SELECT * FROM test order by pk" "pk\n0\n1\n2"
server_query repo1 1 dolt "" "
SELECT DOLT_CHECKOUT('feature-branch');
@@ -546,7 +547,7 @@ SQL
# make some changes to main and commit to branch test_branch
server_query repo1 1 dolt "" "
SET @@repo1_head_ref='main';
CALL DOLT_CHECKOUT('main');
CREATE TABLE one_pk (
pk BIGINT NOT NULL,
c1 BIGINT,
@@ -555,8 +556,7 @@ SQL
);
INSERT INTO one_pk (pk,c1,c2) VALUES (2,2,2),(3,3,3);
CALL DOLT_ADD('.');
SELECT commit('-am', 'test commit message', '--author', 'John Doe <john@example.com>');
CALL DOLT_BRANCH('main', @@repo1_head);"
CALL dolt_commit('-am', 'test commit message', '--author', 'John Doe <john@example.com>');"
server_query repo1 1 dolt "" "call dolt_add('.')" "status\n0"
run dolt ls
@@ -1114,7 +1114,7 @@ databases:
start_sql_server_with_config repo1 server.yaml
server_query repo1 1 dolt "" "call dolt_fetch() as f" "f\n1"
server_query repo1 1 dolt "" "call dolt_fetch()" ""
}
@test "sql-server: run mysql from shell" {
@@ -1210,7 +1210,7 @@ databases:
SERVER_PID=$!
wait_for_connection $PORT 5000
server_query repo2 1 dolt "" "select 1 dolt ""as col1" "col1\n1"
server_query repo2 1 dolt "" "select 1 as col1" "col1\n1"
run grep '\"/tmp/mysql.sock\"' log.txt
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 1 ]
@@ -1234,7 +1234,7 @@ databases:
SERVER_PID=$!
wait_for_connection $PORT 5000
server_query repo2 1 dolt "" "select 1 dolt ""as col1" "col1\n1"
server_query repo2 1 dolt "" "select 1 as col1" "col1\n1"
run grep '\"/tmp/mysql.sock\"' log.txt
[ "$status" -eq 0 ]
@@ -1285,7 +1285,7 @@ behavior:
SERVER_PID=$!
wait_for_connection $PORT 5000
server_query repo2 1 dolt "" "select 1 dolt ""as col1" "col1\n1"
server_query repo2 1 dolt "" "select 1 as col1" "col1\n1"
run grep '\"/tmp/mysql.sock\"' log.txt
[ "$status" -eq 0 ]