Merge remote-tracking branch 'origin/main' into dhruv/update-tag

This commit is contained in:
Dhruv Sringari
2022-11-18 12:24:14 -08:00
601 changed files with 37954 additions and 15179 deletions
+1 -1
View File
@@ -38,5 +38,5 @@ inputs:
default: ''
required: false
runs:
using: 'node12'
using: 'node16'
main: 'dist/index.js'
File diff suppressed because it is too large Load Diff
+6 -1
View File
@@ -24,11 +24,16 @@ if [ -z "$ACTOR" ]; then
exit 1
fi
nomsFormat="ldnbf"
if [ "$NOMS_BIN_FORMAT" == "__DOLT__"]; then
nomsFormat="doltnbf"
fi
# use first 8 characters of TO_VERSION to differentiate
# jobs
short=${VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"
actorShort="$lowered-$nomsFormat-$short"
jobname="$actorShort"
@@ -1,98 +0,0 @@
#!/bin/sh
set -e
if [ "$#" -lt 9 ]; then
echo "Usage: ./get-dolt-dolt-job-json.sh <jobName> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <nomsBinFormat> <format> <issueNumber>"
exit 1
fi
jobName="$1"
fromServer="$2"
fromVersion="$3"
toServer="$4"
toVersion="$5"
timePrefix="$6"
actorPrefix="$7"
nomsBinFormat="$8"
format="$9"
issueNumber="${10}"
echo '
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "'$jobName'",
"namespace": "performance-benchmarking"
},
"spec": {
"backoffLimit": 1,
"template": {
"metadata": {
"annotations": {
"alert_recipients": "'$ACTOR_EMAIL'"
},
"labels": {
"k8s-liquidata-inc-monitored-job": "created-by-static-config"
}
},
"spec": {
"serviceAccountName": "import-benchmarking",
"containers": [
{
"name": "import-benchmarking",
"image": "407903926827.dkr.ecr.us-west-2.amazonaws.com/liquidata/import-benchmarking:latest",
"resources": {
"limits": {
"cpu": "7000m"
}
},
"env": [
{ "name": "GOMAXPROCS", "value": "7" },
{ "name": "ACTOR", "value": "'$ACTOR'" },
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'" },
{ "name": "REPO_ACCESS_TOKEN", "value": "'$REPO_ACCESS_TOKEN'" }
],
"imagePullPolicy": "Always",
"args": [
"--from-server='$fromServer'",
"--from-version='$fromVersion'",
"--to-server='$toServer'",
"--to-version='$toVersion'",
"--bucket=import-benchmarking-github-actions-results",
"--region=us-west-2",
"--results-dir='$timePrefix'",
"--results-prefix='$actorPrefix'",
"--mysql-schema-file=schema.sql",
"--nbf='$nomsBinFormat'",
"--email-template=ImportBenchmarkingReleaseTemplate",
"--results-schema=/results-schema.sql",
"--issue-number='$issueNumber'",
"--output='$format'",
"--fileNames=100k-sorted.csv",
"--fileNames=100k-random.csv",
"--fileNames=1m-sorted.csv",
"--fileNames=1m-random.csv",
"--fileNames=10m-sorted.csv",
"--fileNames=10m-random.csv"
]
}
],
"restartPolicy": "Never",
"nodeSelector": {
"performance-benchmarking-worker": "true"
},
"tolerations": [
{
"effect": "NoSchedule",
"key": "dedicated",
"operator": "Equal",
"value": "performance-benchmarking-worker"
}
]
}
}
}
}
'
@@ -1,99 +0,0 @@
#!/bin/sh
set -e
if [ "$#" -lt 9 ]; then
echo "Usage: ./get-mysql-dolt-job-json.sh <jobName> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <nomsBinFormat> <format> <issueNumber>"
exit 1
fi
jobName="$1"
fromServer="$2"
fromVersion="$3"
toServer="$4" # make this mysql
toVersion="$5"
timePrefix="$6"
actorPrefix="$7"
nomsBinFormat="$8"
format="$9"
issueNumber="${10}"
echo '
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "'$jobName'",
"namespace": "performance-benchmarking"
},
"spec": {
"backoffLimit": 1,
"template": {
"metadata": {
"annotations": {
"alert_recipients": "'$ACTOR_EMAIL'"
},
"labels": {
"k8s-liquidata-inc-monitored-job": "created-by-static-config"
}
},
"spec": {
"serviceAccountName": "import-benchmarking",
"containers": [
{
"name": "import-benchmarking",
"image": "407903926827.dkr.ecr.us-west-2.amazonaws.com/liquidata/import-benchmarking:latest",
"resources": {
"limits": {
"cpu": "7000m"
}
},
"env": [
{ "name": "GOMAXPROCS", "value": "7" },
{ "name": "ACTOR", "value": "'$ACTOR'" },
{ "name": "ACTOR_EMAIL", "value": "'$ACTOR_EMAIL'" },
{ "name": "REPO_ACCESS_TOKEN", "value": "'$REPO_ACCESS_TOKEN'" }
],
"imagePullPolicy": "Always",
"args": [
"--from-server='$fromServer'",
"--from-version='$fromVersion'",
"--to-server='$toServer'",
"--to-version='$toVersion'",
"--bucket=import-benchmarking-github-actions-results",
"--region=us-west-2",
"--results-dir='$timePrefix'",
"--results-prefix='$actorPrefix'",
"--mysql-exec=/usr/sbin/mysqld",
"--mysql-schema-file=schema.sql",
"--nbf='$nomsBinFormat'",
"--email-template=ImportBenchmarkingReleaseTemplate",
"--results-schema=/results-schema.sql",
"--issue-number='$issueNumber'",
"--output='$format'",
"--fileNames=100k-sorted.csv",
"--fileNames=100k-random.csv",
"--fileNames=1m-sorted.csv",
"--fileNames=1m-random.csv",
"--fileNames=10m-sorted.csv",
"--fileNames=10m-random.csv"
]
}
],
"restartPolicy": "Never",
"nodeSelector": {
"performance-benchmarking-worker": "true"
},
"tolerations": [
{
"effect": "NoSchedule",
"key": "dedicated",
"operator": "Equal",
"value": "performance-benchmarking-worker"
}
]
}
}
}
}
'
@@ -1,78 +0,0 @@
#!/bin/bash
set -e
if [ -z "$KUBECONFIG" ]; then
echo "Must set KUBECONFIG"
exit 1
fi
if [ -z "$TEMPLATE_SCRIPT" ]; then
echo "Must set TEMPLATE_SCRIPT"
exit 1
fi
if [ -z "$FROM_SERVER" ] || [ -z "$FROM_VERSION" ] || [ -z "$TO_SERVER" ] || [ -z "$TO_VERSION" ]; then
echo "Must set FROM_SERVER FROM_VERSION TO_SERVER and TO_VERSION"
exit 1
fi
if [ -z "$ACTOR" ]; then
echo "Must set ACTOR"
exit 1
fi
if [ -z "$MODE" ]; then
echo "Must set MODE"
exit 1
fi
# use first 8 characters of TO_VERSION to differentiate
# jobs
short=${TO_VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"
# random sleep
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s
timesuffix=`date +%s`
jobname="$actorShort-$timesuffix"
timeprefix=$(date +%Y/%m/%d)
actorprefix="$MODE/$ACTOR/$actorShort"
format="markdown"
if [[ "$MODE" = "release" || "$MODE" = "nightly" ]]; then
format="html"
fi
# set value to ISSUE_NUMBER environment variable
# or default to -1
issuenumber=${ISSUE_NUMBER:-"-1"}
source \
"$TEMPLATE_SCRIPT" \
"$jobname" \
"$FROM_SERVER" \
"$FROM_VERSION" \
"$TO_SERVER" \
"$TO_VERSION" \
"$timeprefix" \
"$actorprefix" \
"$NOMS_BIN_FORMAT" \
"$format" \
"$issuenumber" > job.json
out=$(KUBECONFIG="$KUBECONFIG" kubectl apply -f job.json || true)
if [ "$out" != "job.batch/$jobname created" ]; then
echo "something went wrong creating job... this job likely already exists in the cluster"
echo "$out"
exit 1
else
echo "$out"
fi
exit 0
@@ -32,8 +32,11 @@ if [ -z "$MODE" ]; then
exit 1
fi
nomsFormat="ldnbf"
if [ "$NOMS_BIN_FORMAT" = "__DOLT__" ]; then
INIT_BIG_REPO="false"
nomsFormat="doltnbf"
fi
echo "Setting from $FROM_SERVER: $FROM_VERSION"
@@ -43,7 +46,7 @@ echo "Setting to $TO_SERVER: $TO_VERSION"
# jobs
short=${TO_VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"
actorShort="$lowered-$nomsFormat-$short"
# random sleep
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s
@@ -7,10 +7,10 @@ if [[ $# -ne 1 ]]; then
exit 1
fi
validcommentors="coffeegoddd andrew-wm-arthur bheni Hydrocharged reltuk tbantle22 timsehn VinaiRachakonda zachmu max-hoffman"
validcommentors="coffeegoddd andrew-wm-arthur bheni Hydrocharged reltuk tbantle22 timsehn zachmu max-hoffman"
contains() {
[[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && echo "::set-output name=valid::true" || exit 0
[[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && echo "valid=true" >> $GITHUB_OUTPUT || exit 0
}
contains "$validcommentors" "$1"
@@ -38,6 +38,11 @@ if [ -z "$MODE" ]; then
exit 1
fi
nomsFormat="ldnbf"
if [ "$NOMS_BIN_FORMAT" == "__DOLT__" ]; then
nomsFormat="doltnbf"
fi
# use first 8 characters of TO_VERSION to differentiate
# jobs
short=${TO_VERSION:0:8}
@@ -49,7 +54,7 @@ sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s
timesuffix=`date +%s%N`
jobname="$actorShort-$timesuffix"
jobname="$actorShort-$nomsFormat-$timesuffix"
timeprefix=$(date +%Y/%m/%d)
+1 -1
View File
@@ -19,7 +19,7 @@ Workflows prefixed with `cd-` are used for releasing Dolt. Some of these workflo
## Benchmarking Workflows
Benchmarking workflows are used as an interface for deploying benchmarking jobs to one of our Kubernetes Clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered with `repository_dispatch` events. Notice that benchmarking workflows, like `workflows/performance-benchmarks-email-report.yaml` for example, trigger these events using the `peter-evans/repository-dispatch@v1` Action.
Benchmarking workflows are used as an interface for deploying benchmarking jobs to one of our Kubernetes Clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered with `repository_dispatch` events. Notice that benchmarking workflows, like `workflows/performance-benchmarks-email-report.yaml` for example, trigger these events using the `peter-evans/repository-dispatch@v2.0.0` Action.
These Kubernetes Jobs do not run on GitHub Actions Hosted Runners, so the workflow logs do not contain any information about the deployed Kubernetes Job or any errors it might have encountered. The workflow logs can only tell you if a Job was created successfully or not. To investigate an error or issue with a Job in our Kubernetes Cluster, see the debugging guide [here](https://github.com/dolthub/ld/blob/main/k8s/README.md#debug-performance-benchmarks-and-sql-correctness-jobs).
+11 -11
View File
@@ -18,7 +18,7 @@ jobs:
run: |
if [ "$REPO" == "go-mysql-server" ]
then
echo "::set-output name=label::gms-bump"
echo "label=gms-bump" >> $GITHUB_OUTPUT
else
echo "$REPO is unsupported"
exit 1
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Get Open Bump PRs
id: get-stale-prs
uses: actions/github-script@v4
uses: actions/github-script@v6
env:
LABEL: ${{ needs.get-label.outputs.label }}
with:
@@ -43,7 +43,7 @@ jobs:
try {
const { LABEL } = process.env;
const { owner, repo } = context.repo;
const res = await github.pulls.list({
const res = await github.rest.pulls.list({
owner,
repo,
state: 'open',
@@ -92,11 +92,11 @@ jobs:
outputs:
latest-pr: ${{ steps.latest-pr.outputs.pr_url }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
token: ${{ secrets.REPO_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
- name: Bump dependency
@@ -109,16 +109,16 @@ jobs:
run: |
if [ "${{ github.event.client_payload.assignee }}" == "zachmu" ]
then
echo "::set-output name=reviewer::Hydrocharged"
echo "reviewer=Hydrocharged" >> $GITHUB_OUTPUT
else
echo "::set-output name=reviewer::zachmu"
echo "reviewer=zachmu" >> $GITHUB_OUTPUT
fi
- name: Get short hash
id: short-sha
run: |
commit=${{ github.event.client_payload.head_commit_sha }}
short=${commit:0:8}
echo "::set-output name=short::$short"
echo "short=$short" >> $GITHUB_OUTPUT
- name: Create and Push new branch
run: |
git config --global --add user.name "${{ github.event.client_payload.assignee }}"
@@ -151,7 +151,7 @@ jobs:
steps:
- name: Comment/Close Stale PRs
id: get-stale-prs
uses: actions/github-script@v4
uses: actions/github-script@v6
env:
PULL: ${{ toJson(matrix.pull) }}
SUPERSEDED_BY: ${{ needs.open-bump-pr.outputs.latest-pr }}
@@ -167,14 +167,14 @@ jobs:
if (pull.keepAlive) process.exit(0);
console.log(`Closing open pr ${pull.number}`);
await github.issues.createComment({
await github.rest.issues.createComment({
issue_number: pull.number,
owner,
repo,
body: `This PR has been superseded by ${SUPERSEDED_BY}`
});
await github.pulls.update({
await github.rest.pulls.update({
owner,
repo,
pull_number: pull.number,
+2 -2
View File
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Create Homebrew PR
uses: mislav/bump-homebrew-formula-action@v1
uses: mislav/bump-homebrew-formula-action@v2.1
if: ${{ github.event_name == 'repository_dispatch' }}
with:
formula-name: dolt
@@ -30,7 +30,7 @@ jobs:
env:
COMMITTER_TOKEN: ${{secrets.REPO_ACCESS_TOKEN}}
- name: Create Homebrew PR
uses: mislav/bump-homebrew-formula-action@v1
uses: mislav/bump-homebrew-formula-action@v2.1
if: ${{ github.event_name == 'workflow_dispatch' }}
with:
formula-name: dolt
+1 -1
View File
@@ -28,7 +28,7 @@ jobs:
version="${{ github.event.inputs.version }}"
fi
echo "::set-output name=version::$version"
echo "version=$version" >> $GITHUB_OUTPUT
winget-bump:
needs: get-version
@@ -19,16 +19,16 @@ jobs:
if [ "$EVENT_NAME" == "workflow_dispatch" ]
then
release_id=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/dolthub/dolt/releases/tags/v${{ github.event.inputs.version }} | jq '.id')
echo "::set-output name=version::${{ github.event.inputs.version }}"
echo "::set-output name=release_id::$release_id"
echo "version=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT
echo "release_id=$release_id" >> $GITHUB_OUTPUT
else
echo "::set-output name=version::${{ github.event.client_payload.version }}"
echo "::set-output name=release_id::${{ github.event.client_payload.release_id }}"
echo "version=${{ github.event.client_payload.version }}" >> $GITHUB_OUTPUT
echo "release_id=${{ github.event.client_payload.release_id }}" >> $GITHUB_OUTPUT
fi
env:
EVENT_NAME: ${{ github.event_name }}
- name: Checkout Release Notes Generator
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
repository: dolthub/release-notes-generator
token: ${{ secrets.REPO_ACCESS_TOKEN }}
@@ -46,7 +46,7 @@ jobs:
env:
TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
- name: Post Changelog to Release
uses: actions/github-script@v4
uses: actions/github-script@v6
with:
debug: true
github-token: ${{ secrets.REPO_ACCESS_TOKEN }}
@@ -55,7 +55,7 @@ jobs:
const path = require('path')
try {
const body = fs.readFileSync(path.join(process.env.WORKSPACE, "changelog.txt"), { encoding: "utf8" })
const res = await github.repos.updateRelease({
const res = await github.rest.repos.updateRelease({
owner: "dolthub",
repo: "dolt",
release_id: parseInt(process.env.RELEASE_ID, 10),
@@ -0,0 +1,63 @@
name: Push Docker Image to DockerHub
on:
workflow_dispatch:
inputs:
version:
description: 'SemVer format release tag, i.e. 0.24.5'
required: true
repository_dispatch:
types: [ push-docker-image ]
jobs:
docker-image-push:
name: Push Docker Image
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64,linux/arm64
- name: Build and push dolt image
uses: docker/build-push-action@v3
with:
platforms: linux/amd64,linux/arm64
context: .
file: ./docker/Dockerfile
push: true
tags: dolthub/dolt:${{ github.event.inputs.version || github.event.client_payload.version }} , dolthub/dolt:latest
build-args: |
DOLT_VERSION=${{ github.event.inputs.version || github.event.client_payload.version }}
- name: Build and push dolt-sql-server image
uses: docker/build-push-action@v3
with:
platforms: linux/amd64,linux/arm64
context: .
file: ./docker/serverDockerfile
push: true
tags: dolthub/dolt-sql-server:${{ github.event.inputs.version || github.event.client_payload.version }} , dolthub/dolt-sql-server:latest
build-args: |
DOLT_VERSION=${{ github.event.inputs.version || github.event.client_payload.version }}
- name: Update Docker Hub Readme for dolt image
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
repository: dolthub/dolt
readme-filepath: ./docker/README.md
- name: Update Docker Hub Readme for dolt-sql-server image
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
repository: dolthub/dolt-sql-server
readme-filepath: ./docker/serverREADME.md
+27 -15
View File
@@ -21,7 +21,7 @@ jobs:
then
version="${version:1}"
fi
echo "::set-output name=version::$version"
echo "version=$version" >> $GITHUB_OUTPUT
create-release:
needs: format-version
@@ -31,26 +31,27 @@ jobs:
release_id: ${{ steps.create_release.outputs.id }}
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Update dolt version command
run: sed -i -e 's/ Version = ".*"/ Version = "'"$NEW_VERSION"'"/' "$FILE"
env:
FILE: ${{ format('{0}/go/cmd/dolt/dolt.go', github.workspace) }}
NEW_VERSION: ${{ needs.format-version.outputs.version }}
- uses: EndBug/add-and-commit@v7
- uses: EndBug/add-and-commit@v9.1.1
with:
message: ${{ format('[ga-bump-release] Update Dolt version to {0} and release v{0}', needs.format-version.outputs.version) }}
add: ${{ format('{0}/go/cmd/dolt/dolt.go', github.workspace) }}
cwd: "."
pull: "--ff"
- name: Build Binaries
id: build_binaries
run: |
latest=$(git rev-parse HEAD)
echo "::set-output name=commitish::$latest"
echo "commitish=$latest" >> $GITHUB_OUTPUT
GO_BUILD_VERSION=1.19 go/utils/publishrelease/buildbinaries.sh
- name: Create Release
id: create_release
uses: actions/create-release@v1
uses: dolthub/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -61,7 +62,7 @@ jobs:
commitish: ${{ steps.build_binaries.outputs.commitish }}
- name: Upload Linux AMD64 Distro
id: upload-linux-amd64-distro
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -71,7 +72,7 @@ jobs:
asset_content_type: application/zip
- name: Upload Linux ARM64 Distro
id: upload-linux-arm64-distro
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -81,7 +82,7 @@ jobs:
asset_content_type: application/zip
- name: Upload OSX AMD64 Distro
id: upload-osx-amd64-distro
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -91,7 +92,7 @@ jobs:
asset_content_type: application/zip
- name: Upload OSX ARM64 Distro
id: upload-osx-arm64-distro
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -101,7 +102,7 @@ jobs:
asset_content_type: application/zip
- name: Upload Windows Distro
id: upload-windows-distro
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -111,7 +112,7 @@ jobs:
asset_content_type: application/zip
- name: Upload Install Script
id: upload-install-script
uses: actions/upload-release-asset@v1
uses: dolthub/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -125,7 +126,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Upload MSI
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: upload-msi
@@ -137,7 +138,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Release Notes
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: release-notes
@@ -148,7 +149,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Bump Homebrew
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: bump-homebrew
@@ -159,8 +160,19 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Trigger Performance Benchmarks
uses: peter-evans/repository-dispatch@v1
uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: release-dolt
client-payload: '{"version": "${{ needs.format-version.outputs.version }}", "actor": "${{ github.actor }}"}'
docker-image-push:
needs: [ format-version, create-release ]
runs-on: ubuntu-22.04
steps:
- name: Trigger Push Docker Image
uses: peter-evans/repository-dispatch@v1
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: push-docker-image
client-payload: '{"version": "${{ needs.format-version.outputs.version }}"}'
+14 -13
View File
@@ -22,10 +22,10 @@ jobs:
fail-fast: true
matrix:
os: [ ubuntu-22.04, macos-latest ]
dolt_fmt: [ "", "__DOLT_DEV__", "__DOLT__" ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
exclude:
- os: "macos-latest"
dolt_fmt: ["__DOLT_DEV__", "__DOLT__" ]
dolt_fmt: "__LD_1__"
env:
use_credentials: ${{ secrets.AWS_SECRET_ACCESS_KEY != '' && secrets.AWS_ACCESS_KEY_ID != '' }}
steps:
@@ -40,7 +40,7 @@ jobs:
fi
- name: Configure AWS Credentials
if: ${{ env.use_credentials == 'true' }}
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -48,18 +48,18 @@ jobs:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
role-duration-seconds: 3600
- name: Setup Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- name: Setup Python 3.x
uses: actions/setup-python@v2
uses: actions/setup-python@v4
with:
python-version: ^3.6
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
python-version: "3.10"
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: ^12
node-version: ^16
- name: Create CI Bin
run: |
mkdir -p ./.ci_bin
@@ -104,11 +104,12 @@ jobs:
id: parquet_cli
working-directory: ./.ci_bin
run: |
git clone https://github.com/apache/parquet-mr.git
cd parquet-mr/parquet-cli
curl -OL https://github.com/apache/parquet-mr/archive/refs/tags/apache-parquet-1.12.3.tar.gz
tar zxvf apache-parquet-1.12.3.tar.gz
cd parquet-mr-apache-parquet-1.12.3/parquet-cli
mvn clean install -DskipTests
runtime_jar="$(pwd)"/target/parquet-cli-1.13.0-SNAPSHOT-runtime.jar
echo "::set-output name=runtime_jar::$runtime_jar"
runtime_jar="$(pwd)"/target/parquet-cli-1.12.3-runtime.jar
echo "runtime_jar=$runtime_jar" >> $GITHUB_OUTPUT
- name: Check expect
run: expect -v
- name: Test all Unix
+14 -14
View File
@@ -17,11 +17,11 @@ jobs:
outputs:
files: ${{ steps.get_file_list.outputs.files }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
if: ${{ github.event_name == 'repository_dispatch' }}
with:
ref: ${{ github.event.client_payload.ref }}
- uses: actions/checkout@v2
- uses: actions/checkout@v3
if: ${{ github.event_name == 'workflow_dispatch' }}
- name: Get file list
id: get_file_list
@@ -32,7 +32,7 @@ jobs:
IFS=$'\n'
file_arr=($files)
echo "::set-output name=files::${file_arr[@]}"
echo "files=${file_arr[@]}" >> $GITHUB_OUTPUT
IFS=$SAVEIFS
working-directory: ./integration-tests/bats
@@ -45,7 +45,7 @@ jobs:
steps:
- name: Format
id: format_files
uses: actions/github-script@v4
uses: actions/github-script@v6
env:
FILES: ${{ needs.get-files.outputs.files }}
with:
@@ -85,7 +85,7 @@ jobs:
fi
- name: Configure AWS Credentials
if: ${{ env.use_credentials == 'true' }}
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -93,23 +93,23 @@ jobs:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
role-duration-seconds: 3600
- name: Setup Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- name: Setup Python 3.x
uses: actions/setup-python@v2
uses: actions/setup-python@v4
with:
python-version: ^3.6
- uses: actions/checkout@v2
python-version: "3.10"
- uses: actions/checkout@v3
if: ${{ github.event_name == 'repository_dispatch' }}
with:
ref: ${{ github.event.client_payload.ref }}
- uses: actions/checkout@v2
- uses: actions/checkout@v3
if: ${{ github.event_name == 'workflow_dispatch' }}
- uses: actions/setup-node@v1
- uses: actions/setup-node@v3
with:
node-version: ^12
node-version: ^16
- name: Create CI Bin
run: |
mkdir -p ./.ci_bin
@@ -157,9 +157,9 @@ jobs:
runs-on: ubuntu-22.04
if: always() && (needs.test-per-file.result == 'failure')
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+2 -2
View File
@@ -14,10 +14,10 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Setup Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Check all
working-directory: ./go
# Keep this in sync with //go/utils/prepr/prepr.sh.
@@ -21,14 +21,14 @@ jobs:
os: [ ubuntu-22.04 ]
steps:
- name: Setup Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: ^12
node-version: ^16
- name: Create CI Bin
run: |
mkdir -p ./.ci_bin
@@ -44,8 +44,8 @@ jobs:
go build -mod=readonly -o ../.ci_bin/dolt ./cmd/dolt/.
- name: Setup Dolt Config
run: |
dolt config --global --add user.name 'Liquidata Actions'
dolt config --global --add user.email 'actions@liquidata.co'
dolt config --global --add user.name 'DoltHub Actions'
dolt config --global --add user.email 'actions@dolthub.com'
- name: Test all
run: ./runner.sh
working-directory: ./integration-tests/compatibility
@@ -17,7 +17,7 @@ jobs:
name: Run tests
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Copy go package
run: cp -r ./go ./integration-tests/go
- name: Test data dump loading integrations
@@ -1,38 +0,0 @@
name: __DOLT__ Enginetests
on:
pull_request:
branches: [ main ]
paths:
- 'go/**'
workflow_dispatch:
concurrency:
group: ci-dolt1-format-go-tests-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
test:
name: Go tests (new format)
defaults:
run:
shell: bash
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-22.04, windows-latest]
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.19
id: go
- uses: actions/checkout@v2
- name: Test All with New Format
working-directory: ./go
run: |
go test -timeout 30m "./libraries/doltcore/sqle/enginetest/..."
env:
MATRIX_OS: ${{ matrix.os }}
DOLT_DEFAULT_BIN_FORMAT: "__DOLT__"
+5 -4
View File
@@ -15,10 +15,10 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Setup Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref || github.ref }}
token: ${{ secrets.REPO_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
@@ -40,11 +40,12 @@ jobs:
run: |
changes=$(git status --porcelain)
if [ ! -z "$changes" ]; then
echo "::set-output name=has-changes::true"
echo "has-changes=true" >> $GITHUB_OUTPUT
fi
- uses: EndBug/add-and-commit@v7
- uses: EndBug/add-and-commit@v9.1.1
if: ${{ steps.detect-changes.outputs.has-changes == 'true' }}
with:
message: "[ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh"
add: "."
cwd: "."
pull: "--ff"
@@ -18,14 +18,14 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-22.04 ]
dolt_fmt: [ "", "__DOLT_DEV__" ]
dolt_fmt: [ "__DOLT__", "__LD_1__", "__DOLT_DEV__" ]
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Test All
working-directory: ./go
run: |
+6 -6
View File
@@ -22,17 +22,17 @@ jobs:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-22.04, windows-latest]
dolt_fmt: [ "" ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
include:
- os: "ubuntu-22.04"
dolt_fmt: "__DOLT_DEV__"
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Test All
working-directory: ./go
run: |
@@ -74,17 +74,17 @@ jobs:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-22.04, windows-latest]
dolt_fmt: [ "" ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
include:
- os: "ubuntu-22.04"
dolt_fmt: "__DOLT_DEV__"
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Test All
working-directory: ./go
run: |
+1 -1
View File
@@ -17,7 +17,7 @@ jobs:
name: Run tests
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Copy go package
run: cp -r ./go ./integration-tests/go
- name: Test mysql client integrations
@@ -0,0 +1,52 @@
name: sql-server Integration Tests
on:
pull_request:
branches: [ main ]
paths:
- 'go/**'
- 'integration-tests/go-sql-server-driver/**'
concurrency:
group: ci-sql-server-integration-tests-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
test:
name: sql-server Integration Tests
defaults:
run:
shell: bash
runs-on: ${{ matrix.os }}
strategy:
fail-fast: true
matrix:
os: [ ubuntu-22.04, macos-latest ] # [ ubuntu-22.04, macos-latest, windows-latest ]
dolt_fmt: [ "__DOLT__", "__LD_1__" ]
exclude:
- os: "macos-latest"
dolt_fmt: ["__LD_1__" ]
- os: "windows-latest"
dolt_fmt: ["__LD_1__" ]
steps:
- name: Setup Go 1.x
uses: actions/setup-go@v3
with:
go-version: ^1.19
id: go
- name: Create CI Bin
run: |
mkdir -p ./.ci_bin
- uses: actions/checkout@v3
- name: Install Dolt
working-directory: ./go
run: |
go build -mod=readonly -o ../.ci_bin/dolt ./cmd/dolt/
- name: Test all
env:
DOLT_FMT: ${{ matrix.dolt_fmt }}
run: |
if [ -n "$DOLT_FMT" ]; then export DOLT_DEFAULT_BIN_FORMAT="$DOLT_FMT"; fi
export DOLT_BIN_PATH="$(pwd)/../../.ci_bin/dolt"
go test .
working-directory: ./integration-tests/go-sql-server-driver
@@ -17,7 +17,7 @@ jobs:
name: Test Sysbench Runner
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Copy Dockerfile
run: cp -r ./go/performance/continuous_integration/. .
- name: Test sysbench runner
+3 -3
View File
@@ -9,9 +9,9 @@ jobs:
runs-on: ubuntu-22.04
name: Email Team Members
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -29,7 +29,7 @@ jobs:
if [ ! -z "$RECIPIENT" ]; then
addresses="[\"$RECIPIENT\"]"
fi
echo "::set-output name=addresses::$addresses"
echo "addresses=$addresses" >> $GITHUB_OUTPUT
env:
RECIPIENT: ${{ github.event.client_payload.email_recipient }}
TEAM: '["${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}"]'
@@ -12,7 +12,7 @@ jobs:
outputs:
valid: ${{ steps.set_valid.outputs.valid }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Validate Commentor
id: set_valid
run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
@@ -39,26 +39,34 @@ jobs:
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
echo "benchmark=true" >> $GITHUB_OUTPUT
performance:
runs-on: ubuntu-22.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Trigger Benchmark Import K8s Workflow
name: Trigger Benchmark Import Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
- uses: dolthub/pull-request-comment-branch@v3
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Get pull number
uses: actions/github-script@v3
uses: actions/github-script@v6
id: get_pull_number
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
client-payload: '{"from_server": "dolt", "from_version": "${{ github.sha }}", "to_server": "dolt", "to_version": "${{ steps.comment-branch.outputs.head_sha }}", "mode": "pullRequest", "issue_number": "${{ steps.get_pull_number.outputs.pull_number }}", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/import-benchmarking/get-dolt-dolt-job-json.sh"}'
client-payload: |
{
"version": "${{ steps.comment-branch.outputs.head_sha }}",
"run_file": "ci.yaml",
"report": "three_way_compare.sql",
"commit_to_branch": "${{ steps.comment-branch.outputs.head_sha }}",
"actor": "${{ github.actor }}",
"issue_id": "${{ steps.get_pull_number.outputs.pull_number }}"
}
+170
View File
@@ -0,0 +1,170 @@
# Runs the import benchmarker against a requested Dolt commit, commits the
# results to the import-perf DoltHub database, and reports via email and/or
# a PR comment depending on the dispatch payload.
name: Import Benchmarks
on:
  repository_dispatch:
    types: [ benchmark-import ]
env:
  BENCH_DIR: 'go/performance/import_benchmarker'
  MYSQL_PORT: 3309
  MYSQL_PASSWORD: password
jobs:
  bench:
    name: Benchmark
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: true
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.x
        id: go
        uses: actions/setup-go@v3
        with:
          go-version: ^1.19
      - name: Dolt version
        id: version
        run: |
          version=${{ github.event.client_payload.version }}
          # Export the ref as a step output so the Send Email step can read
          # steps.version.outputs.ref; the bare shell assignment alone never
          # published anything, leaving that reference empty.
          echo "ref=$version" >> $GITHUB_OUTPUT
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.client_payload.version }}
      - name: Install dolt
        working-directory: ./go
        run: go install ./cmd/dolt
      - uses: shogo82148/actions-setup-mysql@v1
        with:
          mysql-version: '8.0'
          auto-start: true
          root-password: ${{ env.MYSQL_PASSWORD }}
          my-cnf: |
            local_infile=1
            socket=/tmp/mysqld2.sock
            port=${{ env.MYSQL_PORT }}
      - name: Setup MySQL
        run: mysql -uroot -p${{ env.MYSQL_PASSWORD }} -h127.0.0.1 -P${{ env.MYSQL_PORT }} -e 'create database test;'
      - name: Run bench
        id: bench
        working-directory: go/
        run: |
          out="$GITHUB_WORKSPACE/results.sql"
          testspec="../${{ env.BENCH_DIR }}/testdata/${{ github.event.client_payload.run_file }}"
          go run \
            "github.com/dolthub/dolt/${{ env.BENCH_DIR }}/cmd" \
            -test "$testspec" \
            -out "$out"
          # ::set-output is deprecated; use $GITHUB_OUTPUT like the other
          # workflows migrated in this change.
          echo "result_path=$out" >> $GITHUB_OUTPUT
      - name: Report
        id: report
        run: |
          gw=$GITHUB_WORKSPACE
          in="${{ steps.bench.outputs.result_path }}"
          query="$(pwd)/${{ env.BENCH_DIR }}/reporting/${{ github.event.client_payload.report }}"
          out="$gw/results.csv"
          dolt_dir="$gw/import-perf"
          dolt config --global --add user.email "import-perf@dolthub.com"
          dolt config --global --add user.name "import-perf"
          echo '${{ secrets.DOLTHUB_IMPORT_PERF_CREDS_VALUE }}' | dolt creds import
          dolt clone import-perf/import-perf "$dolt_dir"
          cd "$dolt_dir"
          branch="${{ github.event.client_payload.commit_to_branch }}"
          # checkout branch; quote the command substitution so an empty
          # result cannot break the -z test
          if [ -z "$(dolt sql -q "select 1 from dolt_branches where name = '$branch';")" ]; then
            dolt checkout -b "$branch"
          else
            dolt checkout "$branch"
          fi
          dolt sql -q "drop table if exists import_perf_results"
          # load results
          dolt sql < "$in"
          # push results to dolthub
          dolt add import_perf_results
          dolt commit -m "CI commit"
          dolt push -f origin "$branch"
          # generate report
          dolt sql -r csv < "$query" > "$out"
          cat "$out"
          echo "report_path=$out" >> $GITHUB_OUTPUT
      - name: Format HTML
        id: html
        # The comparison must live inside the expression; with the original
        # `${{ expr }} != ""` form the literal text ` != ""` was appended to
        # the evaluated value, making the condition always truthy.
        if: ${{ github.event.client_payload.email_recipient != '' }}
        run: |
          gw="$GITHUB_WORKSPACE"
          in="${{ steps.report.outputs.report_path }}"
          out="$gw/results.html"
          echo "<table>" > "$out"
          print_header=true
          while read line; do
            if "$print_header"; then
              echo "  <tr><th>${line//,/</th><th>}</th></tr>" >> "$out"
              print_header=false
              continue
            fi
            echo "  <tr><td>${line//,/</td><td>}</td></tr>" >> "$out"
          done < "$in"
          echo "</table>" >> "$out"
          cat "$out"
          echo "html=$out" >> $GITHUB_OUTPUT
      - name: Configure AWS Credentials
        if: ${{ github.event.client_payload.email_recipient != '' }}
        uses: aws-actions/configure-aws-credentials@v1-node16
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      - name: Send Email
        uses: ./.github/actions/ses-email-action
        if: ${{ github.event.client_payload.email_recipient != '' }}
        with:
          region: us-west-2
          toAddresses: '["${{ github.event.client_payload.email_recipient }}"]'
          version: ${{ steps.version.outputs.ref }}
          format: '__DOLT__'
          dataFile: ${{ steps.html.outputs.html }}
      - name: Read CSV
        if: ${{ github.event.client_payload.issue_id != '' }}
        id: csv
        uses: juliangruber/read-file-action@v1
        with:
          path: "${{ steps.report.outputs.report_path }}"
      - name: Create MD
        if: ${{ github.event.client_payload.issue_id != '' }}
        uses: petems/csv-to-md-table-action@master
        id: md
        with:
          csvinput: ${{ steps.csv.outputs.content }}
      - uses: mshick/add-pr-comment@v2
        if: ${{ github.event.client_payload.issue_id != '' }}
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          issue: ${{ github.event.client_payload.issue_id }}
          message-failure: import benchmark failed
          message-cancelled: import benchmark cancelled
          allow-repeats: true
          message: |
            @${{ github.event.client_payload.actor }} __DOLT__
            ${{ steps.md.outputs.markdown-table }}
@@ -1,55 +0,0 @@
name: Benchmark Imports
on:
repository_dispatch:
types: [ benchmark-import ]
jobs:
performance:
runs-on: ubuntu-22.04
name: Benchmark Performance
strategy:
matrix:
dolt_fmt: [ "__LD_1__"]
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
run: |
curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
chmod +x ./aws-iam-authenticator && \
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
- name: Create and Auth kubeconfig
run: |
echo "$CONFIG" > kubeconfig
KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- name: Create Import Benchmarking K8s Job
run: ./.github/scripts/import-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: ${{ github.event.client_payload.from_server }}
FROM_VERSION: ${{ github.event.client_payload.from_version }}
TO_SERVER: ${{ github.event.client_payload.to_server }}
TO_VERSION: ${{ github.event.client_payload.to_version }}
MODE: ${{ github.event.client_payload.mode }}
ISSUE_NUMBER: ${{ github.event.client_payload.issue_number }}
ACTOR: ${{ github.event.client_payload.actor }}
ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
KUBECONFIG: "./kubeconfig"
INIT_BIG_REPO: ${{ github.event.client_payload.init_big_repo }}
NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}
+3 -3
View File
@@ -13,8 +13,8 @@ jobs:
dolt_fmt: [ "__LD_1__", "__DOLT__" ]
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
uses: actions/checkout@v3
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Install aws-iam-authenticator
@@ -24,7 +24,7 @@ jobs:
sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
aws-iam-authenticator version
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+3 -3
View File
@@ -13,12 +13,12 @@ jobs:
runs-on: ubuntu-22.04
name: Run Fuzzer
steps:
- uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
- uses: actions/checkout@v3
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+3 -3
View File
@@ -12,12 +12,12 @@ jobs:
matrix:
dolt_fmt: [ "__LD_1__", "__DOLT__" ]
steps:
- uses: actions/checkout@v2
- uses: azure/setup-kubectl@v2.0
- uses: actions/checkout@v3
- uses: azure/setup-kubectl@v3.0
with:
version: 'v1.23.6'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -10,18 +10,26 @@ jobs:
runs-on: ubuntu-22.04
name: Trigger Benchmark Latency, Benchmark Import, and SQL Correctness K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: sql-correctness
client-payload: '{"to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "actor_email": "max@dolthub.com", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
client-payload: '{"to_version": "${{ github.sha }}", "mode": "nightly", "actor": "${{ github.actor }}", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
client-payload: '{"to_server": "dolt", "to_version": "${{ github.sha }}", "from_server": "mysql", "from_version": "8.0.28", "mode": "nightly", "actor": "${{ github.actor }}", "actor_email": "vinai@dolthub.com", "template_script": "./.github/scripts/import-benchmarking/get-mysql-dolt-job-json.sh"}'
client-payload: |
{
"email_recipient": "${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}",
"version": "${{ github.sha }}",
"run_file": "ci.yaml",
"report": "three_way_compare.sql",
"commit_to_branch": "nightly",
"actor": "${{ github.actor }}"
}
@@ -26,9 +26,9 @@ jobs:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
echo "::set-output name=actor_email::$ACTOR_EMAIL"
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "actor=$ACTOR" >> $GITHUB_OUTPUT
echo "actor_email=$ACTOR_EMAIL" >> $GITHUB_OUTPUT
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
@@ -39,13 +39,21 @@ jobs:
needs: set-version-actor
name: Trigger Benchmark Latency and Benchmark Import K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
client-payload: '{"from_server": "mysql", "from_version": "8.0.28", "to_server": "dolt", "to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"}'
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-import
client-payload: '{"to_server": "dolt", "to_version": "${{ needs.set-version-actor.outputs.version }}", "from_server": "mysql", "from_version": "8.0.28", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/import-benchmarking/get-mysql-dolt-job-json.sh"}'
client-payload: |
{
"email_recipient": "${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}",
"version": "${{ github.sha }}",
"run_file": "ci.yaml",
"report": "three_way_compare.sql",
"commit_to_branch": "main",
"actor": "${{ github.actor }}"
}
@@ -12,7 +12,7 @@ jobs:
outputs:
valid: ${{ steps.set_valid.outputs.valid }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Validate Commentor
id: set_valid
run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
@@ -39,7 +39,7 @@ jobs:
if: ${{ steps.check.outputs.triggered == 'true' }}
id: set_benchmark
run: |
echo "::set-output name=benchmark::true"
echo "benchmark=true" >> $GITHUB_OUTPUT
performance:
runs-on: ubuntu-22.04
@@ -47,17 +47,17 @@ jobs:
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
name: Trigger Benchmark Latency K8s Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
- uses: dolthub/pull-request-comment-branch@v3
id: comment-branch
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- name: Get pull number
uses: actions/github-script@v3
uses: actions/github-script@v6
id: get_pull_number
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-latency
+4 -4
View File
@@ -11,9 +11,9 @@ jobs:
if: ${{ github.event.client_payload.issue_number != -1 }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -25,7 +25,7 @@ jobs:
KEY: ${{ github.event.client_payload.key }}
BUCKET: ${{ github.event.client_payload.bucket }}
- name: Post results to PR
uses: actions/github-script@v3
uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -37,7 +37,7 @@ jobs:
if (err) {
return console.log(err);
}
return github.issues.createComment({
return github.rest.issues.createComment({
issue_number,
owner,
repo,
+3 -3
View File
@@ -25,8 +25,8 @@ jobs:
- name: Set variables
id: set-vars
run: |
echo "::set-output name=version::$VERSION"
echo "::set-output name=actor::$ACTOR"
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "actor=$ACTOR" >> $GITHUB_OUTPUT
env:
VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
ACTOR: ${{ github.event.client_payload.actor || github.actor }}
@@ -36,7 +36,7 @@ jobs:
needs: set-version-actor
name: Trigger SQL Correctness K8s Workflow
steps:
- uses: peter-evans/repository-dispatch@v1
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: sql-correctness
+1
View File
@@ -6,6 +6,7 @@ venv
.DS_Store
.sqlhistory
.doltcfg
test.sh
# ignore cp'd sysbench runner testing files
+3 -3
View File
@@ -219,7 +219,7 @@ MySQL comes with a MySQL server called `mysqld` and a MySQL client called `mysql
mysql Ver 8.0.29 for macos12.2 on x86_64 (Homebrew)
```
Now, to connect the `mysql` client to Dolt, you have to force the MySQL client through the TCP interface by passing in a host and port. The default is the socket interface which Dolt supports, but is not on by default. The MySQL client also requires you specify a user, in this case `root`.
Now, to connect the `mysql` client to Dolt, you are going to force the MySQL client through the TCP interface by passing in a host and port. The default is the socket interface which Dolt supports, but is only available on `localhost`. So, it's better to show off the TCP interface. The MySQL client also requires you specify a user, in this case `root`.
```bash
% mysql --host 127.0.0.1 --port 3306 -uroot
@@ -584,10 +584,10 @@ mysql> select * from employees as of 'modifications';
5 rows in set (0.01 sec)
```
If I'd like to see the diff between the two branches, I can use the `dolt_diff()` table function. It takes the table name and two branches as arguments.
If I'd like to see the diff between the two branches, I can use the `dolt_diff()` table function. It takes two branches and the table name as arguments.
```
mysql> select * from dolt_diff('employees', 'main','modifications');
mysql> select * from dolt_diff('main', 'modifications', 'employees');
+--------------+---------------+-------+---------------+-------------------------+----------------+-----------------+---------+-------------+-------------------------+-----------+
| to_last_name | to_first_name | to_id | to_commit | to_commit_date | from_last_name | from_first_name | from_id | from_commit | from_commit_date | diff_type |
+--------------+---------------+-------+---------------+-------------------------+----------------+-----------------+---------+-------------+-------------------------+-----------+
+12
View File
@@ -0,0 +1,12 @@
# syntax=docker/dockerfile:1.3-labs
# Minimal image wrapping the released dolt CLI binary: download the release
# tarball for DOLT_VERSION, install the binary, and make it the entrypoint.
#
# NOTE(review): the base image is pinned to $BUILDPLATFORM while the tarball
# is chosen by $BUILDARCH — for cross-platform builds confirm whether
# TARGETPLATFORM/TARGETARCH were intended instead.
FROM --platform=$BUILDPLATFORM ubuntu:22.04
# Release version to install, e.g. 0.50.8 (without the leading "v").
ARG DOLT_VERSION
# Architecture suffix of the release tarball, e.g. amd64 or arm64.
ARG BUILDARCH
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-${BUILDARCH}.tar.gz dolt-linux-${BUILDARCH}.tar.gz
# Unpack, install the binary, and remove the download to keep the layer small.
RUN tar zxvf dolt-linux-${BUILDARCH}.tar.gz && \
    cp dolt-linux-${BUILDARCH}/bin/dolt /usr/local/bin && \
    rm -rf dolt-linux-${BUILDARCH} dolt-linux-${BUILDARCH}.tar.gz
# Run dolt directly; container arguments become dolt subcommands.
ENTRYPOINT ["/usr/local/bin/dolt"]
+78
View File
@@ -0,0 +1,78 @@
# Dolt is Git for Data!
[Dolt](https://doltdb.com) is a SQL database that you can fork, clone, branch, merge, push
and pull just like a Git repository. Connect to Dolt just like any
MySQL database to run queries or update the data using SQL
commands. Use the command line interface to import CSV files, commit
your changes, push them to a remote, or merge your teammate's changes.
All the commands you know for Git work exactly the same for Dolt. Git
versions files, Dolt versions tables. It's like Git and MySQL had a
baby.
We also built [DoltHub](https://www.dolthub.com), a place to share
Dolt databases. We host public data for free. If you want to host
your own version of DoltHub, we have [DoltLab](https://www.doltlab.com). If you want us to run a Dolt server for you, we have [Hosted Dolt](https://hosted.doltdb.com).
[Join us on Discord](https://discord.com/invite/RFwfYpu) to say hi and
ask questions, or [check out our roadmap](https://docs.dolthub.com/other/roadmap)
to see what we're building next.
## What's it for?
Lots of things! Dolt is a generally useful tool with countless
applications. But if you want some ideas, [here's how people are using
it so far](https://www.dolthub.com/blog/2022-07-11-dolt-case-studies/).
Learn more about Dolt use cases, configuration and guides to use dolt on our [documentation page](https://docs.dolthub.com/introduction/what-is-dolt).
# How to use this image
This image is for the Dolt CLI, which has the same commands as `git`, with some extras. Running this image without any
arguments is equivalent to running the `dolt` command locally.
```shell
$ docker run dolthub/dolt:latest
Valid commands for dolt are
init - Create an empty Dolt data repository.
status - Show the working tree status.
add - Add table changes to the list of staged table changes.
diff - Diff a table.
reset - Remove table changes from the list of staged table changes.
clean - Remove untracked tables from working set.
commit - Record changes to the repository.
sql - Run a SQL query against tables in repository.
sql-server - Start a MySQL-compatible server.
sql-client - Starts a built-in MySQL client.
log - Show commit logs.
branch - Create, list, edit, delete branches.
checkout - Checkout a branch or overwrite a table from HEAD.
merge - Merge a branch.
conflicts - Commands for viewing and resolving merge conflicts.
cherry-pick - Apply the changes introduced by an existing commit.
revert - Undo the changes introduced in a commit.
clone - Clone from a remote data repository.
fetch - Update the database from a remote data repository.
pull - Fetch from a dolt remote data repository and merge.
push - Push to a dolt remote.
config - Dolt configuration.
remote - Manage set of tracked repositories.
backup - Manage a set of server backups.
login - Login to a dolt remote host.
creds - Commands for managing credentials.
ls - List tables in the working set.
schema - Commands for showing and importing table schemas.
table - Commands for copying, renaming, deleting, and exporting tables.
tag - Create, list, delete tags.
blame - Show what revision and author last modified each row of a table.
constraints - Commands for handling constraints.
migrate - Executes a database migration to use the latest Dolt data format.
read-tables - Fetch table(s) at a specific commit into a new dolt repo
gc - Cleans up unreferenced data from the repository.
filter-branch - Edits the commit history using the provided query.
merge-base - Find the common ancestor of two commits.
version - Displays the current Dolt cli version.
dump - Export all tables in the working set into a file.
```
This image is also useful as a base image for building custom Docker images.
+161
View File
@@ -0,0 +1,161 @@
#!/bin/bash
set -eo pipefail

# Logging helpers, modeled on the official MySQL image's entrypoint.

# mysql_log TYPE [TEXT...]
# Prefix TEXT (or stdin, when no TEXT arguments are given) with an
# RFC-3339 timestamp and the given severity tag, and print it.
mysql_log() {
	local severity="$1"; shift
	# default to the remaining arguments; fall back to stdin when none given
	local message="$*"
	if [ "$#" -eq 0 ]; then
		message="$(cat)"
	fi
	local stamp
	stamp="$(date --rfc-3339=seconds)"
	printf '%s [%s] [Entrypoint]: %s\n' "$stamp" "$severity" "$message"
}

# Informational message to stdout.
mysql_note() { mysql_log Note "$@"; }

# Warning message to stderr.
mysql_warn() { mysql_log Warn "$@" >&2; }

# Error message to stderr; aborts the entrypoint.
mysql_error() {
	mysql_log ERROR "$@" >&2
	exit 1
}
# Fixed filesystem locations used by this entrypoint.
# Server data directory (conventionally the volume mount point).
CONTAINER_DATA_DIR="/var/lib/dolt"
# Optional user-supplied dolt configuration (single .json file expected).
DOLT_CONFIG_DIR="/etc/dolt/doltcfg.d"
# Optional server configuration (single .yaml file expected).
SERVER_CONFIG_DIR="/etc/dolt/servercfg.d"
# Dolt's config directory, relative to $HOME.
DOLT_ROOT_PATH="/.dolt"

# create all dirs in path
# NOTE(review): _create_dir appears unused within this script — confirm a
# caller exists before relying on it, or remove it.
_create_dir() {
    local path="$1"
    mkdir -p "$path"
}
# Abort the entrypoint unless an executable `dolt` binary is on PATH.
check_for_dolt() {
	# Keep the assignment on the `local` line: `local` masks which's exit
	# status, so a missing binary reaches the test below instead of
	# tripping `set -e`.
	local bin=$(which dolt)
	[ -x "$bin" ] || mysql_error "dolt binary executable not found"
}
# Return success (0) if any argument is a help flag that would make the
# server print usage and exit instead of starting; return 1 otherwise.
_mysql_want_help() {
	local a
	for a in "$@"; do
		case "$a" in
			-'?' | -h | --help) return 0 ;;
		esac
	done
	return 1
}
# arg $1 is the directory to search in
# arg $2 is the type file to search for
#
# Sets the global CONFIG_PROVIDED to the single "*.$2" file found in
# directory $1, or to the empty string when the directory is absent, holds
# no match, or holds more than one (defaults are used in that case).
get_config_file_path_if_exists() {
    CONFIG_PROVIDED=
    CONFIG_DIR=$1
    FILE_TYPE=$2
    if [ -d "$CONFIG_DIR" ]; then
        mysql_note "Checking for config provided in $CONFIG_DIR"
        # Search the directory by its absolute path. The previous
        # `find .$CONFIG_DIR` form made the path relative to the current
        # working directory and only worked when cwd happened to be "/".
        number_of_files_found=$(find "$CONFIG_DIR" -type f -name "*.$FILE_TYPE" | wc -l)
        if [ "$number_of_files_found" -gt 1 ]; then
            CONFIG_PROVIDED=
            mysql_warn "multiple config file found in $CONFIG_DIR, using default config"
        elif [ "$number_of_files_found" -eq 1 ]; then
            # exactly one match: reuse find instead of parsing ls output
            files_found=$(find "$CONFIG_DIR" -type f -name "*.$FILE_TYPE")
            mysql_note "$files_found file is found"
            CONFIG_PROVIDED=$files_found
        else
            CONFIG_PROVIDED=
        fi
    fi
}
# taken from https://github.com/docker-library/mysql/blob/master/8.0/docker-entrypoint.sh
# this function will run files found in /docker-entrypoint-initdb.d directory AFTER server is started
# usage: docker_process_init_files [file [file [...]]]
#    ie: docker_process_init_files /always-initdb.d/*
# process initializer files, based on file extensions
#
# NOTE(review): every *.sql* branch below calls docker_process_sql, which is
# not defined anywhere in this script — confirm it is provided elsewhere,
# otherwise those branches fail at runtime.
docker_process_init_files() {
	echo
	local f
	for f; do
		case "$f" in
			*.sh)
				# Shell scripts: executed if executable, sourced otherwise.
				# https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
				# https://github.com/docker-library/postgres/pull/452
				if [ -x "$f" ]; then
					mysql_note "$0: running $f"
					"$f"
				else
					mysql_note "$0: sourcing $f"
					. "$f"
				fi
				;;
			# SQL files, optionally compressed, are piped into the server.
			*.sql)     mysql_note "$0: running $f"; docker_process_sql < "$f"; echo ;;
			*.sql.bz2) mysql_note "$0: running $f"; bunzip2 -c "$f" | docker_process_sql; echo ;;
			*.sql.gz)  mysql_note "$0: running $f"; gunzip -c "$f" | docker_process_sql; echo ;;
			*.sql.xz)  mysql_note "$0: running $f"; xzcat "$f" | docker_process_sql; echo ;;
			*.sql.zst) mysql_note "$0: running $f"; zstd -dc "$f" | docker_process_sql; echo ;;
			# Anything else is skipped with a warning.
			*)         mysql_warn "$0: ignoring $f" ;;
		esac
		echo
	done
}
# Run the command given as arguments from the fixed data directory.
#
# NOTE(review): "$@" here expands to *this function's* arguments, and the
# only visible call site (_main) invokes start_server with no arguments,
# which makes this a no-op there apart from the cd — confirm the intended
# server command is passed in (or that the final `exec "$@"` in _main is
# what actually starts the server).
start_server() {
	# start the server in fixed data directory at /var/lib/dolt
	cd $CONTAINER_DATA_DIR
	"$@"
}
# if there is config file provided through /etc/dolt/doltcfg.d,
# we overwrite $HOME/.dolt/config_global.json file with this file.
#
# Relies on get_config_file_path_if_exists to populate the global
# CONFIG_PROVIDED; does nothing when no (single) json file is present.
set_dolt_config_if_defined() {
    get_config_file_path_if_exists "$DOLT_CONFIG_DIR" "json"
    # Quote the expansions: unquoted, a path containing whitespace breaks
    # both the test and the copy.
    if [ -n "$CONFIG_PROVIDED" ]; then
        /bin/cp -rf "$CONFIG_PROVIDED" "$HOME/$DOLT_ROOT_PATH/config_global.json"
    fi
}
# Entrypoint driver: normalize the container arguments into a
# `dolt sql-server` invocation, apply any mounted config, process init
# files, then exec the final command.
_main() {
	# check for dolt binary executable
	check_for_dolt

	if [ "${1:0:1}" = '-' ]; then
		# if there is any command line argument defined we use
		# them with default command `dolt sql-server --host=0.0.0.0 --port=3306`
		# why we use fixed host=0.0.0.0 and port=3306 in README
		set -- dolt sql-server --host=0.0.0.0 --port=3306 "$@"
	fi

	if [ "$1" = 'dolt' ] && [ "$2" = 'sql-server' ] && ! _mysql_want_help "$@"; then
		# parse the semver out of `dolt version` for log messages
		local dolt_version=$(dolt version | grep 'dolt version' | cut -f3 -d " ")
		mysql_note "Entrypoint script for Dolt Server $dolt_version starting."

		declare -g CONFIG_PROVIDED

		# dolt config will be set if user provided a single json file in /etc/dolt/doltcfg.d directory.
		# It will overwrite config_global.json file in $HOME/.dolt
		set_dolt_config_if_defined

		# if there is a single yaml provided in /etc/dolt/servercfg.d directory,
		# it will be used to start the server with --config flag
		# NOTE(review): $CONFIG_PROVIDED is unquoted below; a path with
		# whitespace would break the test and the --config flag — confirm.
		get_config_file_path_if_exists "$SERVER_CONFIG_DIR" "yaml"
		if [ ! -z $CONFIG_PROVIDED ]; then
			set -- "$@" --config=$CONFIG_PROVIDED
		fi

		# NOTE(review): called with no arguments, so inside start_server
		# "$@" is empty — this only cd's to the data directory. The server
		# itself is started by the exec at the bottom, which means the
		# init files below are processed BEFORE the server is up, despite
		# the comment — confirm intended ordering.
		start_server

		# run any file provided in /docker-entrypoint-initdb.d directory after the server starts
		docker_process_init_files /docker-entrypoint-initdb.d/*

		mysql_note "Dolt Server $dolt_version is started."
	fi

	# replace this shell with the final command (PID 1 signal handling)
	exec "$@"
}

_main "$@"
+23
View File
@@ -0,0 +1,23 @@
# syntax=docker/dockerfile:1.3-labs
# Dolt SQL server image: install the released dolt binary in a builder
# stage, then layer on the MySQL-image-style entrypoint script.
#
# NOTE(review): both stages are pinned to $BUILDPLATFORM while the tarball
# is chosen by $BUILDARCH — for cross-platform builds confirm whether
# TARGETPLATFORM/TARGETARCH were intended instead.
FROM --platform=$BUILDPLATFORM ubuntu:22.04 as builder
# Release version to install, e.g. 0.50.8 (without the leading "v").
ARG DOLT_VERSION
# Architecture suffix of the release tarball, e.g. amd64 or arm64.
ARG BUILDARCH
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-${BUILDARCH}.tar.gz dolt-linux-${BUILDARCH}.tar.gz
# Unpack, install the binary, and remove the download to keep the layer small.
RUN tar zxvf dolt-linux-${BUILDARCH}.tar.gz && \
    cp dolt-linux-${BUILDARCH}/bin/dolt /usr/local/bin && \
    rm -rf dolt-linux-${BUILDARCH} dolt-linux-${BUILDARCH}.tar.gz

FROM --platform=$BUILDPLATFORM builder
# Directory scanned by the entrypoint for *.sh / *.sql init files.
RUN mkdir /docker-entrypoint-initdb.d
# Server data lives here; mount a host directory to persist databases.
VOLUME /var/lib/dolt
COPY docker/docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
EXPOSE 3306 33060
# Default: serve on all interfaces so port-mapping from the host works.
CMD [ "dolt", "sql-server", "--host=0.0.0.0" , "--port=3306" ]
+139
View File
@@ -0,0 +1,139 @@
# Dolt is Git for Data!
[Dolt](https://doltdb.com) is a SQL database that you can fork, clone, branch, merge, push
and pull just like a Git repository. Connect to Dolt just like any
MySQL database to run queries or update the data using SQL
commands. Use the command line interface to import CSV files, commit
your changes, push them to a remote, or merge your teammate's changes.
All the commands you know for Git work exactly the same for Dolt. Git
versions files, Dolt versions tables. It's like Git and MySQL had a
baby.
We also built [DoltHub](https://www.dolthub.com), a place to share
Dolt databases. We host public data for free. If you want to host
your own version of DoltHub, we have [DoltLab](https://www.doltlab.com). If you want us to run a Dolt server for you, we have [Hosted Dolt](https://hosted.doltdb.com).
[Join us on Discord](https://discord.com/invite/RFwfYpu) to say hi and
ask questions, or [check out our roadmap](https://docs.dolthub.com/other/roadmap)
to see what we're building next.
## What's it for?
Lots of things! Dolt is a generally useful tool with countless
applications. But if you want some ideas, [here's how people are using
it so far](https://www.dolthub.com/blog/2022-07-11-dolt-case-studies/).
# Dolt CLI
The `dolt` CLI has the same commands as `git`, with some extras.
```
$ dolt
Valid commands for dolt are
init - Create an empty Dolt data repository.
status - Show the working tree status.
add - Add table changes to the list of staged table changes.
diff - Diff a table.
reset - Remove table changes from the list of staged table changes.
clean - Remove untracked tables from working set.
commit - Record changes to the repository.
sql - Run a SQL query against tables in repository.
sql-server - Start a MySQL-compatible server.
sql-client - Starts a built-in MySQL client.
log - Show commit logs.
branch - Create, list, edit, delete branches.
checkout - Checkout a branch or overwrite a table from HEAD.
merge - Merge a branch.
conflicts - Commands for viewing and resolving merge conflicts.
cherry-pick - Apply the changes introduced by an existing commit.
revert - Undo the changes introduced in a commit.
clone - Clone from a remote data repository.
fetch - Update the database from a remote data repository.
pull - Fetch from a dolt remote data repository and merge.
push - Push to a dolt remote.
config - Dolt configuration.
remote - Manage set of tracked repositories.
backup - Manage a set of server backups.
login - Login to a dolt remote host.
creds - Commands for managing credentials.
ls - List tables in the working set.
schema - Commands for showing and importing table schemas.
table - Commands for copying, renaming, deleting, and exporting tables.
tag - Create, list, delete tags.
blame - Show what revision and author last modified each row of a table.
constraints - Commands for handling constraints.
migrate - Executes a database migration to use the latest Dolt data format.
read-tables - Fetch table(s) at a specific commit into a new dolt repo
gc - Cleans up unreferenced data from the repository.
filter-branch - Edits the commit history using the provided query.
merge-base - Find the common ancestor of two commits.
version - Displays the current Dolt cli version.
dump - Export all tables in the working set into a file.
```
Learn more about Dolt use cases, configuration and guides to use dolt on our [documentation page](https://docs.dolthub.com/introduction/what-is-dolt).
# How to use this image
This image is for Dolt SQL Server and works much like the MySQL Docker image. Running this image without any arguments
is equivalent to running the `dolt sql-server --host 0.0.0.0 --port 3306` command locally. The host and port are fixed
so that you can connect to the server inside the container from the local host system through
port-mapping.
To check out supported options for `dolt sql-server`, you can run the image with `--help` flag.
```shell
$ docker run dolthub/dolt-sql-server:latest --help
```
### Connect to the server in the container from the host system
To be able to connect to the server running in the container, we need to set up a port to connect to locally that
maps to the port in the container. The host is set to `0.0.0.0` for accepting connections to any available network
interface.
```shell
$ docker run -p 3307:3306 dolthub/dolt-sql-server:latest
```
Now that a server is running in the container, you can connect to it by specifying the host, port 3307, and the user `root`,
since `root` is the default user and we didn't provide any configuration when starting the server.
For example, you can run mysql client to connect to the server like this:
```shell
$ mysql --host 0.0.0.0 -P 3307 -u root
```
### Define configuration for the server
You can define the server configuration either as command-line arguments or in a YAML configuration file.
Command-line arguments are simply appended after the docker command.
```shell
$ docker run -p 3307:3306 dolthub/dolt-sql-server:latest -l debug --no-auto-commit
```
Alternatively, you can mount a local directory onto specific directories in the container.
The special directory for server configuration is `/etc/dolt/servercfg.d/`. Only one `.yaml` configuration
file may be placed in this directory; if there are multiple, the default configuration is used instead. If the
configuration file is located at `/Users/jennifer/docker/server/config.yaml`, use the `-v` flag to mount the local
directory `/Users/jennifer/docker/server/` onto the `/etc/dolt/servercfg.d/` directory in the container:
```shell
$ docker run -p 3307:3306 -v /Users/jennifer/docker/server/:/etc/dolt/servercfg.d/ dolthub/dolt-sql-server:latest
```
The Dolt configuration and data directories can be configured similarly:
- The dolt configuration directory is `/etc/dolt/doltcfg.d/`
There should be one `.json` dolt configuration file. It will replace the global dolt configuration file in the
container.
- We set the location of where data to be stored to default location at `/var/lib/dolt/` in the container.
The data directory does not need to be defined in server configuration for container, but to store the data
on the host system, it can also be mounted to this default location.
```shell
$ docker run -p 3307:3306 -v /Users/jennifer/docker/databases/:/var/lib/dolt/ dolthub/dolt-sql-server:latest
```
+74 -793
View File
@@ -868,33 +868,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= git.sr.ht/~sbinet/gg licensed under: =
Copyright (C) 2022 The gg Authors
Copyright (C) 2016 Michael Fogleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE.md 6ce9a5e176cfbd7286201308cde581cd0074888742a82ceffeceacee =
================================================================================
================================================================================
= github.com/HdrHistogram/hdrhistogram-go licensed under: =
@@ -953,230 +926,23 @@ SOFTWARE.
================================================================================
================================================================================
= github.com/ajstarks/svgo licensed under: =
= github.com/aliyun/aliyun-oss-go-sdk licensed under: =
Creative Commons Attribution 4.0 International Public License
Copyright (c) 2015 aliyun.com
By exercising the Licensed Rights (defined below), You accept and agree to
be bound by the terms and conditions of this Creative Commons Attribution
4.0 International Public License ("Public License"). To the extent this
Public License may be interpreted as a contract, You are granted the
Licensed Rights in consideration of Your acceptance of these terms and
conditions, and the Licensor grants You such rights in consideration
of benefits the Licensor receives from making the Licensed Material
available under these terms and conditions.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
Section 1 Definitions.
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
Adapted Material means material subject to Copyright and Similar Rights
that is derived from or based upon the Licensed Material and in which
the Licensed Material is translated, altered, arranged, transformed, or
otherwise modified in a manner requiring permission under the Copyright
and Similar Rights held by the Licensor. For purposes of this Public
License, where the Licensed Material is a musical work, performance,
or sound recording, Adapted Material is always produced where the
Licensed Material is synched in timed relation with a moving image.
Adapter's License means the license You apply to Your Copyright and
Similar Rights in Your contributions to Adapted Material in accordance
with the terms and conditions of this Public License. Copyright and
Similar Rights means copyright and/or similar rights closely related to
copyright including, without limitation, performance, broadcast, sound
recording, and Sui Generis Database Rights, without regard to how the
rights are labeled or categorized. For purposes of this Public License,
the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights. Effective Technological Measures means those measures that,
in the absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright Treaty
adopted on December 20, 1996, and/or similar international agreements.
Exceptions and Limitations means fair use, fair dealing, and/or any other
exception or limitation to Copyright and Similar Rights that applies to
Your use of the Licensed Material. Licensed Material means the artistic
or literary work, database, or other material to which the Licensor
applied this Public License. Licensed Rights means the rights granted
to You subject to the terms and conditions of this Public License, which
are limited to all Copyright and Similar Rights that apply to Your use
of the Licensed Material and that the Licensor has authority to license.
Licensor means the individual(s) or entity(ies) granting rights under
this Public License. Share means to provide material to the public by
any means or process that requires permission under the Licensed Rights,
such as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the public
may access the material from a place and at a time individually chosen
by them. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of the
Council of 11 March 1996 on the legal protection of databases, as amended
and/or succeeded, as well as other essentially equivalent rights anywhere
in the world. You means the individual or entity exercising the Licensed
Rights under this Public License. Your has a corresponding meaning.
Section 2 Scope.
License grant. Subject to the terms and conditions of this Public
License, the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to exercise the
Licensed Rights in the Licensed Material to: reproduce and Share the
Licensed Material, in whole or in part; and produce, reproduce, and
Share Adapted Material. Exceptions and Limitations. For the avoidance
of doubt, where Exceptions and Limitations apply to Your use, this
Public License does not apply, and You do not need to comply with
its terms and conditions. Term. The term of this Public License is
specified in Section 6(a). Media and formats; technical modifications
allowed. The Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created, and to make
technical modifications necessary to do so. The Licensor waives and/or
agrees not to assert any right or authority to forbid You from making
technical modifications necessary to exercise the Licensed Rights,
including technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License, simply making
modifications authorized by this Section 2(a)(4) never produces Adapted
Material. Downstream recipients. Offer from the Licensor Licensed
Material. Every recipient of the Licensed Material automatically receives
an offer from the Licensor to exercise the Licensed Rights under the terms
and conditions of this Public License. No downstream restrictions. You
may not offer or impose any additional or different terms or conditions
on, or apply any Effective Technological Measures to, the Licensed
Material if doing so restricts exercise of the Licensed Rights by any
recipient of the Licensed Material. No endorsement. Nothing in this
Public License constitutes or may be construed as permission to assert
or imply that You are, or that Your use of the Licensed Material is,
connected with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as provided in
Section 3(a)(1)(A)(i). Other rights.
Moral rights, such as the right of integrity, are not licensed under
this Public License, nor are publicity, privacy, and/or other similar
personality rights; however, to the extent possible, the Licensor waives
and/or agrees not to assert any such rights held by the Licensor to the
limited extent necessary to allow You to exercise the Licensed Rights, but
not otherwise. Patent and trademark rights are not licensed under this
Public License. To the extent possible, the Licensor waives any right
to collect royalties from You for the exercise of the Licensed Rights,
whether directly or through a collecting society under any voluntary or
waivable statutory or compulsory licensing scheme. In all other cases
the Licensor expressly reserves any right to collect such royalties.
Section 3 License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
Attribution.
If You Share the Licensed Material (including in modified form), You must:
retain the following if it is supplied by the Licensor with the Licensed
Material: identification of the creator(s) of the Licensed Material and
any others designated to receive attribution, in any reasonable manner
requested by the Licensor (including by pseudonym if designated); a
copyright notice; a notice that refers to this Public License; a notice
that refers to the disclaimer of warranties; a URI or hyperlink to the
Licensed Material to the extent reasonably practicable; indicate if You
modified the Licensed Material and retain an indication of any previous
modifications; and indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or hyperlink to,
this Public License. You may satisfy the conditions in Section 3(a)(1)
in any reasonable manner based on the medium, means, and context in which
You Share the Licensed Material. For example, it may be reasonable to
satisfy the conditions by providing a URI or hyperlink to a resource
that includes the required information. If requested by the Licensor,
You must remove any of the information required by Section 3(a)(1)(A)
to the extent reasonably practicable. If You Share Adapted Material You
produce, the Adapter's License You apply must not prevent recipients of
the Adapted Material from complying with this Public License. Section 4
Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that apply
to Your use of the Licensed Material:
for the avoidance of doubt, Section 2(a)(1) grants You the right to
extract, reuse, reproduce, and Share all or a substantial portion of the
contents of the database; if You include all or a substantial portion of
the database contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database Rights
(but not its individual contents) is Adapted Material; and You must comply
with the conditions in Section 3(a) if You Share all or a substantial
portion of the contents of the database. For the avoidance of doubt,
this Section 4 supplements and does not replace Your obligations under
this Public License where the Licensed Rights include other Copyright and
Similar Rights. Section 5 Disclaimer of Warranties and Limitation
of Liability.
Unless otherwise separately undertaken by the Licensor, to the
extent possible, the Licensor offers the Licensed Material as-is and
as-available, and makes no representations or warranties of any kind
concerning the Licensed Material, whether express, implied, statutory,
or other. This includes, without limitation, warranties of title,
merchantability, fitness for a particular purpose, non-infringement,
absence of latent or other defects, accuracy, or the presence or absence
of errors, whether or not known or discoverable. Where disclaimers of
warranties are not allowed in full or in part, this disclaimer may not
apply to You. To the extent possible, in no event will the Licensor
be liable to You on any legal theory (including, without limitation,
negligence) or otherwise for any direct, special, indirect, incidental,
consequential, punitive, exemplary, or other losses, costs, expenses,
or damages arising out of this Public License or use of the Licensed
Material, even if the Licensor has been advised of the possibility of
such losses, costs, expenses, or damages. Where a limitation of liability
is not allowed in full or in part, this limitation may not apply to You.
The disclaimer of warranties and limitation of liability provided above
shall be interpreted in a manner that, to the extent possible, most
closely approximates an absolute disclaimer and waiver of all liability.
Section 6 Term and Termination.
This Public License applies for the term of the Copyright and Similar
Rights licensed here. However, if You fail to comply with this
Public License, then Your rights under this Public License terminate
automatically. Where Your right to use the Licensed Material has
terminated under Section 6(a), it reinstates:
automatically as of the date the violation is cured, provided it is
cured within 30 days of Your discovery of the violation; or upon express
reinstatement by the Licensor. For the avoidance of doubt, this Section
6(b) does not affect any right the Licensor may have to seek remedies
for Your violations of this Public License. For the avoidance of doubt,
the Licensor may also offer the Licensed Material under separate terms
or conditions or stop distributing the Licensed Material at any time;
however, doing so will not terminate this Public License. Sections 1,
5, 6, 7, and 8 survive termination of this Public License. Section 7
Other Terms and Conditions.
The Licensor shall not be bound by any additional or different terms or
conditions communicated by You unless expressly agreed. Any arrangements,
understandings, or agreements regarding the Licensed Material not stated
herein are separate from and independent of the terms and conditions of
this Public License. Section 8 Interpretation.
For the avoidance of doubt, this Public License does not, and shall not be
interpreted to, reduce, limit, restrict, or impose conditions on any use
of the Licensed Material that could lawfully be made without permission
under this Public License. To the extent possible, if any provision of
this Public License is deemed unenforceable, it shall be automatically
reformed to the minimum extent necessary to make it enforceable. If
the provision cannot be reformed, it shall be severed from this Public
License without affecting the enforceability of the remaining terms
and conditions. No term or condition of this Public License will be
waived and no failure to comply consented to unless expressly agreed
to by the Licensor. Nothing in this Public License constitutes or may
be interpreted as a limitation upon, or waiver of, any privileges and
immunities that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority. Creative Commons is not
a party to its public licenses. Notwithstanding, Creative Commons may
elect to apply one of its public licenses to material it publishes and
in those instances will be considered the “Licensor.” The text of
the Creative Commons public licenses is dedicated to the public domain
under the CC0 Public Domain Dedication. Except for the limited purpose of
indicating that material is shared under a Creative Commons public license
or as otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark “Creative Commons” or any other trademark or
logo of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements, understandings,
or agreements concerning use of licensed material. For the avoidance of
doubt, this paragraph does not form part of the public licenses.
Creative Commons may be contacted at creativecommons.org.
= LICENSE 6970290672b172c6ac6ce9de62f8fd574d48de35e68b48af45dc4edc =
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE e38acb655cae71b0deffe4354c611894b747c461da744bbb5f3bc54f =
================================================================================
================================================================================
@@ -1786,7 +1552,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================================================
================================================================================
= github.com/cenkalti/backoff licensed under: =
= github.com/cenkalti/backoff/v4 licensed under: =
The MIT License (MIT)
@@ -1870,28 +1636,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE.txt 726f1b8f64f7e439b1b12c7cbde7b1427752a00ddea15019e4156465 =
================================================================================
================================================================================
= github.com/davecgh/go-spew licensed under: =
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
= LICENSE 1df7eb862ea59e064cc5b27e5d88aad979fad02e3755973892829af8 =
================================================================================
================================================================================
= github.com/denisbrodbeck/machineid licensed under: =
@@ -2593,38 +2337,6 @@ SOFTWARE.
= LICENSE a33ad37999b0aa5d38b8bc56a9c6b2d6287a7e2478ee822af7fa7a11 =
================================================================================
================================================================================
= github.com/dolthub/mmap-go licensed under: =
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 086af8ff5be785cbd4da914acec46f45197c2b0fd3b370cd140cedd3 =
================================================================================
================================================================================
= github.com/dolthub/vitess licensed under: =
@@ -3098,36 +2810,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= COPYING 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/go-fonts/liberation licensed under: =
Copyright ©2020 The go-fonts Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the go-fonts project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ec29fd02e2b1520da7df79262d6a977cb3986924353ae24b61affe9b =
================================================================================
================================================================================
= github.com/go-kit/kit licensed under: =
@@ -3157,36 +2839,6 @@ SOFTWARE.
= LICENSE 517fd017ba968d4bdbe3905b55314df7ea5e83d9d7422365dcee5566 =
================================================================================
================================================================================
= github.com/go-latex/latex licensed under: =
Copyright ©2020 The go-latex Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the go-latex project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 23be5c2f34988d946e85585e1099e63ff0962030b9612f582a793cda =
================================================================================
================================================================================
= github.com/go-logr/logr licensed under: =
@@ -3603,35 +3255,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 =
================================================================================
================================================================================
= github.com/go-pdf/fpdf licensed under: =
MIT License
Copyright (c) 2020 David Barnes
Copyright (c) 2017 Kurt Jung and contributors acknowledged in the documentation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE 58283e72fa52eb34341bbd16492272bca575265e4b3a0c769f3aa978 =
================================================================================
================================================================================
= github.com/go-sql-driver/mysql licensed under: =
@@ -4039,25 +3662,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE 58527ba2d199f39270f67bbe7b7878b370704ff4f3b715a9f7fa667b =
================================================================================
================================================================================
= github.com/golang/freetype licensed under: =
Use of the Freetype-Go software is subject to your choice of exactly one of
the following two licenses:
* The FreeType License, which is similar to the original BSD license with
an advertising clause, or
* The GNU General Public License (GPL), version 2 or later.
The text of these licenses are available in the licenses/ftl.txt and the
licenses/gpl.txt files respectively. They are also available at
http://freetype.sourceforge.net/license.html
The Luxi fonts in the testdata directory are licensed separately. See the
testdata/COPYING file for details.
= LICENSE 6c5ae159496bacd951e6cd937d5e6427b172a2a6284bfdf1954ae338 =
================================================================================
================================================================================
= github.com/golang/groupcache licensed under: =
@@ -4325,40 +3929,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 23da488bf4950b37bebc2eaa6de2a09e2301a6f4ed5ae2cd648aad9d =
================================================================================
================================================================================
= github.com/google/go-cmp licensed under: =
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE bda64ae869be18b50125d9cfe5c370eb7248e84a2324823e4d7f2295 =
================================================================================
================================================================================
= github.com/google/uuid licensed under: =
@@ -5446,215 +5016,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 6681c42f6974591d2056518a26201323fa7d42bdc4d64bfc12c332b3 =
================================================================================
================================================================================
= github.com/pquerna/cachecontrol licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/prometheus/client_golang licensed under: =
@@ -6661,34 +6022,6 @@ OTHER DEALINGS IN THE SOFTWARE.
= LICENSE 470f204648dd700d3c0229df525a9607693e6a3d9fd6422fe0212c62 =
================================================================================
================================================================================
= github.com/stretchr/testify licensed under: =
MIT License
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE 07f20b96549b71d39ebb2bf1e006f7b2885e3808423818000545119c =
================================================================================
================================================================================
= github.com/tealeg/xlsx licensed under: =
@@ -8354,40 +7687,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= golang.org/x/image licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= golang.org/x/net licensed under: =
@@ -8559,32 +7858,71 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= gonum.org/v1/plot licensed under: =
= golang.org/x/time licensed under: =
Copyright ©2013 The Gonum Authors. All rights reserved.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Gonum project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
modification, are permitted provided that the following conditions are
met:
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 318191149295a8b7a8dbac1ae8fb814d11997919e0484a5a995cb6ae =
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= golang.org/x/tools licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
@@ -9697,60 +9035,3 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 9820a37ca0fcacbc82c8eb2bdd3049706550a4ebf97ad7fde1310dec =
================================================================================
================================================================================
= gopkg.in/yaml.v3 licensed under: =
This project is covered by two different licenses: MIT and Apache.
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright staring in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 1fcda9aa5c036a1d3975c8c4a007e1b3c05f0e450567d8bdb46a6d61 =
================================================================================
+56 -4
View File
@@ -100,10 +100,18 @@ const (
BranchParam = "branch"
TrackFlag = "track"
AmendFlag = "amend"
NewFormatFlag = "new-format"
CommitFlag = "commit"
NoCommitFlag = "no-commit"
NoEditFlag = "no-edit"
OursFlag = "ours"
TheirsFlag = "theirs"
NumberFlag = "number"
NotFlag = "not"
MergesFlag = "merges"
ParentsFlag = "parents"
MinParentsFlag = "min-parents"
DecorateFlag = "decorate"
OneLineFlag = "oneline"
)
const (
@@ -134,6 +142,13 @@ func CreateCommitArgParser() *argparser.ArgParser {
return ap
}
func CreateConflictsResolveArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(OursFlag, "", "For all conflicts, take the version from our branch and resolve the conflict")
ap.SupportsFlag(TheirsFlag, "", "For all conflicts, take the version from their branch and resolve the conflict")
return ap
}
func CreateMergeArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(NoFFParam, "", "Create a merge commit even when the merge resolves as a fast-forward.")
@@ -170,6 +185,8 @@ func CreateCloneArgParser() *argparser.ArgParser {
ap.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, dbfactory.AWSCredTypes))
ap.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file.")
ap.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use.")
ap.SupportsString(dbfactory.OSSCredsFileParam, "", "file", "OSS credentials file.")
ap.SupportsString(dbfactory.OSSCredsProfile, "", "profile", "OSS profile to use.")
return ap
}
@@ -277,18 +294,33 @@ func CreateVerifyConstraintsArgParser() *argparser.ArgParser {
return ap
}
func CreateLogArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsInt(NumberFlag, "n", "num_commits", "Limit the number of commits to output.")
ap.SupportsInt(MinParentsFlag, "", "parent_count", "The minimum number of parents a commit must have to be included in the log.")
ap.SupportsFlag(MergesFlag, "", "Equivalent to min-parents == 2, this will limit the log to commits with 2 or more parents.")
ap.SupportsFlag(ParentsFlag, "", "Shows all parents of each commit in the log.")
ap.SupportsString(DecorateFlag, "", "decorate_fmt", "Shows refs next to commits. Valid options are short, full, no, and auto")
ap.SupportsFlag(OneLineFlag, "", "Shows logs in a compact format.")
ap.SupportsStringList(NotFlag, "", "revision", "Excludes commits from revision.")
return ap
}
var awsParams = []string{dbfactory.AWSRegionParam, dbfactory.AWSCredsTypeParam, dbfactory.AWSCredsFileParam, dbfactory.AWSCredsProfile}
var ossParams = []string{dbfactory.OSSCredsFileParam, dbfactory.OSSCredsProfile}
func ProcessBackupArgs(apr *argparser.ArgParseResults, scheme, backupUrl string) (map[string]string, error) {
params := map[string]string{}
var err error
if scheme == dbfactory.AWSScheme {
switch scheme {
case dbfactory.AWSScheme:
err = AddAWSParams(backupUrl, apr, params)
} else {
case dbfactory.OSSScheme:
err = AddOSSParams(backupUrl, apr, params)
default:
err = VerifyNoAwsParams(apr)
}
return params, err
}
@@ -312,6 +344,26 @@ func AddAWSParams(remoteUrl string, apr *argparser.ArgParseResults, params map[s
return nil
}
func AddOSSParams(remoteUrl string, apr *argparser.ArgParseResults, params map[string]string) error {
isOSS := strings.HasPrefix(remoteUrl, "oss")
if !isOSS {
for _, p := range ossParams {
if _, ok := apr.GetValue(p); ok {
return fmt.Errorf("%s param is only valid for oss cloud remotes in the format oss://oss-bucket/database", p)
}
}
}
for _, p := range ossParams {
if val, ok := apr.GetValue(p); ok {
params[p] = val
}
}
return nil
}
func VerifyNoAwsParams(apr *argparser.ArgParseResults) error {
if awsParams := apr.GetValues(awsParams...); len(awsParams) > 0 {
awsParamKeys := make([]string, 0, len(awsParams))
-2
View File
@@ -82,12 +82,10 @@ func (cmdDoc CommandDocumentation) CmdDocToMd() (string, error) {
}
templ, templErr := template.New("shortDesc").Parse(cmdMdDocTempl)
if templErr != nil {
return "", templErr
}
var templBuffer bytes.Buffer
if err := templ.Execute(&templBuffer, cmdMdDoc); err != nil {
return "", err
}
ret := strings.Replace(templBuffer.String(), "HEAD~", "HEAD\\~", -1)
+2 -2
View File
@@ -19,8 +19,6 @@ import (
"encoding/json"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/store/datas/pull"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
@@ -28,7 +26,9 @@ import (
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/store/datas/pull"
)
var backupDocs = cli.CommandDocumentationContent{
+2 -9
View File
@@ -19,6 +19,8 @@ import (
"path"
"strings"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
@@ -26,11 +28,9 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/remotestorage"
"github.com/dolthub/dolt/go/libraries/events"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/earl"
"github.com/dolthub/dolt/go/store/types"
)
var cloneDocs = cli.CommandDocumentationContent{
@@ -203,15 +203,8 @@ func createRemote(ctx context.Context, remoteName, remoteUrl string, params map[
r := env.NewRemote(remoteName, remoteUrl, params)
ddb, err := r.GetRemoteDB(ctx, types.Format_Default, dEnv)
if err != nil {
bdr := errhand.BuildDError("error: failed to get remote db").AddCause(err)
if err == remotestorage.ErrInvalidDoltSpecPath {
urlObj, _ := earl.Parse(remoteUrl)
bdr.AddDetails("'%s' should be in the format 'organization/repo'", urlObj.Path)
}
return env.NoRemote, nil, bdr.Build()
}
+13 -3
View File
@@ -40,7 +40,7 @@ import (
)
var commitDocs = cli.CommandDocumentationContent{
ShortDesc: "Record changes to the repository",
ShortDesc: "Record changes to the database",
LongDesc: `
Stores the current contents of the staged tables in a new commit along with a log message from the user describing the changes.
@@ -193,7 +193,7 @@ func performCommit(ctx context.Context, commandStr string, args []string, dEnv *
mergeParentCommits = parentsHeadForAmend
}
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData(), actions.CommitStagedProps{
pendingCommit, err := actions.GetCommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData().Ddb, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag) || apr.Contains(cli.AmendFlag),
@@ -306,15 +306,25 @@ func getCommitMessageFromEditor(ctx context.Context, dEnv *env.DoltEnv, suggeste
}
backupEd := "vim"
// try getting default editor on the user system
if ed, edSet := os.LookupEnv("EDITOR"); edSet {
backupEd = ed
}
// try getting Dolt config core.editor
editorStr := dEnv.Config.GetStringOrDefault(env.DoltEditor, backupEd)
cli.ExecuteWithStdioRestored(func() {
commitMsg, _ := editor.OpenCommitEditor(editorStr, initialMsg)
commitMsg, cErr := editor.OpenCommitEditor(editorStr, initialMsg)
if cErr != nil {
err = cErr
}
finalMsg = parseCommitMessage(commitMsg)
})
if err != nil {
return "", err
}
return finalMsg, nil
}
+15 -3
View File
@@ -40,9 +40,21 @@ var cfgDocs = cli.CommandDocumentationContent{
ShortDesc: `Get and set repository or global options`,
LongDesc: `You can query/set/replace/unset options with this command.
When reading, the values are read from the global and repository local configuration files, and options {{.LessThan}}--global{{.GreaterThan}}, and {{.LessThan}}--local{{.GreaterThan}} can be used to tell the command to read from only that location.
When writing, the new value is written to the repository local configuration file by default, and options {{.LessThan}}--global{{.GreaterThan}}, can be used to tell the command to write to that location (you can say {{.LessThan}}--local{{.GreaterThan}} but that is the default).
When reading, the values are read from the global and repository local configuration files, and options {{.LessThan}}--global{{.GreaterThan}}, and {{.LessThan}}--local{{.GreaterThan}} can be used to tell the command to read from only that location.
When writing, the new value is written to the repository local configuration file by default, and options {{.LessThan}}--global{{.GreaterThan}}, can be used to tell the command to write to that location (you can say {{.LessThan}}--local{{.GreaterThan}} but that is the default).
Valid configuration variables:
- core.editor - lets you edit 'commit' or 'tag' messages by launching the set editor.
- creds.add_url - sets the endpoint used to authenticate a client for 'dolt login'.
- doltlab.insecure - boolean flag used to authenticate a client against DoltLab.
- init.defaultbranch - allows overriding the default branch name e.g. when initializing a new repository.
- metrics.disabled - boolean flag disables sending metrics when true.
- user.creds - sets user keypairs for authenticating with doltremoteapi
  - user.email - sets email used in the author and committer field of commit objects
  - user.name - sets name used in the author and committer field of commit objects
  - remotes.default_host - sets default host for authenticating with doltremoteapi
  - remotes.default_port - sets default port for authenticating with doltremoteapi
`,
Synopsis: []string{
+4 -4
View File
@@ -124,7 +124,7 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
}
return dc, nil
} else {
dc, valid, err := dEnv.UserRPCCreds()
dc, valid, err := dEnv.UserDoltCreds()
if !valid {
return creds.EmptyCreds, errhand.BuildDError("error: no user credentials found").Build()
}
@@ -136,14 +136,14 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
}
func checkCredAndPrintSuccess(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, endpoint string) errhand.VerboseError {
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: endpoint,
Creds: dc,
Creds: dc.RPCCreds(),
})
if err != nil {
return errhand.BuildDError("error: unable to build server endpoint options.").AddCause(err).Build()
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
}
+3 -3
View File
@@ -161,14 +161,14 @@ func updateProfileWithCredentials(ctx context.Context, dEnv *env.DoltEnv, c cred
host := dEnv.Config.GetStringOrDefault(env.RemotesApiHostKey, env.DefaultRemotesApiHost)
port := dEnv.Config.GetStringOrDefault(env.RemotesApiHostPortKey, env.DefaultRemotesApiPort)
hostAndPort := fmt.Sprintf("%s:%s", host, port)
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: hostAndPort,
Creds: c,
Creds: c.RPCCreds(),
})
if err != nil {
return fmt.Errorf("error: unable to build dial options server with credentials: %w", err)
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return fmt.Errorf("error: unable to connect to server with credentials: %w", err)
}
+1 -1
View File
@@ -93,7 +93,7 @@ func (cmd LsCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
}
func getJWKHandler(dEnv *env.DoltEnv) func(string, int64, bool) bool {
current, valid, _ := dEnv.UserRPCCreds()
current, valid, _ := dEnv.UserDoltCreds()
first := false
return func(path string, size int64, isDir bool) (stop bool) {
if strings.HasSuffix(path, creds.JWKFileExtension) {
+136 -11
View File
@@ -61,6 +61,7 @@ const (
SQLFlag = "sql"
CachedFlag = "cached"
SkinnyFlag = "skinny"
MergeBase = "merge-base"
)
var diffDocs = cli.CommandDocumentationContent{
@@ -71,11 +72,17 @@ Show changes between the working and staged tables, changes between the working
{{.EmphasisLeft}}dolt diff [--options] [<tables>...]{{.EmphasisRight}}
This form is to view the changes you made relative to the staging area for the next commit. In other words, the differences are what you could tell Dolt to further add but you still haven't. You can stage these changes by using dolt add.
{{.EmphasisLeft}}dolt diff [--options] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch.
{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> [<tables>...]{{.EmphasisRight}}
This form is to view the changes you have in your working tables relative to the named {{.LessThan}}commit{{.GreaterThan}}. You can use HEAD to compare it with the latest commit, or a branch name to compare with the tip of a different branch. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, instead of using {{.LessThan}}commit{{.GreaterThan}}, use the merge base of {{.LessThan}}commit{{.GreaterThan}} and HEAD. {{.EmphasisLeft}}dolt diff --merge-base A{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A HEAD){{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...HEAD{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] [--merge-base] <commit> <commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}. If {{.EmphasisLeft}}--merge-base{{.EmphasisRight}} is given, use the merge base of the two commits for the "before" side. {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit>..<commit> [<tables>...]{{.EmphasisRight}}
This is synonymous to the above form (without the ..) to view the changes between two arbitrary {{.EmphasisLeft}}commit{{.EmphasisRight}}.
{{.EmphasisLeft}}dolt diff [--options] <commit>...<commit> [<tables>...]{{.EmphasisRight}}
This is to view the changes on the branch containing and up to the second {{.LessThan}}commit{{.GreaterThan}}, starting at a common ancestor of both {{.LessThan}}commit{{.GreaterThan}}. {{.EmphasisLeft}}dolt diff A...B{{.EmphasisRight}} is equivalent to {{.EmphasisLeft}}dolt diff $(dolt merge-base A B) B{{.EmphasisRight}} and {{.EmphasisLeft}}dolt diff --merge-base A B{{.EmphasisRight}}. You can omit any one of {{.LessThan}}commit{{.GreaterThan}}, which has the same effect as using HEAD instead.
The diffs displayed can be limited to show the first N by providing the parameter {{.EmphasisLeft}}--limit N{{.EmphasisRight}} where {{.EmphasisLeft}}N{{.EmphasisRight}} is the number of diffs to display.
@@ -132,6 +139,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.")
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
return ap
}
@@ -202,7 +210,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
dArgs.limit, _ = apr.GetInt(limitParam)
dArgs.where = apr.GetValueOrDefault(whereParam, "")
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag))
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag), apr.Contains(MergeBase))
if err != nil {
return nil, err
}
@@ -243,7 +251,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
// applyDiffRoots applies the appropriate |from| and |to| root values to the receiver and returns the table names
// (if any) given to the command.
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached bool) ([]string, error) {
func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, args []string, isCached, useMergeBase bool) ([]string, error) {
headRoot, err := dEnv.HeadRoot(ctx)
if err != nil {
return nil, err
@@ -271,15 +279,33 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
}
if len(args) == 0 {
if useMergeBase {
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
}
// `dolt diff`
return nil, nil
}
if strings.Contains(args[0], "..") {
if useMergeBase {
return nil, fmt.Errorf("Cannot use `..` or `...` with --merge-base flag")
}
err = dArgs.applyDotRevisions(ctx, dEnv, args)
if err != nil {
return nil, err
}
return args[1:], err
}
// treat the first arg as a ref spec
fromRoot, ok := maybeResolve(ctx, dEnv, args[0])
// if it doesn't resolve, treat it as a table name
if !ok {
// `dolt diff table`
if useMergeBase {
return nil, fmt.Errorf("Must supply at least one revision when using --merge-base flag")
}
return args, nil
}
@@ -288,23 +314,123 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
if len(args) == 1 {
// `dolt diff from_commit`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
if err != nil {
return nil, err
}
}
return nil, nil
}
toRoot, ok := maybeResolve(ctx, dEnv, args[1])
if !ok {
// `dolt diff from_commit ...tables`
// `dolt diff from_commit [...tables]`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], "HEAD")
if err != nil {
return nil, err
}
}
return args[1:], nil
}
dArgs.toRoot = toRoot
dArgs.toRef = args[1]
// `dolt diff from_commit to_commit ...tables`
if useMergeBase {
err := dArgs.applyMergeBase(ctx, dEnv, args[0], args[1])
if err != nil {
return nil, err
}
}
// `dolt diff from_commit to_commit [...tables]`
return args[2:], nil
}
// applyMergeBase applies the merge base of two revisions to the |from| root
// values.
func (dArgs *diffArgs) applyMergeBase(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) error {
mergeBaseStr, err := getMergeBaseFromStrings(ctx, dEnv, leftStr, rightStr)
if err != nil {
return err
}
fromRoot, ok := maybeResolve(ctx, dEnv, mergeBaseStr)
if !ok {
return fmt.Errorf("merge base invalid %s", mergeBaseStr)
}
dArgs.fromRoot = fromRoot
dArgs.fromRef = mergeBaseStr
return nil
}
// applyDotRevisions applies the appropriate |from| and |to| root values to the
// receiver for arguments containing `..` or `...`
func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv, args []string) error {
// `dolt diff from_commit...to_commit [...tables]`
if strings.Contains(args[0], "...") {
refs := strings.Split(args[0], "...")
var toRoot *doltdb.RootValue
ok := true
if len(refs[0]) > 0 {
right := refs[1]
// Use current HEAD if right side of `...` does not exist
if len(refs[1]) == 0 {
right = "HEAD"
}
err := dArgs.applyMergeBase(ctx, dEnv, refs[0], right)
if err != nil {
return err
}
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
dArgs.toRef = refs[1]
}
return nil
}
// `dolt diff from_commit..to_commit [...tables]`
if strings.Contains(args[0], "..") {
refs := strings.Split(args[0], "..")
var fromRoot *doltdb.RootValue
var toRoot *doltdb.RootValue
ok := true
if len(refs[0]) > 0 {
if fromRoot, ok = maybeResolve(ctx, dEnv, refs[0]); !ok {
return fmt.Errorf("from ref in two dot diff must be valid ref: %s", refs[0])
}
dArgs.fromRoot = fromRoot
dArgs.fromRef = refs[0]
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
dArgs.toRef = refs[1]
}
return nil
}
return nil
}
// todo: distinguish between non-existent CommitSpec and other errors, don't assume non-existent
func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.RootValue, bool) {
cs, err := doltdb.NewCommitSpec(spec)
@@ -391,8 +517,7 @@ func diffUserTable(
}
if dArgs.diffParts&Summary != 0 {
numCols := fromSch.GetAllCols().Size()
return printDiffSummary(ctx, td, numCols)
return printDiffSummary(ctx, td, fromSch.GetAllCols().Size(), toSch.GetAllCols().Size())
}
if dArgs.diffParts&SchemaOnlyDiff != 0 {
@@ -594,7 +719,7 @@ func diffRows(
}
columns := getColumnNamesString(td.FromSch, td.ToSch)
query := fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columns, "diff_type", tableName, from, to)
query := fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columns, "diff_type", from, to, tableName)
if len(dArgs.where) > 0 {
query += " where " + dArgs.where
+23 -16
View File
@@ -65,7 +65,7 @@ func newDiffWriter(diffOutput diffOutput) (diffWriter, error) {
}
}
func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errhand.VerboseError {
func printDiffSummary(ctx context.Context, td diff.TableDelta, oldColLen, newColLen int) errhand.VerboseError {
// todo: use errgroup.Group
ae := atomicerr.New()
ch := make(chan diff.DiffSummaryProgress)
@@ -89,11 +89,13 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
acc.Removes += p.Removes
acc.Changes += p.Changes
acc.CellChanges += p.CellChanges
acc.NewSize += p.NewSize
acc.OldSize += p.OldSize
acc.NewRowSize += p.NewRowSize
acc.OldRowSize += p.OldRowSize
acc.NewCellSize += p.NewCellSize
acc.OldCellSize += p.OldCellSize
if count%10000 == 0 {
eP.Printf("prev size: %d, new size: %d, adds: %d, deletes: %d, modifications: %d\n", acc.OldSize, acc.NewSize, acc.Adds, acc.Removes, acc.Changes)
eP.Printf("prev size: %d, new size: %d, adds: %d, deletes: %d, modifications: %d\n", acc.OldRowSize, acc.NewRowSize, acc.Adds, acc.Removes, acc.Changes)
eP.Display()
}
@@ -108,10 +110,10 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
keyless, err := td.IsKeyless(ctx)
if err != nil {
return nil
return errhand.BuildDError("").AddCause(err).Build()
}
if (acc.Adds + acc.Removes + acc.Changes) == 0 {
if (acc.Adds+acc.Removes+acc.Changes) == 0 && (acc.OldCellSize-acc.NewCellSize) == 0 {
cli.Println("No data changes. See schema changes by using -s or --schema.")
return nil
}
@@ -119,24 +121,27 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, colLen int) errha
if keyless {
printKeylessSummary(acc)
} else {
printSummary(acc, colLen)
printSummary(acc, oldColLen, newColLen)
}
return nil
}
func printSummary(acc diff.DiffSummaryProgress, colLen int) {
rowsUnmodified := uint64(acc.OldSize - acc.Changes - acc.Removes)
func printSummary(acc diff.DiffSummaryProgress, oldColLen, newColLen int) {
numCellInserts, numCellDeletes := sqle.GetCellsAddedAndDeleted(acc, newColLen)
rowsUnmodified := uint64(acc.OldRowSize - acc.Changes - acc.Removes)
unmodified := pluralize("Row Unmodified", "Rows Unmodified", rowsUnmodified)
insertions := pluralize("Row Added", "Rows Added", acc.Adds)
deletions := pluralize("Row Deleted", "Rows Deleted", acc.Removes)
changes := pluralize("Row Modified", "Rows Modified", acc.Changes)
cellInsertions := pluralize("Cell Added", "Cells Added", numCellInserts)
cellDeletions := pluralize("Cell Deleted", "Cells Deleted", numCellDeletes)
cellChanges := pluralize("Cell Modified", "Cells Modified", acc.CellChanges)
oldValues := pluralize("Entry", "Entries", acc.OldSize)
newValues := pluralize("Entry", "Entries", acc.NewSize)
oldValues := pluralize("Row Entry", "Row Entries", acc.OldRowSize)
newValues := pluralize("Row Entry", "Row Entries", acc.NewRowSize)
percentCellsChanged := float64(100*acc.CellChanges) / (float64(acc.OldSize) * float64(colLen))
percentCellsChanged := float64(100*acc.CellChanges) / (float64(acc.OldRowSize) * float64(oldColLen))
safePercent := func(num, dom uint64) float64 {
// returns +Inf for x/0 where x > 0
@@ -146,10 +151,12 @@ func printSummary(acc diff.DiffSummaryProgress, colLen int) {
return float64(100*num) / (float64(dom))
}
cli.Printf("%s (%.2f%%)\n", unmodified, safePercent(rowsUnmodified, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", insertions, safePercent(acc.Adds, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", deletions, safePercent(acc.Removes, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", changes, safePercent(acc.Changes, acc.OldSize))
cli.Printf("%s (%.2f%%)\n", unmodified, safePercent(rowsUnmodified, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", insertions, safePercent(acc.Adds, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", deletions, safePercent(acc.Removes, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", changes, safePercent(acc.Changes, acc.OldRowSize))
cli.Printf("%s (%.2f%%)\n", cellInsertions, safePercent(numCellInserts, acc.OldCellSize))
cli.Printf("%s (%.2f%%)\n", cellDeletions, safePercent(numCellDeletes, acc.OldCellSize))
cli.Printf("%s (%.2f%%)\n", cellChanges, percentCellsChanged)
cli.Printf("(%s vs %s)\n\n", oldValues, newValues)
}
+2 -2
View File
@@ -20,6 +20,6 @@ import (
var Commands = cli.NewSubCommandHandler("docs", "Commands for working with Dolt documents.", []cli.Command{
DiffCmd{},
WriteCmd{},
ReadCmd{},
PrintCmd{},
UploadCmd{},
})
+14 -14
View File
@@ -31,39 +31,39 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
var readDocs = cli.CommandDocumentationContent{
ShortDesc: "Reads Dolt Docs from the file system into the database",
LongDesc: "Reads Dolt Docs from the file system into the database",
var uploadDocs = cli.CommandDocumentationContent{
ShortDesc: "Uploads Dolt Docs from the file system into the database",
LongDesc: "Uploads Dolt Docs from the file system into the database",
Synopsis: []string{
"{{.LessThan}}doc{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
},
}
type ReadCmd struct{}
type UploadCmd struct{}
// Name implements cli.Command.
func (cmd ReadCmd) Name() string {
return "read"
func (cmd UploadCmd) Name() string {
return "upload"
}
// Description implements cli.Command.
func (cmd ReadCmd) Description() string {
return readDocs.ShortDesc
func (cmd UploadCmd) Description() string {
return uploadDocs.ShortDesc
}
// RequiresRepo implements cli.Command.
func (cmd ReadCmd) RequiresRepo() bool {
func (cmd UploadCmd) RequiresRepo() bool {
return true
}
// Docs implements cli.Command.
func (cmd ReadCmd) Docs() *cli.CommandDocumentation {
func (cmd UploadCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(readDocs, ap)
return cli.NewCommandDocumentation(uploadDocs, ap)
}
// ArgParser implements cli.Command.
func (cmd ReadCmd) ArgParser() *argparser.ArgParser {
func (cmd UploadCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"doc", "Dolt doc name to be updated in the database."})
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"file", "file to read Dolt doc from."})
@@ -71,9 +71,9 @@ func (cmd ReadCmd) ArgParser() *argparser.ArgParser {
}
// Exec implements cli.Command.
func (cmd ReadCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
func (cmd UploadCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, writeDocs, ap))
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, uploadDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() != 2 {
+14 -14
View File
@@ -30,48 +30,48 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
var writeDocs = cli.CommandDocumentationContent{
ShortDesc: "Writes Dolt Docs to stdout",
LongDesc: "Writes Dolt Docs to stdout",
var printDocs = cli.CommandDocumentationContent{
ShortDesc: "Prints Dolt Docs to stdout",
LongDesc: "Prints Dolt Docs to stdout",
Synopsis: []string{
"{{.LessThan}}doc{{.GreaterThan}}",
},
}
type WriteCmd struct{}
type PrintCmd struct{}
// Name implements cli.Command.
func (cmd WriteCmd) Name() string {
return "write"
func (cmd PrintCmd) Name() string {
return "print"
}
// Description implements cli.Command.
func (cmd WriteCmd) Description() string {
return writeDocs.ShortDesc
func (cmd PrintCmd) Description() string {
return printDocs.ShortDesc
}
// RequiresRepo implements cli.Command.
func (cmd WriteCmd) RequiresRepo() bool {
func (cmd PrintCmd) RequiresRepo() bool {
return true
}
// Docs implements cli.Command.
func (cmd WriteCmd) Docs() *cli.CommandDocumentation {
func (cmd PrintCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(writeDocs, ap)
return cli.NewCommandDocumentation(printDocs, ap)
}
// ArgParser implements cli.Command.
func (cmd WriteCmd) ArgParser() *argparser.ArgParser {
func (cmd PrintCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"doc", "Dolt doc to be read."})
return ap
}
// Exec implements cli.Command.
func (cmd WriteCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
func (cmd PrintCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, writeDocs, ap))
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, printDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() != 1 {
+7 -16
View File
@@ -21,8 +21,6 @@ import (
"os"
"sort"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -52,10 +50,6 @@ func (cmd *DumpDocsCmd) Description() string {
return "dumps all documentation in md format to a directory"
}
func (cmd *DumpDocsCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return types.IsFormat_DOLT(nbf)
}
// Hidden should return true if this command should be hidden from the help text
func (cmd *DumpDocsCmd) Hidden() bool {
return true
@@ -105,19 +99,17 @@ func (cmd *DumpDocsCmd) Exec(ctx context.Context, commandStr string, args []stri
return 1
}
err = cmd.dumpDocs(wr, cmd.DoltCommand.Name(), cmd.DoltCommand.Subcommands)
verr := cmd.dumpDocs(wr, cmd.DoltCommand.Name(), cmd.DoltCommand.Subcommands)
if err != nil {
verr := errhand.BuildDError("error: Failed to dump docs.").AddCause(err).Build()
if verr != nil {
cli.PrintErrln(verr.Verbose())
return 1
}
return 0
}
func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.Command) error {
func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.Command) errhand.VerboseError {
sort.Slice(subCommands, func(i, j int) bool {
return subCommands[i].Name() < subCommands[j].Name()
})
@@ -130,10 +122,9 @@ func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.
if !hidden {
if subCmdHandler, ok := curr.(cli.SubCommandHandler); ok {
err := cmd.dumpDocs(wr, cmdStr+" "+subCmdHandler.Name(), subCmdHandler.Subcommands)
if err != nil {
return err
verr := cmd.dumpDocs(wr, cmdStr+" "+subCmdHandler.Name(), subCmdHandler.Subcommands)
if verr != nil {
return verr
}
} else {
docs := curr.Docs()
@@ -142,7 +133,7 @@ func (cmd *DumpDocsCmd) dumpDocs(wr io.Writer, cmdStr string, subCommands []cli.
docs.CommandStr = fmt.Sprintf("%s %s", cmdStr, curr.Name())
err := CreateMarkdown(wr, docs)
if err != nil {
return err
return errhand.BuildDError(fmt.Sprintf("error: Failed to create markdown for command: %s %s.", cmdStr, curr.Name())).AddCause(err).Build()
}
}
}
+4 -1
View File
@@ -65,7 +65,10 @@ func validateJWT(config []JwksConfig, username, identity, token string, reqTime
if err != nil {
return false, err
}
vd := jwtauth.NewJWTValidator(pr)
vd, err := jwtauth.NewJWTValidator(pr)
if err != nil {
return false, err
}
claims, err := vd.ValidateJWT(token, reqTime)
if err != nil {
return false, err
+1 -2
View File
@@ -187,9 +187,8 @@ func secondsSince(start time.Time) float64 {
// nullWriter is a no-op SqlRowWriter implementation
type nullWriter struct{}
func (n nullWriter) WriteRow(ctx context.Context, r row.Row) error { return nil }
func (n nullWriter) Close(ctx context.Context) error { return nil }
func (n nullWriter) WriteSqlRow(ctx context.Context, r sql.Row) error { return nil }
func (n nullWriter) Close(ctx context.Context) error { return nil }
func printEmptySetResult(start time.Time) {
seconds := secondsSince(start)
+43 -16
View File
@@ -29,10 +29,12 @@ import (
"github.com/dolthub/vitess/go/vt/sqlparser"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
"github.com/dolthub/dolt/go/libraries/utils/config"
@@ -48,17 +50,19 @@ type SqlEngine struct {
}
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
BranchCtrlFilePath string
ServerUser string
ServerPass string
ServerHost string
Autocommit bool
Bulk bool
JwksConfig []JwksConfig
ClusterController *cluster.Controller
}
// NewSqlEngine returns a SqlEngine
@@ -86,10 +90,23 @@ func NewSqlEngine(
return nil, err
}
config.ClusterController.ManageSystemVariables(sql.SystemVariables)
err = config.ClusterController.ApplyStandbyReplicationConfig(ctx, bThreads, mrEnv, dbs...)
if err != nil {
return nil, err
}
infoDB := information_schema.NewInformationSchemaDatabase()
all := append(dsqleDBsAsSqlDBs(dbs), infoDB)
locations = append(locations, nil)
clusterDB := config.ClusterController.ClusterDatabase()
if clusterDB != nil {
all = append(all, clusterDB)
locations = append(locations, nil)
}
b := env.GetDefaultInitBranch(mrEnv.Config())
pro, err := dsqle.NewDoltDatabaseProviderWithDatabases(b, mrEnv.FileSystem(), all, locations)
if err != nil {
@@ -97,6 +114,10 @@ func NewSqlEngine(
}
pro = pro.WithRemoteDialer(mrEnv.RemoteDialProvider())
config.ClusterController.RegisterStoredProcedures(pro)
pro.InitDatabaseHook = cluster.NewInitDatabaseHook(config.ClusterController, bThreads, pro.InitDatabaseHook)
config.ClusterController.ManageDatabaseProvider(pro)
// Load in privileges from file, if it exists
persister := mysql_file_handler.NewPersister(config.PrivFilePath, config.DoltCfgDirPath)
data, err := persister.LoadData()
@@ -104,6 +125,12 @@ func NewSqlEngine(
return nil, err
}
// Load the branch control permissions, if they exist
var bcController *branch_control.Controller
if bcController, err = branch_control.LoadData(config.BranchCtrlFilePath, config.DoltCfgDirPath); err != nil {
return nil, err
}
// Set up engine
engine := gms.New(analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(), &gms.Config{
IsReadOnly: config.IsReadOnly,
@@ -140,7 +167,7 @@ func NewSqlEngine(
dbStates = append(dbStates, dbState)
}
sess, err := dsess.NewDoltSession(sql.NewEmptyContext(), sql.NewBaseSession(), pro, mrEnv.Config(), dbStates...)
sess, err := dsess.NewDoltSession(sql.NewEmptyContext(), sql.NewBaseSession(), pro, mrEnv.Config(), bcController, dbStates...)
if err != nil {
return nil, err
}
@@ -159,7 +186,7 @@ func NewSqlEngine(
return &SqlEngine{
dbs: nameToDB,
contextFactory: newSqlContext(sess, config.InitialDb),
dsessFactory: newDoltSession(pro, mrEnv.Config(), config.Autocommit),
dsessFactory: newDoltSession(pro, mrEnv.Config(), config.Autocommit, bcController),
engine: engine,
resultFormat: format,
}, nil
@@ -292,7 +319,6 @@ func newSqlContext(sess *dsess.DoltSession, initialDb string) func(ctx context.C
if sessionDB := sess.GetCurrentDatabase(); sessionDB != "" {
sqlCtx.SetCurrentDatabase(sessionDB)
} else {
sqlCtx.SetCurrentDatabase(initialDb)
}
@@ -300,7 +326,8 @@ func newSqlContext(sess *dsess.DoltSession, initialDb string) func(ctx context.C
}
}
func newDoltSession(pro dsqle.DoltDatabaseProvider, config config.ReadWriteConfig, autocommit bool) func(ctx context.Context, mysqlSess *sql.BaseSession, dbs []sql.Database) (*dsess.DoltSession, error) {
func newDoltSession(pro dsqle.DoltDatabaseProvider, config config.ReadWriteConfig,
autocommit bool, bc *branch_control.Controller) func(ctx context.Context, mysqlSess *sql.BaseSession, dbs []sql.Database) (*dsess.DoltSession, error) {
return func(ctx context.Context, mysqlSess *sql.BaseSession, dbs []sql.Database) (*dsess.DoltSession, error) {
ddbs := dsqle.DbsAsDSQLDBs(dbs)
states, err := getDbStates(ctx, ddbs)
@@ -308,7 +335,7 @@ func newDoltSession(pro dsqle.DoltDatabaseProvider, config config.ReadWriteConfi
return nil, err
}
dsess, err := dsess.NewDoltSession(sql.NewEmptyContext(), mysqlSess, pro, config, states...)
dsess, err := dsess.NewDoltSession(sql.NewEmptyContext(), mysqlSess, pro, config, bc, states...)
if err != nil {
return nil, err
}
+17 -3
View File
@@ -32,6 +32,8 @@ const (
emailParamName = "email"
usernameParamName = "name"
initBranchParamName = "initial-branch"
newFormatFlag = "new-format"
oldFormatFlag = "old-format"
)
var initDocs = cli.CommandDocumentationContent{
@@ -75,7 +77,8 @@ func (cmd InitCmd) ArgParser() *argparser.ArgParser {
ap.SupportsString(emailParamName, "", "email", fmt.Sprintf("The email address used. If not provided will be taken from {{.EmphasisLeft}}%s{{.EmphasisRight}} in the global config.", env.UserEmailKey))
ap.SupportsString(cli.DateParam, "", "date", "Specify the date used in the initial commit. If not specified the current system time is used.")
ap.SupportsString(initBranchParamName, "b", "branch", fmt.Sprintf("The branch name used to initialize this database. If not provided will be taken from {{.EmphasisLeft}}%s{{.EmphasisRight}} in the global config. If unset, the default initialized branch will be named '%s'.", env.InitBranchName, env.DefaultInitBranch))
ap.SupportsFlag(cli.NewFormatFlag, "", fmt.Sprintf("Specify this flag to use the new storage format (%s).", types.Format_DOLT.VersionString()))
ap.SupportsFlag(newFormatFlag, "", fmt.Sprintf("Specify this flag to use the new storage format (%s).", types.Format_DOLT.VersionString()))
ap.SupportsFlag(oldFormatFlag, "", fmt.Sprintf("Specify this flag to use the old storage format (%s).", types.Format_LD_1.VersionString()))
return ap
}
@@ -85,13 +88,24 @@ func (cmd InitCmd) Exec(ctx context.Context, commandStr string, args []string, d
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, initDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if len(apr.Args) > 0 {
cli.PrintErrln(color.RedString("error: invalid arguments."))
return 1
}
if dEnv.HasDoltDir() {
cli.PrintErrln(color.RedString("This directory has already been initialized."))
return 1
}
if apr.Contains(cli.NewFormatFlag) {
if apr.Contains(newFormatFlag) && apr.Contains(oldFormatFlag) {
e := fmt.Sprintf("options %s and %s are mutually exclusive", newFormatFlag, oldFormatFlag)
cli.PrintErrln(color.RedString(e))
return 1
}
if apr.Contains(newFormatFlag) {
types.Format_Default = types.Format_DOLT
} else if apr.Contains(oldFormatFlag) {
types.Format_Default = types.Format_LD_1
}
name, _ := apr.GetValue(usernameParamName)
+3 -3
View File
@@ -106,7 +106,7 @@ func (cmd InspectCmd) measureChunkIndexDistribution(ctx context.Context, dEnv *e
break
}
summary, err := cmd.processTableFile(path, dEnv.FS)
summary, err := cmd.processTableFile(ctx, path, dEnv.FS)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
@@ -120,7 +120,7 @@ func (cmd InspectCmd) measureChunkIndexDistribution(ctx context.Context, dEnv *e
return nil
}
func (cmd InspectCmd) processTableFile(path string, fs filesys.Filesys) (sum *chunkIndexSummary, err error) {
func (cmd InspectCmd) processTableFile(ctx context.Context, path string, fs filesys.Filesys) (sum *chunkIndexSummary, err error) {
var rdr io.ReadCloser
rdr, err = fs.OpenForRead(path)
if err != nil {
@@ -134,7 +134,7 @@ func (cmd InspectCmd) processTableFile(path string, fs filesys.Filesys) (sum *ch
}()
var prefixes []uint64
prefixes, err = nbs.GetTableIndexPrefixes(rdr.(io.ReadSeeker))
prefixes, err = nbs.GetTableIndexPrefixes(ctx, rdr.(io.ReadSeeker))
if err != nil {
return sum, err
}
+254 -94
View File
@@ -23,6 +23,7 @@ import (
"github.com/fatih/color"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -34,21 +35,15 @@ import (
"github.com/dolthub/dolt/go/store/util/outputpager"
)
const (
numLinesParam = "number"
mergesParam = "merges"
minParentsParam = "min-parents"
parentsParam = "parents"
decorateParam = "decorate"
oneLineParam = "oneline"
)
type logOpts struct {
numLines int
showParents bool
minParents int
decoration string
oneLine bool
numLines int
showParents bool
minParents int
decoration string
oneLine bool
excludingCommitSpecs []*doltdb.CommitSpec
commitSpecs []*doltdb.CommitSpec
tableName string
}
type logNode struct {
@@ -63,9 +58,27 @@ var logDocs = cli.CommandDocumentationContent{
ShortDesc: `Show commit logs`,
LongDesc: `Shows the commit logs
The command takes options to control what is shown and how.`,
The command takes options to control what is shown and how.
{{.EmphasisLeft}}dolt log{{.EmphasisRight}}
Lists commit logs from current HEAD when no options provided.
{{.EmphasisLeft}}dolt log [<revisions>...]{{.EmphasisRight}}
Lists commit logs starting from revision. If multiple revisions provided, lists logs reachable by all revisions.
{{.EmphasisLeft}}dolt log [<revisions>...] <table>{{.EmphasisRight}}
Lists commit logs starting from revisions, only including commits with changes to table.
{{.EmphasisLeft}}dolt log <revisionB>..<revisionA>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log <revisionA> --not <revisionB>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log ^<revisionB> <revisionA>{{.EmphasisRight}}
Different ways to list two dot logs. These will list commit logs for revisionA, while excluding commits from revisionB. The table option is not supported for two dot log.
{{.EmphasisLeft}}dolt log <revisionB>...<revisionA>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log <revisionA> <revisionB> --not $(dolt merge-base <revisionA> <revisionB>){{.EmphasisRight}}
Different ways to list three dot logs. These will list commit logs reachable by revisionA OR revisionB, while excluding commits reachable by BOTH revisionA AND revisionB.`,
Synopsis: []string{
`[-n {{.LessThan}}num_commits{{.GreaterThan}}] [{{.LessThan}}commit{{.GreaterThan}}] [[--] {{.LessThan}}table{{.GreaterThan}}]`,
`[-n {{.LessThan}}num_commits{{.GreaterThan}}] [{{.LessThan}}revision-range{{.GreaterThan}}] [[--] {{.LessThan}}table{{.GreaterThan}}]`,
},
}
@@ -92,14 +105,7 @@ func (cmd LogCmd) Docs() *cli.CommandDocumentation {
}
func (cmd LogCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsInt(numLinesParam, "n", "num_commits", "Limit the number of commits to output.")
ap.SupportsInt(minParentsParam, "", "parent_count", "The minimum number of parents a commit must have to be included in the log.")
ap.SupportsFlag(mergesParam, "", "Equivalent to min-parents == 2, this will limit the log to commits with 2 or more parents.")
ap.SupportsFlag(parentsParam, "", "Shows all parents of each commit in the log.")
ap.SupportsString(decorateParam, "", "decorate_fmt", "Shows refs next to commits. Valid options are short, full, no, and auto")
ap.SupportsFlag(oneLineParam, "", "Shows logs in a compact format.")
return ap
return cli.CreateLogArgParser()
}
// Exec executes the command
@@ -112,61 +118,181 @@ func (cmd LogCmd) logWithLoggerFunc(ctx context.Context, commandStr string, args
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, logDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() > 2 {
usage()
return 1
opts, err := parseLogArgs(ctx, dEnv, apr)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
if len(opts.commitSpecs) == 0 {
opts.commitSpecs = append(opts.commitSpecs, dEnv.RepoStateReader().CWBHeadSpec())
}
if len(opts.tableName) > 0 {
return handleErrAndExit(logTableCommits(ctx, dEnv, opts))
}
return logCommits(ctx, dEnv, opts)
}
minParents := apr.GetIntOrDefault(minParentsParam, 0)
if apr.Contains(mergesParam) {
func parseLogArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (*logOpts, error) {
minParents := apr.GetIntOrDefault(cli.MinParentsFlag, 0)
if apr.Contains(cli.MergesFlag) {
minParents = 2
}
decorateOption := apr.GetValueOrDefault(decorateParam, "auto")
decorateOption := apr.GetValueOrDefault(cli.DecorateFlag, "auto")
switch decorateOption {
case "short", "full", "auto", "no":
default:
cli.PrintErrln(color.HiRedString("fatal: invalid --decorate option: " + decorateOption))
return 1
return nil, fmt.Errorf("fatal: invalid --decorate option: %s", decorateOption)
}
opts := logOpts{
numLines: apr.GetIntOrDefault(numLinesParam, -1),
showParents: apr.Contains(parentsParam),
opts := &logOpts{
numLines: apr.GetIntOrDefault(cli.NumberFlag, -1),
showParents: apr.Contains(cli.ParentsFlag),
minParents: minParents,
oneLine: apr.Contains(oneLineParam),
oneLine: apr.Contains(cli.OneLineFlag),
decoration: decorateOption,
}
// Just dolt log
if apr.NArg() == 0 {
return logCommits(ctx, dEnv, dEnv.RepoStateReader().CWBHeadSpec(), opts)
} else if apr.NArg() == 1 { // dolt log <ref/table>
argIsRef := actions.ValidateIsRef(ctx, apr.Arg(0), dEnv.DoltDB, dEnv.RepoStateReader())
if argIsRef {
cs, err := doltdb.NewCommitSpec(apr.Arg(0))
if err != nil {
cli.PrintErrln(color.HiRedString("invalid commit %s\n", apr.Arg(0)))
}
return logCommits(ctx, dEnv, cs, opts)
} else {
return handleErrAndExit(logTableCommits(ctx, dEnv, opts, dEnv.RepoStateReader().CWBHeadSpec(), apr.Arg(0)))
}
} else { // dolt log ref table
cs, err := doltdb.NewCommitSpec(apr.Arg(0))
if err != nil {
cli.PrintErrln(color.HiRedString("invalid commit %s\n", apr.Arg(0)))
}
return handleErrAndExit(logTableCommits(ctx, dEnv, opts, cs, apr.Arg(1)))
err := opts.parseRefsAndTable(ctx, apr, dEnv)
if err != nil {
return nil, err
}
excludingRefs, ok := apr.GetValueList(cli.NotFlag)
if ok {
if len(opts.excludingCommitSpecs) > 0 {
return nil, fmt.Errorf("cannot use --not argument with two dots or ref with ^")
}
if len(opts.tableName) > 0 {
return nil, fmt.Errorf("cannot use --not argument with table")
}
for _, excludingRef := range excludingRefs {
notCs, err := doltdb.NewCommitSpec(excludingRef)
if err != nil {
return nil, fmt.Errorf("invalid commit %s\n", excludingRef)
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
}
}
return opts, nil
}
func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, opts logOpts) int {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
func (opts *logOpts) parseRefsAndTable(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) error {
// `dolt log`
if apr.NArg() == 0 {
return nil
}
if strings.Contains(apr.Arg(0), "..") {
if apr.NArg() > 1 {
return fmt.Errorf("Cannot use two or three dot syntax when 2 or more arguments provided")
}
// `dolt log <ref>...<ref>`
if strings.Contains(apr.Arg(0), "...") {
refs := strings.Split(apr.Arg(0), "...")
for _, ref := range refs {
cs, err := getCommitSpec(ref)
if err != nil {
return err
}
opts.commitSpecs = append(opts.commitSpecs, cs)
}
mergeBase, verr := getMergeBaseFromStrings(ctx, dEnv, refs[0], refs[1])
if verr != nil {
return verr
}
notCs, err := getCommitSpec(mergeBase)
if err != nil {
return err
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
return nil
}
// `dolt log <ref>..<ref>`
refs := strings.Split(apr.Arg(0), "..")
notCs, err := getCommitSpec(refs[0])
if err != nil {
return err
}
cs, err := getCommitSpec(refs[1])
if err != nil {
return err
}
opts.commitSpecs = append(opts.commitSpecs, cs)
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
return nil
}
seenRefs := make(map[string]bool)
for _, arg := range apr.Args {
// ^<ref>
if strings.HasPrefix(arg, "^") {
commit := strings.TrimPrefix(arg, "^")
notCs, err := getCommitSpec(commit)
if err != nil {
return err
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
} else {
argIsRef := actions.ValidateIsRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader())
// <ref>
if argIsRef && !seenRefs[arg] {
cs, err := getCommitSpec(arg)
if err != nil {
return err
}
seenRefs[arg] = true
opts.commitSpecs = append(opts.commitSpecs, cs)
} else {
// <table>
opts.tableName = arg
}
}
}
if len(opts.tableName) > 0 && len(opts.excludingCommitSpecs) > 0 {
return fmt.Errorf("Cannot provide table name with excluding refs")
}
return nil
}
func getCommitSpec(commit string) (*doltdb.CommitSpec, error) {
cs, err := doltdb.NewCommitSpec(commit)
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
return 1
return nil, fmt.Errorf("invalid commit %s\n", commit)
}
return cs, nil
}
func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
hashes := make([]hash.Hash, len(opts.commitSpecs))
for i, cs := range opts.commitSpecs {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
return 1
}
h, err := commit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
hashes[i] = h
}
cHashToRefs := map[hash.Hash][]string{}
@@ -189,7 +315,7 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, o
// Get all remote branches
remotes, err := dEnv.DoltDB.GetRemotesWithHashes(ctx)
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get Branch information."))
cli.PrintErrln(color.HiRedString("Fatal error: cannot get Remotes information."))
return 1
}
for _, r := range remotes {
@@ -216,20 +342,37 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, o
cHashToRefs[t.Hash] = append(cHashToRefs[t.Hash], tagName)
}
h, err := commit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
matchFunc := func(c *doltdb.Commit) (bool, error) {
return c.NumParents() >= opts.minParents, nil
}
matchFunc := func(commit *doltdb.Commit) (bool, error) {
return commit.NumParents() >= opts.minParents, nil
var commits []*doltdb.Commit
if len(opts.excludingCommitSpecs) == 0 {
commits, err = commitwalk.GetTopNTopoOrderedCommitsMatching(ctx, dEnv.DoltDB, hashes, opts.numLines, matchFunc)
} else {
excludingHashes := make([]hash.Hash, len(opts.excludingCommitSpecs))
for i, excludingSpec := range opts.excludingCommitSpecs {
excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get excluding commit for current branch."))
return 1
}
excludingHash, err := excludingCommit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
excludingHashes[i] = excludingHash
}
commits, err = commitwalk.GetDotDotRevisions(ctx, dEnv.DoltDB, hashes, dEnv.DoltDB, excludingHashes, opts.numLines)
}
commits, err := commitwalk.GetTopNTopoOrderedCommitsMatching(ctx, dEnv.DoltDB, h, opts.numLines, matchFunc)
if err != nil {
cli.PrintErrln("Error retrieving commit.")
cli.PrintErrln(err)
return 1
}
@@ -258,7 +401,7 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, o
commitHash: cmHash,
parentHashes: pHashes,
branchNames: cHashToRefs[cmHash],
isHead: cmHash == h})
isHead: hashIsHead(cmHash, hashes)})
}
logToStdOut(opts, commitsInfo)
@@ -266,6 +409,13 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, o
return 0
}
func hashIsHead(cmHash hash.Hash, hashes []hash.Hash) bool {
if len(hashes) > 1 || len(hashes) == 0 {
return false
}
return cmHash == hashes[0]
}
func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (bool, error) {
rv, err := commit.GetRootValue(ctx)
if err != nil {
@@ -280,28 +430,38 @@ func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (
return ok, nil
}
func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts logOpts, cs *doltdb.CommitSpec, tableName string) error {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) error {
hashes := make([]hash.Hash, len(opts.commitSpecs))
for i, cs := range opts.commitSpecs {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
}
h, err := commit.HashOf()
if err != nil {
return err
}
// Check that the table exists in the head commits
exists, err := tableExists(ctx, commit, opts.tableName)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("error: table %s does not exist", opts.tableName)
}
hashes[i] = h
}
// Check that the table exists in the head commit
exists, err := tableExists(ctx, commit, tableName)
if err != nil {
return err
matchFunc := func(commit *doltdb.Commit) (bool, error) {
return commit.NumParents() >= opts.minParents, nil
}
if !exists {
return fmt.Errorf("error: table %s does not exist", tableName)
}
h, err := commit.HashOf()
if err != nil {
return err
}
itr, err := commitwalk.GetTopologicalOrderIterator(ctx, dEnv.DoltDB, h)
itr, err := commitwalk.GetTopologicalOrderIterator(ctx, dEnv.DoltDB, hashes, matchFunc)
if err != nil && err != io.EOF {
return err
}
@@ -340,7 +500,7 @@ func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts logOpts, cs *d
return err
}
ok, err := didTableChangeBetweenRootValues(ctx, childRV, parentRV, tableName)
ok, err := didTableChangeBetweenRootValues(ctx, childRV, parentRV, opts.tableName)
if err != nil {
return err
}
@@ -387,7 +547,7 @@ func logRefs(pager *outputpager.Pager, comm logNode) {
pager.Writer.Write([]byte("\033[33m) \033[0m"))
}
func logCompact(pager *outputpager.Pager, opts logOpts, commits []logNode) {
func logCompact(pager *outputpager.Pager, opts *logOpts, commits []logNode) {
for _, comm := range commits {
if len(comm.parentHashes) < opts.minParents {
return
@@ -413,7 +573,7 @@ func logCompact(pager *outputpager.Pager, opts logOpts, commits []logNode) {
}
}
func logDefault(pager *outputpager.Pager, opts logOpts, commits []logNode) {
func logDefault(pager *outputpager.Pager, opts *logOpts, commits []logNode) {
for _, comm := range commits {
if len(comm.parentHashes) < opts.minParents {
return
@@ -451,7 +611,7 @@ func logDefault(pager *outputpager.Pager, opts logOpts, commits []logNode) {
}
}
func logToStdOut(opts logOpts, commits []logNode) {
func logToStdOut(opts *logOpts, commits []logNode) {
if cli.ExecuteWithStdioRestored == nil {
return
}
+3 -3
View File
@@ -238,15 +238,15 @@ func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
}
func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint string, insecure bool) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: authEndpoint,
Creds: dc,
Creds: dc.RPCCreds(),
Insecure: insecure,
})
if err != nil {
return nil, errhand.BuildDError("error: unable to build dial options for connecting to server with credentials.").AddCause(err).Build()
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return nil, errhand.BuildDError("error: unable to connect to server with credentials.").AddCause(err).Build()
}
+102 -42
View File
@@ -159,9 +159,9 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
}
suggestedMsg := fmt.Sprintf("Merge branch '%s' into %s", commitSpecStr, dEnv.RepoStateReader().CWBHeadRef().GetPath())
msg, err := getCommitMessage(ctx, apr, dEnv, suggestedMsg)
if err != nil {
return handleCommitErr(ctx, dEnv, err, usage)
msg := ""
if m, ok := apr.GetValue(cli.MessageArg); ok {
msg = m
}
if apr.Contains(cli.NoCommitFlag) && apr.Contains(cli.CommitFlag) {
@@ -214,25 +214,6 @@ func getUnmergedTableCount(ctx context.Context, root *doltdb.RootValue) (int, er
return unmergedTableCount, nil
}
// getCommitMessage returns commit message depending on whether user defined commit message and/or no-ff flag.
// If user defined message, it will use that. If not, and no-ff flag is defined, it will ask user for commit message from editor.
// If none of commit message or no-ff flag is defined, it will return suggested message.
func getCommitMessage(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv, suggestedMsg string) (string, errhand.VerboseError) {
if m, ok := apr.GetValue(cli.MessageArg); ok {
return m, nil
}
if apr.Contains(cli.NoFFParam) {
msg, err := getCommitMessageFromEditor(ctx, dEnv, suggestedMsg, "", apr.Contains(cli.NoEditFlag))
if err != nil {
return "", errhand.VerboseErrorFromError(err)
}
return msg, nil
}
return "", nil
}
func validateMergeSpec(ctx context.Context, spec *merge.MergeSpec) errhand.VerboseError {
if spec.HeadH == spec.MergeH {
//TODO - why is this different for merge/pull?
@@ -484,38 +465,117 @@ func handleMergeErr(ctx context.Context, dEnv *env.DoltEnv, mergeErr error, hasC
// FF merges will not surface constraint violations on their own; constraint verify --all
// is required to reify violations.
func performMerge(ctx context.Context, dEnv *env.DoltEnv, spec *merge.MergeSpec, suggestedMsg string) (map[string]*merge.MergeStats, error) {
var tblStats map[string]*merge.MergeStats
if ok, err := spec.HeadC.CanFastForwardTo(ctx, spec.MergeC); err != nil && !errors.Is(err, doltdb.ErrUpToDate) {
return nil, err
} else if ok {
if spec.Noff {
tblStats, err = merge.ExecNoFFMerge(ctx, dEnv, spec)
return tblStats, err
return executeNoFFMergeAndCommit(ctx, dEnv, spec, suggestedMsg)
}
return nil, merge.ExecuteFFMerge(ctx, dEnv, spec)
}
tblStats, err := merge.ExecuteMerge(ctx, dEnv, spec)
return executeMergeAndCommit(ctx, dEnv, spec, suggestedMsg)
}
func executeNoFFMergeAndCommit(ctx context.Context, dEnv *env.DoltEnv, spec *merge.MergeSpec, suggestedMsg string) (map[string]*merge.MergeStats, error) {
tblToStats, err := merge.ExecNoFFMerge(ctx, dEnv, spec)
if err != nil {
return tblStats, err
return tblToStats, err
}
if !spec.NoCommit && !hasConflictOrViolations(tblStats) {
msg := spec.Msg
if spec.Msg == "" {
msg, err = getCommitMessageFromEditor(ctx, dEnv, suggestedMsg, "", spec.NoEdit)
if err != nil {
return nil, err
}
}
author := fmt.Sprintf("%s <%s>", spec.Name, spec.Email)
res := performCommit(ctx, "commit", []string{"-m", msg, "--author", author}, dEnv)
if res != 0 {
return nil, fmt.Errorf("dolt commit failed after merging")
}
if spec.NoCommit {
cli.Println("Automatic merge went well; stopped before committing as requested")
return tblToStats, nil
}
return tblStats, nil
// Reload roots since the above method writes new values to the working set
roots, err := dEnv.Roots(ctx)
if err != nil {
return tblToStats, err
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return tblToStats, err
}
var mergeParentCommits []*doltdb.Commit
if ws.MergeActive() {
mergeParentCommits = []*doltdb.Commit{ws.MergeState().Commit()}
}
msg, err := getCommitMsgForMerge(ctx, dEnv, spec.Msg, suggestedMsg, spec.NoEdit)
if err != nil {
return tblToStats, err
}
_, err = actions.CommitStaged(ctx, roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData(), actions.CommitStagedProps{
Message: msg,
Date: spec.Date,
AllowEmpty: spec.AllowEmpty,
Force: spec.Force,
Name: spec.Name,
Email: spec.Email,
})
if err != nil {
return tblToStats, fmt.Errorf("%w; failed to commit", err)
}
err = dEnv.ClearMerge(ctx)
if err != nil {
return tblToStats, err
}
return tblToStats, err
}
func executeMergeAndCommit(ctx context.Context, dEnv *env.DoltEnv, spec *merge.MergeSpec, suggestedMsg string) (map[string]*merge.MergeStats, error) {
tblToStats, err := merge.ExecuteMerge(ctx, dEnv, spec)
if err != nil {
return tblToStats, err
}
if hasConflictOrViolations(tblToStats) {
return tblToStats, nil
}
if spec.NoCommit {
cli.Println("Automatic merge went well; stopped before committing as requested")
return tblToStats, nil
}
msg, err := getCommitMsgForMerge(ctx, dEnv, spec.Msg, suggestedMsg, spec.NoEdit)
if err != nil {
return tblToStats, err
}
author := fmt.Sprintf("%s <%s>", spec.Name, spec.Email)
res := performCommit(ctx, "commit", []string{"-m", msg, "--author", author}, dEnv)
if res != 0 {
return nil, fmt.Errorf("dolt commit failed after merging")
}
return tblToStats, nil
}
// getCommitMsgForMerge returns user defined message if exists; otherwise, get the commit message from editor.
func getCommitMsgForMerge(ctx context.Context, dEnv *env.DoltEnv, userDefinedMsg, suggestedMsg string, noEdit bool) (string, error) {
if userDefinedMsg != "" {
return userDefinedMsg, nil
}
msg, err := getCommitMessageFromEditor(ctx, dEnv, suggestedMsg, "", noEdit)
if err != nil {
return msg, err
}
if msg == "" {
return msg, fmt.Errorf("error: Empty commit message.\n" +
"Not committing merge; use 'dolt commit' to complete the merge.")
}
return msg, nil
}
// hasConflictOrViolations checks for conflicts or constraint violation regardless of a table being modified
+19 -7
View File
@@ -80,24 +80,36 @@ func (cmd MergeBaseCmd) Exec(ctx context.Context, commandStr string, args []stri
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
left, verr := ResolveCommitWithVErr(dEnv, apr.Arg(0))
mergeBaseStr, verr := getMergeBaseFromStrings(ctx, dEnv, apr.Arg(0), apr.Arg(1))
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
right, verr := ResolveCommitWithVErr(dEnv, apr.Arg(1))
cli.Println(mergeBaseStr)
return 0
}
// getMergeBaseFromStrings resolves two revisions and returns the merge base
// commit hash string
func getMergeBaseFromStrings(ctx context.Context, dEnv *env.DoltEnv, leftStr, rightStr string) (string, errhand.VerboseError) {
left, verr := ResolveCommitWithVErr(dEnv, leftStr)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
return "", verr
}
right, verr := ResolveCommitWithVErr(dEnv, rightStr)
if verr != nil {
return "", verr
}
mergeBase, err := merge.MergeBase(ctx, left, right)
if err != nil {
verr = errhand.BuildDError("could not find merge-base for args %s", apr.Args).AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
verr = errhand.BuildDError("could not find merge-base for args %s %s", leftStr, rightStr).AddCause(err).Build()
return "", verr
}
cli.Println(mergeBase.String())
return 0
return mergeBase.String(), nil
}
func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit, errhand.VerboseError) {
+14 -13
View File
@@ -17,7 +17,7 @@ package commands
import (
"context"
"github.com/fatih/color"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
@@ -31,8 +31,7 @@ const (
migrationPrompt = `Run "dolt migrate" to update this database to the latest data format`
migrationMsg = "Migrating database to the latest data format"
migratePushFlag = "push"
migratePullFlag = "pull"
migrateDropConflictsFlag = "drop-conflicts"
)
var migrateDocs = cli.CommandDocumentationContent{
@@ -65,8 +64,7 @@ func (cmd MigrateCmd) Docs() *cli.CommandDocumentation {
func (cmd MigrateCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(migratePushFlag, "", "Push all migrated branches to the remote")
ap.SupportsFlag(migratePullFlag, "", "Update all local tracking refs for a migrated remote")
ap.SupportsFlag(migrateDropConflictsFlag, "", "Drop any conflicts visited during the migration")
return ap
}
@@ -81,12 +79,8 @@ func (cmd MigrateCmd) Exec(ctx context.Context, commandStr string, args []string
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, migrateDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.Contains(migratePushFlag) && apr.Contains(migratePullFlag) {
cli.PrintErrf(color.RedString("options --%s and --%s are mutually exclusive", migratePushFlag, migratePullFlag))
return 1
}
if err := MigrateDatabase(ctx, dEnv); err != nil {
dropConflicts := apr.Contains(migrateDropConflictsFlag)
if err := MigrateDatabase(ctx, dEnv, dropConflicts); err != nil {
verr := errhand.BuildDError("migration failed").AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
}
@@ -94,18 +88,25 @@ func (cmd MigrateCmd) Exec(ctx context.Context, commandStr string, args []string
}
// MigrateDatabase migrates the NomsBinFormat of |dEnv.DoltDB|.
func MigrateDatabase(ctx context.Context, dEnv *env.DoltEnv) error {
func MigrateDatabase(ctx context.Context, dEnv *env.DoltEnv, dropConflicts bool) error {
menv, err := migrate.NewEnvironment(ctx, dEnv)
if err != nil {
return err
}
menv.DropConflicts = dropConflicts
if curr := menv.Existing.DoltDB.Format(); types.IsFormat_DOLT(curr) {
cli.Println("database is already migrated")
return nil
}
p, err := menv.Migration.FS.Abs(".")
if err != nil {
return err
}
cli.Println("migrating database at tmp dir: ", p)
err = migrate.TraverseDAG(ctx, menv.Existing.DoltDB, menv.Migration.DoltDB)
err = migrate.TraverseDAG(ctx, menv, menv.Existing.DoltDB, menv.Migration.DoltDB)
if err != nil {
return err
}
+1 -1
View File
@@ -112,7 +112,7 @@ func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec)
// Fetch all references
branchRefs, err := srcDB.GetHeadRefs(ctx)
if err != nil {
return env.ErrFailedToReadDb
return fmt.Errorf("%w: %s", env.ErrFailedToReadDb, err.Error())
}
hasBranch, err := srcDB.HasBranch(ctx, pullSpec.Branch.GetPath())
+1 -3
View File
@@ -111,9 +111,7 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), dEnv)
if err != nil {
if err == remotestorage.ErrInvalidDoltSpecPath {
err = actions.HandleInvalidDoltSpecPathErr(opts.Remote.Name, opts.Remote.Url, err)
}
err = actions.HandleInitRemoteStorageClientErr(opts.Remote.Name, opts.Remote.Url, err)
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
+7 -3
View File
@@ -94,6 +94,8 @@ func (cmd RemoteCmd) ArgParser() *argparser.ArgParser {
ap.SupportsValidatedString(dbfactory.AWSCredsTypeParam, "", "creds-type", "", argparser.ValidatorFromStrList(dbfactory.AWSCredsTypeParam, dbfactory.AWSCredTypes))
ap.SupportsString(dbfactory.AWSCredsFileParam, "", "file", "AWS credentials file")
ap.SupportsString(dbfactory.AWSCredsProfile, "", "profile", "AWS profile to use")
ap.SupportsString(dbfactory.OSSCredsFileParam, "", "file", "OSS credentials file")
ap.SupportsString(dbfactory.OSSCredsProfile, "", "profile", "OSS profile to use")
return ap
}
@@ -191,12 +193,14 @@ func parseRemoteArgs(apr *argparser.ArgParseResults, scheme, remoteUrl string) (
params := map[string]string{}
var err error
if scheme == dbfactory.AWSScheme {
switch scheme {
case dbfactory.AWSScheme:
err = cli.AddAWSParams(remoteUrl, apr, params)
} else {
case dbfactory.OSSScheme:
err = cli.AddOSSParams(remoteUrl, apr, params)
default:
err = cli.VerifyNoAwsParams(apr)
}
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
+1 -1
View File
@@ -124,7 +124,7 @@ func (cmd RootsCmd) processTableFile(ctx context.Context, path string, modified
defer rdCloser.Close()
return nbs.IterChunks(rdCloser.(io.ReadSeeker), func(chunk chunks.Chunk) (stop bool, err error) {
return nbs.IterChunks(ctx, rdCloser.(io.ReadSeeker), func(chunk chunks.Chunk) (stop bool, err error) {
//Want a clean db every loop
sp, _ := spec.ForDatabase("mem")
vrw := sp.GetVRW(ctx)
+2 -2
View File
@@ -151,14 +151,14 @@ func getGRPCEmitter(dEnv *env.DoltEnv) *events.GrpcEmitter {
}
hostAndPort := fmt.Sprintf("%s:%d", host, port)
endpoint, opts, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: hostAndPort,
Insecure: insecure,
})
if err != nil {
return nil
}
conn, err := grpc.Dial(endpoint, opts...)
conn, err := grpc.Dial(cfg.Endpoint, cfg.DialOptions...)
if err != nil {
return nil
}
+158 -43
View File
@@ -85,24 +85,26 @@ By default this command uses the dolt database in the current working directory,
var ErrMultipleDoltCfgDirs = errors.NewKind("multiple .doltcfg directories detected: '%s' and '%s'; pass one of the directories using option --doltcfg-dir")
const (
QueryFlag = "query"
FormatFlag = "result-format"
saveFlag = "save"
executeFlag = "execute"
listSavedFlag = "list-saved"
messageFlag = "message"
BatchFlag = "batch"
DataDirFlag = "data-dir"
MultiDBDirFlag = "multi-db-dir"
CfgDirFlag = "doltcfg-dir"
DefaultCfgDirName = ".doltcfg"
PrivsFilePathFlag = "privilege-file"
DefaultPrivsName = "privileges.db"
continueFlag = "continue"
fileInputFlag = "file"
UserFlag = "user"
DefaultUser = "root"
DefaultHost = "localhost"
QueryFlag = "query"
FormatFlag = "result-format"
saveFlag = "save"
executeFlag = "execute"
listSavedFlag = "list-saved"
messageFlag = "message"
BatchFlag = "batch"
DataDirFlag = "data-dir"
MultiDBDirFlag = "multi-db-dir"
CfgDirFlag = "doltcfg-dir"
DefaultCfgDirName = ".doltcfg"
PrivsFilePathFlag = "privilege-file"
BranchCtrlPathFlag = "branch-control-file"
DefaultPrivsName = "privileges.db"
DefaultBranchCtrlName = "branch_control.db"
continueFlag = "continue"
fileInputFlag = "file"
UserFlag = "user"
DefaultUser = "root"
DefaultHost = "localhost"
welcomeMsg = `# Welcome to the DoltSQL shell.
# Statements must be terminated with ';'.
@@ -155,8 +157,9 @@ func (cmd SqlCmd) ArgParser() *argparser.ArgParser {
ap.SupportsString(MultiDBDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within. Defaults to the current directory. This is deprecated, you should use `--data-dir` instead")
ap.SupportsString(CfgDirFlag, "", "directory", "Defines a directory that contains configuration files for dolt. Defaults to `$data-dir/.doltcfg`. Will only be created if there is a change that affect configuration settings.")
ap.SupportsFlag(continueFlag, "c", "Continue running queries on an error. Used for batch mode only.")
ap.SupportsString(fileInputFlag, "", "input file", "Execute statements from the file given.")
ap.SupportsString(fileInputFlag, "f", "input file", "Execute statements from the file given.")
ap.SupportsString(PrivsFilePathFlag, "", "privilege file", "Path to a file to load and store users and grants. Defaults to `$doltcfg-dir/privileges.db`. Will only be created if there is a change to privileges.")
ap.SupportsString(BranchCtrlPathFlag, "", "branch control file", "Path to a file to load and store branch control permissions. Defaults to `$doltcfg-dir/branch_control.db`. Will only be created if there is a change to branch control permissions.")
ap.SupportsString(UserFlag, "u", "user", fmt.Sprintf("Defines the local superuser (defaults to `%v`). If the specified user exists, will take on permissions of that user.", DefaultUser))
return ap
}
@@ -256,6 +259,15 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
}
// If no branch control file path is specified, default to doltcfg directory
branchControlFilePath, hasBCFilePath := apr.GetValue(BranchCtrlPathFlag)
if !hasBCFilePath {
branchControlFilePath, err = dEnv.FS.Abs(filepath.Join(cfgDirPath, DefaultBranchCtrlName))
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
}
initialRoots, err := mrEnv.GetWorkingRoots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
@@ -278,13 +290,14 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
config := &engine.SqlEngineConfig{
InitialDb: currentDb,
IsReadOnly: false,
DoltCfgDirPath: cfgDirPath,
PrivFilePath: privsFp,
ServerUser: username,
ServerHost: DefaultHost,
Autocommit: true,
InitialDb: currentDb,
IsReadOnly: false,
DoltCfgDirPath: cfgDirPath,
PrivFilePath: privsFp,
BranchCtrlFilePath: branchControlFilePath,
ServerUser: username,
ServerHost: DefaultHost,
Autocommit: true,
}
if query, queryOK := apr.GetValue(QueryFlag); queryOK {
@@ -316,6 +329,14 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("couldn't open file %s", fileInput).Build(), usage)
}
info, err := os.Stat(fileInput)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("couldn't get file size %s", fileInput).Build(), usage)
}
// initialize fileReadProg global variable if there is a file to process queries from
fileReadProg = &fileReadProgress{bytesRead: 0, totalBytes: info.Size(), printed: 0, displayStrLen: 0}
defer fileReadProg.close()
}
if isTty {
@@ -598,7 +619,7 @@ func execQuery(
// Set client to specified user
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
sqlSch, rowIter, err := ProcessQuery(sqlCtx, query, se)
sqlSch, rowIter, err := processQuery(sqlCtx, query, se)
if err != nil {
return formatQueryError("", err)
}
@@ -778,9 +799,12 @@ func saveQuery(ctx context.Context, root *doltdb.RootValue, query string, name s
// runMultiStatementMode allows for the execution of more than one query, but it doesn't attempt any batch optimizations
func runMultiStatementMode(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, continueOnErr bool) error {
scanner := NewSqlStatementScanner(input)
var query string
for scanner.Scan() {
if fileReadProg != nil {
updateFileReadProgressOutput()
fileReadProg.setReadBytes(int64(len(scanner.Bytes())))
}
query += scanner.Text()
if len(query) == 0 || query == "\n" {
continue
@@ -792,10 +816,20 @@ func runMultiStatementMode(ctx *sql.Context, se *engine.SqlEngine, input io.Read
shouldProcessQuery = false
}
if shouldProcessQuery {
sqlSch, rowIter, err := ProcessQuery(ctx, query, se)
sqlStatement, err := sqlparser.Parse(query)
if err == sqlparser.ErrEmpty {
continue
} else if err != nil {
handleError(scanner.statementStartLine, query, err)
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
if !continueOnErr {
return err
}
}
sqlSch, rowIter, err := processParsedQuery(ctx, query, se, sqlStatement)
if err != nil {
verr := formatQueryError(fmt.Sprintf("error on line %d for query %s", scanner.statementStartLine, query), err)
cli.PrintErrln(verr.Verbose())
handleError(scanner.statementStartLine, query, err)
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
if !continueOnErr {
return err
@@ -803,10 +837,18 @@ func runMultiStatementMode(ctx *sql.Context, se *engine.SqlEngine, input io.Read
}
if rowIter != nil {
switch sqlStatement.(type) {
case *sqlparser.Select, *sqlparser.Insert, *sqlparser.Update, *sqlparser.Delete,
*sqlparser.OtherRead, *sqlparser.Show, *sqlparser.Explain, *sqlparser.Union:
// For any statement that prints out result, print a newline to put the regular output on its own line
if fileReadProg != nil {
fileReadProg.printNewLineIfNeeded()
}
}
err = engine.PrettyPrintResults(ctx, se.GetResultFormat(), sqlSch, rowIter)
if err != nil {
err = fmt.Errorf("error executing query on line %d: %v", scanner.statementStartLine, err)
return errhand.VerboseErrorFromError(err)
handleError(scanner.statementStartLine, query, err)
return err
}
}
}
@@ -820,12 +862,21 @@ func runMultiStatementMode(ctx *sql.Context, se *engine.SqlEngine, input io.Read
return nil
}
func handleError(stmtStartLine int, query string, err error) {
verr := formatQueryError(fmt.Sprintf("error on line %d for query %s", stmtStartLine, query), err)
cli.PrintErrln(verr.Verbose())
}
// runBatchMode processes queries until EOF. The Root of the sqlEngine may be updated.
func runBatchMode(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, continueOnErr bool) error {
scanner := NewSqlStatementScanner(input)
var query string
for scanner.Scan() {
if fileReadProg != nil {
updateFileReadProgressOutput()
fileReadProg.setReadBytes(int64(len(scanner.Bytes())))
}
query += scanner.Text()
if len(query) == 0 || query == "\n" {
continue
@@ -985,7 +1036,7 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
return false
}
if sqlSch, rowIter, err = ProcessQuery(sqlCtx, query, se); err != nil {
if sqlSch, rowIter, err = processQuery(sqlCtx, query, se); err != nil {
verr := formatQueryError("", err)
shell.Println(verr.Verbose())
} else if rowIter != nil {
@@ -1134,9 +1185,9 @@ func prepend(s string, ss []string) []string {
return newSs
}
// ProcessQuery processes a single query. The Root of the sqlEngine will be updated if necessary.
// processQuery processes a single query. The Root of the sqlEngine will be updated if necessary.
// Returns the schema and the row iterator for the results, which may be nil, and an error if one occurs.
func ProcessQuery(ctx *sql.Context, query string, se *engine.SqlEngine) (sql.Schema, sql.RowIter, error) {
func processQuery(ctx *sql.Context, query string, se *engine.SqlEngine) (sql.Schema, sql.RowIter, error) {
sqlStatement, err := sqlparser.Parse(query)
if err == sqlparser.ErrEmpty {
// silently skip empty statements
@@ -1144,7 +1195,13 @@ func ProcessQuery(ctx *sql.Context, query string, se *engine.SqlEngine) (sql.Sch
} else if err != nil {
return nil, nil, err
}
return processParsedQuery(ctx, query, se, sqlStatement)
}
// processParsedQuery processes a single query with the parsed statement provided. The Root of the sqlEngine
// will be updated if necessary. Returns the schema and the row iterator for the results, which may be nil,
// and an error if one occurs.
func processParsedQuery(ctx *sql.Context, query string, se *engine.SqlEngine, sqlStatement sqlparser.Statement) (sql.Schema, sql.RowIter, error) {
switch s := sqlStatement.(type) {
case *sqlparser.Use:
sch, ri, err := se.Query(ctx, query)
@@ -1195,10 +1252,18 @@ type stats struct {
rowsDeleted int
unflushedEdits int
unprintedEdits int
displayStrLen int
}
type fileReadProgress struct {
bytesRead int64
totalBytes int64
printed int64
displayStrLen int
}
var batchEditStats = &stats{}
var displayStrLen int
var fileReadProg *fileReadProgress
const maxBatchSize = 200000
const updateInterval = 1000
@@ -1215,6 +1280,34 @@ func (s *stats) shouldFlush() bool {
return s.unflushedEdits >= maxBatchSize
}
// printNewLineIfNeeded prints a new line when there are outputs printed other than its output line of batch read progress.
func (s *stats) printNewLineIfNeeded() {
if s.displayStrLen > 0 {
cli.Print("\n")
s.displayStrLen = 0
}
}
// close will print last updated line of processed 100.0% and a new line
func (f *fileReadProgress) close() {
f.bytesRead = f.totalBytes
updateFileReadProgressOutput()
cli.Println() // need a newline after all updates are executed
}
// setReadBytes updates number of bytes that are read so far from the file
func (f *fileReadProgress) setReadBytes(b int64) {
f.bytesRead = f.printed + b
}
// printNewLineIfNeeded prints a new line when there are outputs printed other than its output line of file read progress.
func (f *fileReadProgress) printNewLineIfNeeded() {
if f.displayStrLen > 0 {
cli.Print("\n")
f.displayStrLen = 0
}
}
func flushBatchedEdits(ctx *sql.Context, se *engine.SqlEngine) error {
err := se.IterDBs(func(_ string, db dsqle.SqlDatabase) (bool, error) {
_, rowIter, err := se.Query(ctx, "COMMIT;")
@@ -1296,7 +1389,7 @@ func processBatchQuery(ctx *sql.Context, query string, se *engine.SqlEngine) err
}
func processNonBatchableQuery(ctx *sql.Context, se *engine.SqlEngine, query string, sqlStatement sqlparser.Statement) (returnErr error) {
sqlSch, rowIter, err := ProcessQuery(ctx, query, se)
sqlSch, rowIter, err := processParsedQuery(ctx, query, se, sqlStatement)
if err != nil {
return err
}
@@ -1310,10 +1403,10 @@ func processNonBatchableQuery(ctx *sql.Context, se *engine.SqlEngine, query stri
// Some statement types should print results, even in batch mode.
switch sqlStatement.(type) {
case *sqlparser.Select, *sqlparser.OtherRead, *sqlparser.Show, *sqlparser.Explain, *sqlparser.Union:
if displayStrLen > 0 {
// If we've been printing in batch mode, print a newline to put the regular output on its own line
cli.Print("\n")
displayStrLen = 0
// For any statement that prints out result, print a newline to put the regular output on its own line
batchEditStats.printNewLineIfNeeded()
if fileReadProg != nil {
fileReadProg.printNewLineIfNeeded()
}
err = engine.PrettyPrintResults(ctx, se.GetResultFormat(), sqlSch, rowIter)
if err != nil {
@@ -1467,13 +1560,35 @@ func insertsIntoAutoIncrementCol(ctx *sql.Context, se *engine.SqlEngine, query s
return isAutoInc, nil
}
// updateBatchEditOutput will delete the line it printed before, and print the updated line.
// If there were other functions printed result, it will print update line on a new line.
// This function is used for only batch reads into dolt sql.
func updateBatchEditOutput() {
if fileReadProg != nil {
fileReadProg.printNewLineIfNeeded()
}
displayStr := fmt.Sprintf("Rows inserted: %d Rows updated: %d Rows deleted: %d",
batchEditStats.rowsInserted, batchEditStats.rowsUpdated, batchEditStats.rowsDeleted)
displayStrLen = cli.DeleteAndPrint(displayStrLen, displayStr)
batchEditStats.displayStrLen = cli.DeleteAndPrint(batchEditStats.displayStrLen, displayStr)
batchEditStats.unprintedEdits = 0
}
// updateFileReadProgressOutput will delete the line it printed before, and print the updated line.
// If there were other functions printed result, it will print update line on a new line.
// This function is used for only file reads for dolt sql when `--file` flag is used.
func updateFileReadProgressOutput() {
if fileReadProg == nil {
// this should not happen, but sanity check
cli.Println("No file is being processed.")
}
// batch can be writing to the line, so print new line.
batchEditStats.printNewLineIfNeeded()
percent := float64(fileReadProg.bytesRead) / float64(fileReadProg.totalBytes) * 100
fileReadProg.printed = fileReadProg.bytesRead
displayStr := fmt.Sprintf("Processed %.1f%% of the file", percent)
fileReadProg.displayStrLen = cli.DeleteAndPrint(fileReadProg.displayStrLen, displayStr)
}
// Updates the batch insert stats with the results of an INSERT, UPDATE, or DELETE statement.
func mergeResultIntoStats(ctx *sql.Context, statement sqlparser.Statement, rowIter sql.RowIter, s *stats) error {
switch statement.(type) {
+38 -42
View File
@@ -16,20 +16,19 @@ package commands
import (
"context"
"fmt"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/store/types"
)
//var UUIDS = []uuid.UUID{
@@ -46,7 +45,9 @@ var tableName = "people"
// Smoke test: Console opens and exits
func TestSqlConsole(t *testing.T) {
t.Run("SQL console opens and exits", func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{}
commandStr := "dolt sql"
@@ -71,7 +72,8 @@ func TestSqlBatchMode(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-b", "-q", test.query}
@@ -108,7 +110,8 @@ func TestSqlSelect(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -132,7 +135,8 @@ func TestSqlShow(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -203,7 +207,8 @@ func TestShowTables(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
assert.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -232,7 +237,8 @@ func TestAlterTable(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -257,7 +263,8 @@ func TestDropTable(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -352,7 +359,7 @@ func TestInsert(t *testing.T) {
},
{
name: "missing required column",
query: `insert into people (id, name, age) values
query: `insert into people (id, title, age) values
('00000000-0000-0000-0000-000000000005', 'Frank Frankerson', 10)`,
expectedRes: 1,
},
@@ -373,7 +380,8 @@ func TestInsert(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -387,15 +395,10 @@ func TestInsert(t *testing.T) {
// Assert that all expected IDs exist after the insert
for _, expectedid := range test.expectedIds {
tbl, _, err := root.GetTable(ctx, tableName)
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
taggedVals := row.TaggedValues{dtestutils.IdTag: types.String(expectedid.String())}
key := taggedVals.NomsTupleForPKCols(types.Format_Default, dtestutils.TypedSchema.GetPKCols())
kv, err := key.Value(ctx)
assert.NoError(t, err)
_, ok, err := table.GetRow(ctx, tbl, dtestutils.TypedSchema, kv.(types.Tuple))
assert.NoError(t, err)
assert.True(t, ok, "expected id not found")
assert.True(t, len(rows) > 0)
}
}
})
@@ -457,7 +460,8 @@ func TestUpdate(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
ctx := context.Background()
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -471,17 +475,11 @@ func TestUpdate(t *testing.T) {
// Assert that all rows have been updated
for i, expectedid := range test.expectedIds {
tbl, _, err := root.GetTable(ctx, tableName)
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
taggedVals := row.TaggedValues{dtestutils.IdTag: types.String(expectedid.String())}
key := taggedVals.NomsTupleForPKCols(types.Format_Default, dtestutils.TypedSchema.GetPKCols())
kv, err := key.Value(ctx)
assert.NoError(t, err)
row, ok, err := table.GetRow(ctx, tbl, dtestutils.TypedSchema, kv.(types.Tuple))
assert.NoError(t, err)
assert.True(t, ok, "expected id not found")
ageVal, _ := row.GetColVal(dtestutils.AgeTag)
assert.Equal(t, test.expectedAges[i], uint(ageVal.(types.Uint)))
assert.True(t, len(rows) > 0)
assert.Equal(t, uint32(test.expectedAges[i]), rows[0][2])
}
}
})
@@ -535,11 +533,12 @@ func TestDelete(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
ctx := context.Background()
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
ctx := context.Background()
commandStr := "dolt sql"
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv)
assert.Equal(t, test.expectedRes, result)
@@ -550,15 +549,10 @@ func TestDelete(t *testing.T) {
// Assert that all rows have been deleted
for _, expectedid := range test.deletedIds {
tbl, _, err := root.GetTable(ctx, tableName)
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
taggedVals := row.TaggedValues{dtestutils.IdTag: types.UUID(expectedid)}
key := taggedVals.NomsTupleForPKCols(types.Format_Default, dtestutils.TypedSchema.GetPKCols())
kv, err := key.Value(ctx)
assert.NoError(t, err)
_, ok, err := table.GetRow(ctx, tbl, dtestutils.TypedSchema, kv.(types.Tuple))
assert.NoError(t, err)
assert.False(t, ok, "row not deleted")
assert.True(t, len(rows) == 0)
}
}
})
@@ -566,7 +560,9 @@ func TestDelete(t *testing.T) {
}
func TestCommitHooksNoErrors(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
sqle.AddDoltSystemVariables()
sql.SystemVariables.SetGlobal(dsess.SkipReplicationErrors, true)
sql.SystemVariables.SetGlobal(dsess.ReplicateToRemote, "unknown")
+144 -33
View File
@@ -16,6 +16,8 @@ package sqlserver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
@@ -35,6 +37,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/remotesrv"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)
@@ -123,6 +126,11 @@ func Serve(
}
}
clusterController, err := cluster.NewController(lgr, serverConfig.ClusterConfig(), mrEnv.Config())
if err != nil {
return err, nil
}
serverConf, sErr, cErr := getConfigFromServerConfig(serverConfig)
if cErr != nil {
return nil, cErr
@@ -132,15 +140,17 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
BranchCtrlFilePath: serverConfig.BranchControlFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),
ServerPass: serverConfig.Password(),
ServerHost: serverConfig.Host(),
Autocommit: serverConfig.AutoCommit(),
JwksConfig: serverConfig.JwksConfig(),
ClusterController: clusterController,
}
sqlEngine, err := engine.NewSqlEngine(
ctx,
@@ -175,19 +185,31 @@ func Serve(
}
defer listener.Close()
mySQLServer, startError = server.NewServer(
serverConf,
sqlEngine.GetUnderlyingEngine(),
newSessionBuilder(sqlEngine, serverConfig),
listener,
)
v, ok := serverConfig.(validatingServerConfig)
if ok && v.goldenMysqlConnectionString() != "" {
mySQLServer, startError = server.NewValidatingServer(
serverConf,
sqlEngine.GetUnderlyingEngine(),
newSessionBuilder(sqlEngine, serverConfig),
listener,
v.goldenMysqlConnectionString(),
)
} else {
mySQLServer, startError = server.NewServer(
serverConf,
sqlEngine.GetUnderlyingEngine(),
newSessionBuilder(sqlEngine, serverConfig),
listener,
)
}
if startError != nil {
if errors.Is(startError, server.UnixSocketInUseError) {
lgr.Warn("unix socket set up failed: file already in use: ", serverConf.Socket)
} else if startError != nil {
cli.PrintErr(startError)
return
} else {
sqlserver.SetRunningServer(mySQLServer)
}
sqlserver.SetRunningServer(mySQLServer)
var metSrv *http.Server
if serverConfig.MetricsHost() != "" && serverConfig.MetricsPort() > 0 {
@@ -207,7 +229,18 @@ func Serve(
var remoteSrv *remotesrv.Server
if serverConfig.RemotesapiPort() != nil {
if remoteSrvSqlCtx, err := sqlEngine.NewContext(context.Background()); err == nil {
remoteSrv = sqle.NewRemoteSrvServer(logrus.NewEntry(lgr), remoteSrvSqlCtx, *serverConfig.RemotesapiPort())
args := sqle.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
Logger: logrus.NewEntry(lgr),
ReadOnly: true,
HttpPort: *serverConfig.RemotesapiPort(),
GrpcPort: *serverConfig.RemotesapiPort(),
})
remoteSrv, err = remotesrv.NewServer(args)
if err != nil {
lgr.Errorf("error creating remotesapi server on port %d: %v", *serverConfig.RemotesapiPort(), err)
startError = err
return
}
listeners, err := remoteSrv.Listeners()
if err != nil {
lgr.Errorf("error starting remotesapi server listeners on port %d: %v", *serverConfig.RemotesapiPort(), err)
@@ -225,6 +258,51 @@ func Serve(
}
}
var clusterRemoteSrv *remotesrv.Server
if clusterController != nil {
if remoteSrvSqlCtx, err := sqlEngine.NewContext(context.Background()); err == nil {
args := clusterController.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
Logger: logrus.NewEntry(lgr),
})
clusterRemoteSrvTLSConfig, err := LoadClusterTLSConfig(serverConfig.ClusterConfig())
if err != nil {
lgr.Errorf("error starting remotesapi server for cluster config, could not load tls config: %v", err)
startError = err
return
}
args.TLSConfig = clusterRemoteSrvTLSConfig
clusterRemoteSrv, err = remotesrv.NewServer(args)
if err != nil {
lgr.Errorf("error creating remotesapi server on port %d: %v", *serverConfig.RemotesapiPort(), err)
startError = err
return
}
listeners, err := clusterRemoteSrv.Listeners()
if err != nil {
lgr.Errorf("error starting remotesapi server listeners for cluster config on port %d: %v", clusterController.RemoteSrvPort(), err)
startError = err
return
}
go clusterRemoteSrv.Serve(listeners)
go clusterController.Run()
clusterController.ManageQueryConnections(
mySQLServer.SessionManager().Iter,
sqlEngine.GetUnderlyingEngine().ProcessList.Kill,
mySQLServer.SessionManager().KillConnection,
)
} else {
lgr.Errorf("error creating SQL engine context for remotesapi server: %v", err)
startError = err
return
}
}
if ok, f := mrEnv.IsLocked(); ok {
startError = env.ErrActiveServerLock.New(f)
return
@@ -241,6 +319,12 @@ func Serve(
if remoteSrv != nil {
remoteSrv.GracefulStop()
}
if clusterRemoteSrv != nil {
clusterRemoteSrv.GracefulStop()
}
if clusterController != nil {
clusterController.GracefulStop()
}
return mySQLServer.Close()
})
@@ -256,6 +340,22 @@ func Serve(
return
}
func LoadClusterTLSConfig(cfg cluster.Config) (*tls.Config, error) {
rcfg := cfg.RemotesAPIConfig()
if rcfg.TLSKey() == "" && rcfg.TLSCert() == "" {
return nil, nil
}
c, err := tls.LoadX509KeyPair(rcfg.TLSCert(), rcfg.TLSKey())
if err != nil {
return nil, err
}
return &tls.Config{
Certificates: []tls.Certificate{
c,
},
}, nil
}
func portInUse(hostPort string) bool {
timeout := time.Second
conn, _ := net.DialTimeout("tcp", hostPort, timeout)
@@ -366,21 +466,32 @@ func handleProtocolAndAddress(serverConfig ServerConfig) (server.Config, error)
}
serverConf.Address = hostPort
// if socket is defined with or without value -> unix
if serverConfig.Socket() != "" {
if runtime.GOOS == "windows" {
return server.Config{}, fmt.Errorf("cannot define unix socket file on Windows")
}
serverConf.Socket = serverConfig.Socket()
sock, useSock, err := checkForUnixSocket(serverConfig)
if err != nil {
return server.Config{}, err
}
if useSock {
serverConf.Socket = sock
}
// TODO : making it an "opt in" feature (just to start) and requiring users to pass in the `--socket` flag
// to turn them on instead of defaulting them on when host and port aren't set or host is set to `localhost`.
//} else {
// // if host is undefined or defined as "localhost" -> unix
// if shouldUseUnixSocket(serverConfig) {
// serverConf.Socket = defaultUnixSocketFilePath
// }
//}
return serverConf, nil
}
// checkForUnixSocket evaluates ServerConfig for whether the unix socket is to be used or not.
// If user defined socket flag or host is 'localhost', it returns the unix socket file location
// either user-defined or the default if it was not defined.
func checkForUnixSocket(config ServerConfig) (string, bool, error) {
if config.Socket() != "" {
if runtime.GOOS == "windows" {
return "", false, fmt.Errorf("cannot define unix socket file on Windows")
}
return config.Socket(), true, nil
} else {
// if host is undefined or defined as "localhost" -> unix
if runtime.GOOS != "windows" && config.Host() == "localhost" {
return defaultUnixSocketFilePath, true, nil
}
}
return "", false, nil
}
+29 -11
View File
@@ -27,9 +27,9 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils/testcommands"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/config"
)
@@ -62,6 +62,9 @@ var (
func TestServerArgs(t *testing.T) {
serverController := NewServerController()
go func() {
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"-H", "localhost",
"-P", "15200",
@@ -70,7 +73,7 @@ func TestServerArgs(t *testing.T) {
"-t", "5",
"-l", "info",
"-r",
}, dtestutils.CreateEnvWithSeedData(t), serverController)
}, dEnv, serverController)
}()
err := serverController.WaitForStart()
require.NoError(t, err)
@@ -102,7 +105,9 @@ listener:
`
serverController := NewServerController()
go func() {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
dEnv.FS.WriteFile("config.yaml", []byte(yamlConfig))
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"--config", "config.yaml",
@@ -120,7 +125,8 @@ listener:
}
func TestServerBadArgs(t *testing.T) {
env := dtestutils.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
tests := [][]string{
{"-H", "127.0.0.0.1"},
@@ -148,7 +154,8 @@ func TestServerBadArgs(t *testing.T) {
}
func TestServerGoodParams(t *testing.T) {
env := dtestutils.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
tests := []ServerConfig{
DefaultServerConfig(),
@@ -186,7 +193,9 @@ func TestServerGoodParams(t *testing.T) {
}
func TestServerSelect(t *testing.T) {
env := dtestutils.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15300)
sc := NewServerController()
@@ -194,7 +203,7 @@ func TestServerSelect(t *testing.T) {
go func() {
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, env)
}()
err := sc.WaitForStart()
err = sc.WaitForStart()
require.NoError(t, err)
const dbName = "dolt"
@@ -243,6 +252,9 @@ func TestServerFailsIfPortInUse(t *testing.T) {
Addr: ":15200",
Handler: http.DefaultServeMux,
}
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
go server.ListenAndServe()
go func() {
startServer(context.Background(), "test", "dolt sql-server", []string{
@@ -253,15 +265,18 @@ func TestServerFailsIfPortInUse(t *testing.T) {
"-t", "5",
"-l", "info",
"-r",
}, dtestutils.CreateEnvWithSeedData(t), serverController)
}, dEnv, serverController)
}()
err := serverController.WaitForStart()
err = serverController.WaitForStart()
require.Error(t, err)
server.Close()
}
func TestServerSetDefaultBranch(t *testing.T) {
dEnv := dtestutils.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15302)
sc := NewServerController()
@@ -269,7 +284,7 @@ func TestServerSetDefaultBranch(t *testing.T) {
go func() {
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, dEnv)
}()
err := sc.WaitForStart()
err = sc.WaitForStart()
require.NoError(t, err)
const dbName = "dolt"
@@ -414,6 +429,9 @@ func TestReadReplica(t *testing.T) {
sc := NewServerController()
serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15303)
// set socket to nil to force tcp
serverConfig = serverConfig.WithHost("127.0.0.1").WithSocket("")
func() {
os.Chdir(multiSetup.DbPaths[readReplicaDbName])
go func() {
+82 -6
View File
@@ -16,12 +16,15 @@ package sqlserver
import (
"crypto/tls"
"errors"
"fmt"
"net"
"path/filepath"
"strings"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
)
// LogLevel defines the available levels of logging for the server.
@@ -51,6 +54,7 @@ const (
defaultDataDir = "."
defaultCfgDir = ".doltcfg"
defaultPrivilegeFilePath = "privileges.db"
defaultBranchControlFilePath = "branch_control.db"
defaultMetricsHost = ""
defaultMetricsPort = -1
defaultAllowCleartextPasswords = false
@@ -133,6 +137,8 @@ type ServerConfig interface {
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
// JSON string.
PrivilegeFilePath() string
// BranchControlFilePath returns the path to the file which contains the branch control permissions.
BranchControlFilePath() string
// UserVars is an array containing user specific session variables
UserVars() []UserSessionVars
// JwksConfig is an array containing jwks config
@@ -146,6 +152,15 @@ type ServerConfig interface {
// as a dolt remote for things like `clone`, `fetch` and read
// replication.
RemotesapiPort() *int
// ClusterConfig is the configuration for clustering in this sql-server.
ClusterConfig() cluster.Config
}
type validatingServerConfig interface {
ServerConfig
// goldenMysqlConnectionString returns a connection string for a mysql
// instance that can be used to validate query results
goldenMysqlConnectionString() string
}
type commandLineServerConfig struct {
@@ -167,9 +182,11 @@ type commandLineServerConfig struct {
requireSecureTransport bool
persistenceBehavior string
privilegeFilePath string
branchControlFilePath string
allowCleartextPasswords bool
socket string
remotesapiPort *int
goldenMysqlConn string
}
var _ ServerConfig = (*commandLineServerConfig)(nil)
@@ -273,12 +290,21 @@ func (cfg *commandLineServerConfig) RemotesapiPort() *int {
return cfg.remotesapiPort
}
func (cfg *commandLineServerConfig) ClusterConfig() cluster.Config {
return nil
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
// JSON string.
func (cfg *commandLineServerConfig) PrivilegeFilePath() string {
return cfg.privilegeFilePath
}
// BranchControlFilePath returns the path to the file which contains the branch control permissions.
func (cfg *commandLineServerConfig) BranchControlFilePath() string {
return cfg.branchControlFilePath
}
// UserVars is an array containing user specific session variables.
func (cfg *commandLineServerConfig) UserVars() []UserSessionVars {
return nil
@@ -399,6 +425,12 @@ func (cfg *commandLineServerConfig) withPrivilegeFilePath(privFilePath string) *
return cfg
}
// withBranchControlFilePath updates the path to the file which contains the branch control permissions
func (cfg *commandLineServerConfig) withBranchControlFilePath(branchControlFilePath string) *commandLineServerConfig {
cfg.branchControlFilePath = branchControlFilePath
return cfg
}
func (cfg *commandLineServerConfig) withAllowCleartextPasswords(allow bool) *commandLineServerConfig {
cfg.allowCleartextPasswords = allow
return cfg
@@ -416,6 +448,15 @@ func (cfg *commandLineServerConfig) WithRemotesapiPort(port *int) *commandLineSe
return cfg
}
func (cfg *commandLineServerConfig) goldenMysqlConnectionString() string {
return cfg.goldenMysqlConn
}
func (cfg *commandLineServerConfig) withGoldenMysqlConnectionString(cs string) *commandLineServerConfig {
cfg.goldenMysqlConn = cs
return cfg
}
// DefaultServerConfig creates a `*ServerConfig` that has all of the options set to their default values.
func DefaultServerConfig() *commandLineServerConfig {
return &commandLineServerConfig{
@@ -432,6 +473,7 @@ func DefaultServerConfig() *commandLineServerConfig {
dataDir: defaultDataDir,
cfgDir: filepath.Join(defaultDataDir, defaultCfgDir),
privilegeFilePath: filepath.Join(defaultDataDir, defaultCfgDir, defaultPrivilegeFilePath),
branchControlFilePath: filepath.Join(defaultDataDir, defaultCfgDir, defaultBranchControlFilePath),
allowCleartextPasswords: defaultAllowCleartextPasswords,
}
}
@@ -453,6 +495,40 @@ func ValidateConfig(config ServerConfig) error {
if config.RequireSecureTransport() && config.TLSCert() == "" && config.TLSKey() == "" {
return fmt.Errorf("require_secure_transport can only be `true` when a tls_key and tls_cert are provided.")
}
return ValidateClusterConfig(config.ClusterConfig())
}
// ValidateClusterConfig checks a cluster replication configuration for
// correctness. A nil config is valid (clustering disabled). Otherwise it
// requires: at least one standby remote, each with a non-empty name and a
// remote_url_template containing the {database} placeholder; a bootstrap
// role that is empty, "primary" or "standby"; a non-negative bootstrap
// epoch; a remotesapi port within the valid TCP range; and a remotesapi
// TLS key/cert pair that is either fully present or fully absent.
// It returns a descriptive error for the first violation found.
func ValidateClusterConfig(config cluster.Config) error {
	if config == nil {
		return nil
	}
	remotes := config.StandbyRemotes()
	if len(remotes) == 0 {
		return errors.New("cluster config: must supply standby_remotes when supplying cluster configuration.")
	}
	for i := range remotes {
		if remotes[i].Name() == "" {
			return fmt.Errorf("cluster: standby_remotes[%d]: name: Cannot be empty", i)
		}
		if !strings.Contains(remotes[i].RemoteURLTemplate(), "{database}") {
			return fmt.Errorf("cluster: standby_remotes[%d]: remote_url_template: is \"%s\" but must include the {database} template parameter", i, remotes[i].RemoteURLTemplate())
		}
	}
	// The field names below were previously misspelled "boostrap_*" in the
	// error text; fixed to match the actual YAML keys (bootstrap_role,
	// bootstrap_epoch) so users can find the offending setting.
	if config.BootstrapRole() != "" && config.BootstrapRole() != "primary" && config.BootstrapRole() != "standby" {
		return fmt.Errorf("cluster: bootstrap_role: is \"%s\" but must be \"primary\" or \"standby\"", config.BootstrapRole())
	}
	if config.BootstrapEpoch() < 0 {
		return fmt.Errorf("cluster: bootstrap_epoch: is %d but must be >= 0", config.BootstrapEpoch())
	}
	if config.RemotesAPIConfig().Port() < 0 || config.RemotesAPIConfig().Port() > 65535 {
		return fmt.Errorf("cluster: remotesapi: port: is not in range 0-65535: %d", config.RemotesAPIConfig().Port())
	}
	if config.RemotesAPIConfig().TLSKey() == "" && config.RemotesAPIConfig().TLSCert() != "" {
		return fmt.Errorf("cluster: remotesapi: tls_key: must supply a tls_key if you supply a tls_cert")
	}
	if config.RemotesAPIConfig().TLSKey() != "" && config.RemotesAPIConfig().TLSCert() == "" {
		return fmt.Errorf("cluster: remotesapi: tls_cert: must supply a tls_cert if you supply a tls_key")
	}
	return nil
}
@@ -478,12 +554,12 @@ func ConnectionString(config ServerConfig, database string) string {
// ConfigInfo returns a summary of some of the config which contains some of the more important information
func ConfigInfo(config ServerConfig) string {
socket := ""
if config.Socket() != "" {
s := config.Socket()
if s == "" {
s = defaultUnixSocketFilePath
}
socket = fmt.Sprintf(`|S="%v"`, s)
sock, useSock, err := checkForUnixSocket(config)
if err != nil {
panic(err)
}
if useSock {
socket = fmt.Sprintf(`|S="%v"`, sock)
}
return fmt.Sprintf(`HP="%v:%v"|T="%v"|R="%v"|L="%v"%s`, config.Host(), config.Port(),
config.ReadTimeout(), config.ReadOnly(), config.LogLevel(), socket)
+128 -12
View File
@@ -19,6 +19,7 @@ import (
mysql "database/sql"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
@@ -27,8 +28,9 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/ishell"
"github.com/fatih/color"
_ "github.com/go-sql-driver/mysql"
mysqlDriver "github.com/go-sql-driver/mysql"
"github.com/gocraft/dbr/v2"
"github.com/gocraft/dbr/v2/dialect"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands"
@@ -39,7 +41,10 @@ import (
)
const (
sqlClientDualFlag = "dual"
sqlClientDualFlag = "dual"
sqlClientQueryFlag = "query"
sqlClientUseDbFlag = "use-db"
sqlClientResultFormat = "result-format"
)
var sqlClientDocs = cli.CommandDocumentationContent{
@@ -52,10 +57,13 @@ Similar to {{.EmphasisLeft}}dolt sql-server{{.EmphasisRight}}, this command may
Synopsis: []string{
"[-d] --config {{.LessThan}}file{{.GreaterThan}}",
"[-d] [-H {{.LessThan}}host{{.GreaterThan}}] [-P {{.LessThan}}port{{.GreaterThan}}] [-u {{.LessThan}}user{{.GreaterThan}}] [-p {{.LessThan}}password{{.GreaterThan}}] [-t {{.LessThan}}timeout{{.GreaterThan}}] [-l {{.LessThan}}loglevel{{.GreaterThan}}] [--data-dir {{.LessThan}}directory{{.GreaterThan}}] [--query-parallelism {{.LessThan}}num-go-routines{{.GreaterThan}}] [-r]",
"-q {{.LessThan}}string{{.GreaterThan}} [--use-db {{.LessThan}}db_name{{.GreaterThan}}] [--result-format {{.LessThan}}format{{.GreaterThan}}] [-H {{.LessThan}}host{{.GreaterThan}}] [-P {{.LessThan}}port{{.GreaterThan}}] [-u {{.LessThan}}user{{.GreaterThan}}] [-p {{.LessThan}}password{{.GreaterThan}}]",
},
}
type SqlClientCmd struct{}
// SqlClientCmd is the `dolt sql-client` command. It connects to a dolt
// sql-server, or spawns and connects to one itself when --dual is given.
type SqlClientCmd struct {
	// VersionStr is the dolt version string; it is passed to Serve when this
	// command spawns its own server in --dual mode.
	VersionStr string
}
var _ cli.Command = SqlClientCmd{}
@@ -75,6 +83,10 @@ func (cmd SqlClientCmd) Docs() *cli.CommandDocumentation {
// ArgParser extends the sql-server argument parser with the client-only
// options: --dual/-d, --query/-q, --use-db, and --result-format.
func (cmd SqlClientCmd) ArgParser() *argparser.ArgParser {
	ap := SqlServerCmd{}.ArgParser()
	ap.SupportsFlag(sqlClientDualFlag, "d", "Causes this command to spawn a dolt server that is automatically connected to.")
	ap.SupportsString(sqlClientQueryFlag, "q", "string", "Sends the given query to the server and immediately exits.")
	ap.SupportsString(sqlClientUseDbFlag, "", "db_name", fmt.Sprintf("Selects the given database before executing a query. "+
		"By default, uses the current folder's name. Must be used with the --%s flag.", sqlClientQueryFlag))
	ap.SupportsString(sqlClientResultFormat, "", "format", fmt.Sprintf("Returns the results in the given format. Must be used with the --%s flag.", sqlClientQueryFlag))
	return ap
}
@@ -110,6 +122,18 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
cli.PrintErrln(err.Error())
return 1
}
if apr.Contains(sqlClientQueryFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, sqlClientQueryFlag)))
return 1
}
if apr.Contains(sqlClientUseDbFlag) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, sqlClientUseDbFlag)))
return 1
}
if apr.Contains(sqlClientResultFormat) {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s flag may not be used with --%s", sqlClientDualFlag, sqlClientResultFormat)))
return 1
}
serverConfig, err = GetServerConfig(dEnv, apr)
if err != nil {
@@ -126,7 +150,7 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
serverController = NewServerController()
go func() {
_, _ = Serve(ctx, SqlServerCmd{}.VersionStr, serverConfig, serverController, dEnv)
_, _ = Serve(ctx, cmd.VersionStr, serverConfig, serverController, dEnv)
}()
err = serverController.WaitForStart()
if err != nil {
@@ -142,17 +166,109 @@ func (cmd SqlClientCmd) Exec(ctx context.Context, commandStr string, args []stri
}
}
conn, err := dbr.Open("mysql", ConnectionString(serverConfig, ""), nil)
if err != nil {
cli.PrintErrln(err.Error())
serverController.StopServer()
err = serverController.WaitForClose()
if err != nil {
cli.PrintErrln(err.Error())
}
query, hasQuery := apr.GetValue(sqlClientQueryFlag)
dbToUse, hasUseDb := apr.GetValue(sqlClientUseDbFlag)
resultFormat, hasResultFormat := apr.GetValue(sqlClientResultFormat)
if !hasQuery && hasUseDb {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", sqlClientUseDbFlag, sqlClientQueryFlag)))
return 1
} else if !hasQuery && hasResultFormat {
cli.PrintErrln(color.RedString(fmt.Sprintf("--%s may only be used with --%s", sqlClientUseDbFlag, sqlClientResultFormat)))
return 1
}
if !hasUseDb && hasQuery {
directory, err := os.Getwd()
if err != nil {
cli.PrintErrln(color.RedString(err.Error()))
return 1
}
dbToUse = strings.Replace(filepath.Base(directory), "-", "_", -1)
}
format := engine.FormatTabular
if hasResultFormat {
switch strings.ToLower(resultFormat) {
case "tabular":
format = engine.FormatTabular
case "csv":
format = engine.FormatCsv
case "json":
format = engine.FormatJson
case "null":
format = engine.FormatNull
case "vertical":
format = engine.FormatVertical
default:
cli.PrintErrln(color.RedString(fmt.Sprintf("unknown --%s value: %s", sqlClientResultFormat, resultFormat)))
return 1
}
}
// The standard DSN parser cannot handle a forward slash in the database name, so we have to workaround it.
// See the original issue: https://github.com/dolthub/dolt/issues/4623
parsedMySQLConfig, err := mysqlDriver.ParseDSN(ConnectionString(serverConfig, "no_database"))
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
parsedMySQLConfig.DBName = dbToUse
mysqlConnector, err := mysqlDriver.NewConnector(parsedMySQLConfig)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
conn := &dbr.Connection{DB: mysql.OpenDB(mysqlConnector), EventReceiver: nil, Dialect: dialect.MySQL}
_ = conn.Ping()
if hasQuery {
defer conn.Close()
if apr.Contains(noAutoCommitFlag) {
_, err = conn.Exec("set @@autocommit = off;")
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
}
scanner := commands.NewSqlStatementScanner(strings.NewReader(query))
query = ""
for scanner.Scan() {
query += scanner.Text()
if len(query) == 0 || query == "\n" {
continue
}
rows, err := conn.Query(query)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
if rows != nil {
sqlCtx := sql.NewContext(ctx)
wrapper, err := NewMysqlRowWrapper(rows)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
defer wrapper.Close(sqlCtx)
if wrapper.HasMoreRows() {
err = engine.PrettyPrintResults(sqlCtx, format, wrapper.Schema(), wrapper)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
}
}
query = ""
}
if err = scanner.Err(); err != nil {
cli.PrintErrln(err.Error())
return 1
}
return 0
}
ticker := time.NewTicker(time.Second * 10)
go func() {
for range ticker.C {
+24 -1
View File
@@ -48,6 +48,7 @@ const (
allowCleartextPasswordsFlag = "allow-cleartext-passwords"
socketFlag = "socket"
remotesapiPortFlag = "remotesapi-port"
goldenMysqlConn = "golden"
)
func indentLines(s string) string {
@@ -138,7 +139,7 @@ func (cmd SqlServerCmd) ArgParser() *argparser.ArgParser {
ap.SupportsString(passwordFlag, "p", "password", fmt.Sprintf("Defines the server password. Defaults to `%v`.", serverConfig.Password()))
ap.SupportsInt(timeoutFlag, "t", "connection timeout", fmt.Sprintf("Defines the timeout, in seconds, used for connections\nA value of `0` represents an infinite timeout. Defaults to `%v`.", serverConfig.ReadTimeout()))
ap.SupportsFlag(readonlyFlag, "r", "Disable modification of the database.")
ap.SupportsString(logLevelFlag, "l", "log level", fmt.Sprintf("Defines the level of logging provided\nOptions are: `trace', `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `%v`.", serverConfig.LogLevel()))
ap.SupportsString(logLevelFlag, "l", "log level", fmt.Sprintf("Defines the level of logging provided\nOptions are: `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `%v`.", serverConfig.LogLevel()))
ap.SupportsString(commands.DataDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within. Defaults to the current directory.")
ap.SupportsString(commands.MultiDBDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within. Defaults to the current directory. This is deprecated, you should use `--data-dir` instead.")
ap.SupportsString(commands.CfgDirFlag, "", "directory", "Defines a directory that contains configuration files for dolt. Defaults to `$data-dir/.doltcfg`. Will only be created if there is a change that affect configuration settings.")
@@ -147,9 +148,11 @@ func (cmd SqlServerCmd) ArgParser() *argparser.ArgParser {
ap.SupportsInt(maxConnectionsFlag, "", "max-connections", fmt.Sprintf("Set the number of connections handled by the server. Defaults to `%d`.", serverConfig.MaxConnections()))
ap.SupportsString(persistenceBehaviorFlag, "", "persistence-behavior", fmt.Sprintf("Indicate whether to `load` or `ignore` persisted global variables. Defaults to `%s`.", serverConfig.PersistenceBehavior()))
ap.SupportsString(commands.PrivsFilePathFlag, "", "privilege file", "Path to a file to load and store users and grants. Defaults to `$doltcfg-dir/privileges.db`. Will only be created if there is a change to privileges.")
ap.SupportsString(commands.BranchCtrlPathFlag, "", "branch control file", "Path to a file to load and store branch control permissions. Defaults to `$doltcfg-dir/branch_control.db`. Will only be created if there is a change to branch control permissions.")
ap.SupportsString(allowCleartextPasswordsFlag, "", "allow-cleartext-passwords", "Allows use of cleartext passwords. Defaults to false.")
ap.SupportsOptionalString(socketFlag, "", "socket file", "Path for the unix socket file. Defaults to '/tmp/mysql.sock'.")
ap.SupportsUint(remotesapiPortFlag, "", "remotesapi port", "Sets the port for a server which can expose the databases in this sql-server over remotesapi.")
ap.SupportsString(goldenMysqlConn, "", "mysql connection string", "Provides a connection string to a MySQL instance to be user to validate query results")
return ap
}
@@ -255,6 +258,11 @@ func GetServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (ServerC
yamlCfg.UserConfig.Password = &pass
}
if connStr, ok := apr.GetValue(goldenMysqlConn); ok {
cli.Println(connStr)
yamlCfg.GoldenMysqlConn = &connStr
}
return yamlCfg, nil
}
@@ -320,6 +328,16 @@ func SetupDoltConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResults, config S
serverConfig.withPrivilegeFilePath(path)
}
if branchControlFilePath, ok := apr.GetValue(commands.BranchCtrlPathFlag); ok {
serverConfig.withBranchControlFilePath(branchControlFilePath)
} else {
path, err := dEnv.FS.Abs(filepath.Join(cfgDirPath, commands.DefaultBranchCtrlName))
if err != nil {
return err
}
serverConfig.withBranchControlFilePath(path)
}
return nil
}
@@ -422,6 +440,11 @@ func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResult
serverConfig.autoCommit = !apr.Contains(noAutoCommitFlag)
serverConfig.allowCleartextPasswords = apr.Contains(allowCleartextPasswordsFlag)
if connStr, ok := apr.GetValue(goldenMysqlConn); ok {
cli.Println(connStr)
serverConfig.withGoldenMysqlConnectionString(connStr)
}
return serverConfig, nil
}
+106 -2
View File
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
)
func strPtr(s string) *string {
@@ -114,7 +115,11 @@ type MetricsYAMLConfig struct {
}
type RemotesapiYAMLConfig struct {
Port *int `yaml:"port"`
Port_field *int `yaml:"port"`
}
// Port returns the configured remotesapi port.
// NOTE(review): this dereferences Port_field unconditionally and will panic
// if the `port` key is absent from the YAML — presumably callers check the
// *int accessor (RemotesapiPort) for nil before calling this; confirm.
func (r RemotesapiYAMLConfig) Port() int {
	return *r.Port_field
}
type UserSessionVars struct {
@@ -134,12 +139,16 @@ type YAMLConfig struct {
CfgDirStr *string `yaml:"cfg_dir"`
MetricsConfig MetricsYAMLConfig `yaml:"metrics"`
RemotesapiConfig RemotesapiYAMLConfig `yaml:"remotesapi"`
ClusterCfg *ClusterYAMLConfig `yaml:"cluster"`
PrivilegeFile *string `yaml:"privilege_file"`
BranchControlFile *string `yaml:"branch_control_file"`
Vars []UserSessionVars `yaml:"user_session_vars"`
Jwks []engine.JwksConfig `yaml:"jwks"`
GoldenMysqlConn *string `yaml:"golden_mysql_conn"`
}
var _ ServerConfig = YAMLConfig{}
var _ validatingServerConfig = YAMLConfig{}
func NewYamlConfig(configFileData []byte) (YAMLConfig, error) {
var cfg YAMLConfig
@@ -340,7 +349,7 @@ func (cfg YAMLConfig) MetricsPort() int {
}
func (cfg YAMLConfig) RemotesapiPort() *int {
return cfg.RemotesapiConfig.Port
return cfg.RemotesapiConfig.Port_field
}
// PrivilegeFilePath returns the path to the file which contains all needed privilege information in the form of a
@@ -352,6 +361,14 @@ func (cfg YAMLConfig) PrivilegeFilePath() string {
return filepath.Join(cfg.CfgDir(), defaultPrivilegeFilePath)
}
// BranchControlFilePath returns the path to the file which contains the branch control permissions.
// When no branch_control_file was supplied in the YAML, a default file name
// under the config directory is used.
func (cfg YAMLConfig) BranchControlFilePath() string {
	if cfg.BranchControlFile == nil {
		return filepath.Join(cfg.CfgDir(), defaultBranchControlFilePath)
	}
	return *cfg.BranchControlFile
}
// UserVars is an array containing user specific session variables
func (cfg YAMLConfig) UserVars() []UserSessionVars {
if cfg.Vars != nil {
@@ -444,3 +461,90 @@ func (cfg YAMLConfig) Socket() string {
}
return *cfg.ListenerConfig.Socket
}
// goldenMysqlConnectionString returns the configured golden MySQL connection
// string, or the empty string when golden_mysql_conn was not set in the YAML.
func (cfg YAMLConfig) goldenMysqlConnectionString() string {
	if cfg.GoldenMysqlConn == nil {
		return ""
	}
	return *cfg.GoldenMysqlConn
}
// ClusterConfig returns the cluster replication configuration, or nil when no
// `cluster:` section was present in the YAML. The explicit nil check ensures
// callers receive an untyped nil interface value rather than a non-nil
// interface wrapping a nil *ClusterYAMLConfig.
func (cfg YAMLConfig) ClusterConfig() cluster.Config {
	if cfg.ClusterCfg == nil {
		return nil
	}
	return cfg.ClusterCfg
}
// ClusterYAMLConfig is the YAML representation of the `cluster:` section of
// the server config. The `_field` suffix frees the idiomatic names for the
// accessor methods that implement the cluster config interface.
type ClusterYAMLConfig struct {
	StandbyRemotes_field []standbyRemoteYAMLConfig   `yaml:"standby_remotes"`
	BootstrapRole_field  string                      `yaml:"bootstrap_role"`
	BootstrapEpoch_field int                         `yaml:"bootstrap_epoch"`
	Remotesapi           clusterRemotesAPIYAMLConfig `yaml:"remotesapi"`
}
// standbyRemoteYAMLConfig is the YAML representation of one entry under
// cluster.standby_remotes.
type standbyRemoteYAMLConfig struct {
	Name_field              string `yaml:"name"`
	RemoteURLTemplate_field string `yaml:"remote_url_template"`
}
// Name returns the standby remote's configured name.
func (c standbyRemoteYAMLConfig) Name() string {
	return c.Name_field
}
// RemoteURLTemplate returns the standby remote's URL template; it is expected
// to contain a {database} placeholder (enforced by ValidateClusterConfig).
func (c standbyRemoteYAMLConfig) RemoteURLTemplate() string {
	return c.RemoteURLTemplate_field
}
// StandbyRemotes returns the configured standby remotes, widened to the
// generic cluster.StandbyRemoteConfig interface type.
func (c *ClusterYAMLConfig) StandbyRemotes() []cluster.StandbyRemoteConfig {
	remotes := make([]cluster.StandbyRemoteConfig, 0, len(c.StandbyRemotes_field))
	for _, remote := range c.StandbyRemotes_field {
		remotes = append(remotes, remote)
	}
	return remotes
}
// BootstrapRole returns the configured bootstrap role ("primary", "standby",
// or empty when unset).
func (c *ClusterYAMLConfig) BootstrapRole() string {
	return c.BootstrapRole_field
}
// BootstrapEpoch returns the configured bootstrap epoch (0 when unset).
func (c *ClusterYAMLConfig) BootstrapEpoch() int {
	return c.BootstrapEpoch_field
}
// RemotesAPIConfig returns the cluster's remotesapi sub-configuration.
func (c *ClusterYAMLConfig) RemotesAPIConfig() cluster.RemotesAPIConfig {
	return c.Remotesapi
}
// clusterRemotesAPIYAMLConfig is the YAML representation of the
// cluster.remotesapi section: the listening port plus TLS material and
// server-name matching rules for verifying peers.
type clusterRemotesAPIYAMLConfig struct {
	Port_      int      `yaml:"port"`
	TLSKey_    string   `yaml:"tls_key"`
	TLSCert_   string   `yaml:"tls_cert"`
	TLSCA_     string   `yaml:"tls_ca"`
	URLMatches []string `yaml:"server_name_urls"`
	DNSMatches []string `yaml:"server_name_dns"`
}
// Port returns the cluster remotesapi port (0 when unset).
func (c clusterRemotesAPIYAMLConfig) Port() int {
	return c.Port_
}
// TLSKey returns the configured TLS private key path/value ("" when unset).
func (c clusterRemotesAPIYAMLConfig) TLSKey() string {
	return c.TLSKey_
}
// TLSCert returns the configured TLS certificate path/value ("" when unset).
func (c clusterRemotesAPIYAMLConfig) TLSCert() string {
	return c.TLSCert_
}
// TLSCA returns the configured TLS certificate authority ("" when unset).
func (c clusterRemotesAPIYAMLConfig) TLSCA() string {
	return c.TLSCA_
}
// ServerNameURLMatches returns the server_name_urls entries from the YAML.
func (c clusterRemotesAPIYAMLConfig) ServerNameURLMatches() []string {
	return c.URLMatches
}
// ServerNameDNSMatches returns the server_name_dns entries from the YAML.
func (c clusterRemotesAPIYAMLConfig) ServerNameDNSMatches() []string {
	return c.DNSMatches
}
@@ -161,6 +161,136 @@ remotesapi:
require.Equal(t, 8000, *config.RemotesapiPort())
}
// TestUnmarshallCluster verifies that a YAML `cluster:` section round-trips
// through NewYamlConfig into the cluster.Config accessors (standby remotes,
// bootstrap role/epoch, and remotesapi port).
func TestUnmarshallCluster(t *testing.T) {
	testStr := `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://doltdb-1.doltdb:50051/{database}
  bootstrap_role: primary
  bootstrap_epoch: 0
  remotesapi:
    port: 50051
`
	config, err := NewYamlConfig([]byte(testStr))
	require.NoError(t, err)
	require.NotNil(t, config.ClusterConfig())
	require.NotNil(t, config.ClusterConfig().RemotesAPIConfig())
	require.Equal(t, 50051, config.ClusterConfig().RemotesAPIConfig().Port())
	require.Len(t, config.ClusterConfig().StandbyRemotes(), 1)
	require.Equal(t, "primary", config.ClusterConfig().BootstrapRole())
	require.Equal(t, 0, config.ClusterConfig().BootstrapEpoch())
	require.Equal(t, "standby", config.ClusterConfig().StandbyRemotes()[0].Name())
	require.Equal(t, "http://doltdb-1.doltdb:50051/{database}", config.ClusterConfig().StandbyRemotes()[0].RemoteURLTemplate())
}
// TestValidateClusterConfig exercises ValidateClusterConfig against a table
// of YAML server configs, checking only whether validation succeeds or fails
// (error messages are intentionally not asserted).
func TestValidateClusterConfig(t *testing.T) {
	cases := []struct {
		Name   string // test case description
		Config string // YAML server config to parse and validate
		Error  bool   // whether ValidateClusterConfig is expected to fail
	}{
		{
			Name:   "no cluster: config",
			Config: "",
			Error:  false,
		},
		{
			Name: "all fields valid",
			Config: `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://localhost:50051/{database}
  bootstrap_role: primary
  bootstrap_epoch: 0
  remotesapi:
    port: 50051
`,
			Error: false,
		},
		{
			// Only "primary" and "standby" are legal roles.
			Name: "bad bootstrap_role",
			Config: `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://localhost:50051/{database}
  bootstrap_role: backup
  bootstrap_epoch: 0
  remotesapi:
    port: 50051
`,
			Error: true,
		},
		{
			Name: "negative bootstrap_epoch",
			Config: `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://localhost:50051/{database}
  bootstrap_role: primary
  bootstrap_epoch: -1
  remotesapi:
    port: 50051
`,
			Error: true,
		},
		{
			Name: "negative remotesapi port",
			Config: `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://localhost:50051/{database}
  bootstrap_role: primary
  bootstrap_epoch: 0
  remotesapi:
    port: -5
`,
			Error: true,
		},
		{
			// Missing the {database} placeholder (unclosed brace).
			Name: "bad remote_url_template",
			Config: `
cluster:
  standby_remotes:
  - name: standby
    remote_url_template: http://localhost:50051/{database
  bootstrap_role: primary
  bootstrap_epoch: 0
  remotesapi:
    port: 50051
`,
			Error: true,
		},
		{
			// standby_remotes present but empty: validation requires at least one.
			Name: "no standby remotes",
			Config: `
cluster:
  standby_remotes:
  bootstrap_role: primary
  bootstrap_epoch: 0
  remotesapi:
    port: 50051
`,
			Error: true,
		},
	}
	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			cfg, err := NewYamlConfig([]byte(c.Config))
			require.NoError(t, err)
			if c.Error {
				require.Error(t, ValidateClusterConfig(cfg.ClusterConfig()))
			} else {
				require.NoError(t, ValidateClusterConfig(cfg.ClusterConfig()))
			}
		})
	}
}
// Tests that a common YAML error (incorrect indentation) throws an error
func TestUnmarshallError(t *testing.T) {
testStr := `
+1 -1
View File
@@ -219,7 +219,7 @@ func printRemoteRefTrackingInfo(ctx context.Context, dEnv *env.DoltEnv) error {
// countCommitsInRange returns the number of commits between the given starting point to trace back to the given target point.
// The starting commit must be a descendant of the target commit. Target commit must be a common ancestor commit.
func countCommitsInRange(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash, targetCommitHash hash.Hash) (int, error) {
itr, iErr := commitwalk.GetTopologicalOrderIterator(ctx, ddb, startCommitHash)
itr, iErr := commitwalk.GetTopologicalOrderIterator(ctx, ddb, []hash.Hash{startCommitHash}, nil)
if iErr != nil {
return 0, iErr
}
+32 -30
View File
@@ -62,7 +62,8 @@ const (
primaryKeyParam = "pk"
fileTypeParam = "file-type"
delimParam = "delim"
ignoreSkippedRows = "ignore-skipped-rows"
quiet = "quiet"
ignoreSkippedRows = "ignore-skipped-rows" // alias for quiet
disableFkChecks = "disable-fk-checks"
)
@@ -74,7 +75,7 @@ The schema for the new table can be specified explicitly by providing a SQL sche
If {{.EmphasisLeft}}--update-table | -u{{.EmphasisRight}} is given the operation will update {{.LessThan}}table{{.GreaterThan}} with the contents of file. The table's existing schema will be used, and field names will be used to match file fields with table fields unless a mapping file is specified.
During import, if there is an error importing any row, the import will be aborted by default. Use the {{.EmphasisLeft}}--continue{{.EmphasisRight}} flag to continue importing when an error is encountered. You can add the {{.EmphasisLeft}}--ignore-skipped-rows{{.EmphasisRight}} flag to prevent the import utility from printing all the skipped rows.
During import, if there is an error importing any row, the import will be aborted by default. Use the {{.EmphasisLeft}}--continue{{.EmphasisRight}} flag to continue importing when an error is encountered. You can add the {{.EmphasisLeft}}--quiet{{.EmphasisRight}} flag to prevent the import utility from printing all the skipped rows.
If {{.EmphasisLeft}}--replace-table | -r{{.EmphasisRight}} is given the operation will replace {{.LessThan}}table{{.GreaterThan}} with the contents of the file. The table's existing schema will be used, and field names will be used to match file fields with table fields unless a mapping file is specified.
@@ -87,8 +88,8 @@ A mapping file can be used to map fields between the file being imported and the
In create, update, and replace scenarios the file's extension is used to infer the type of the file. If a file does not have the expected extension then the {{.EmphasisLeft}}--file-type{{.EmphasisRight}} parameter should be used to explicitly define the format of the file in one of the supported formats (csv, psv, json, xlsx). For files separated by a delimiter other than a ',' (type csv) or a '|' (type psv), the --delim parameter can be used to specify a delimiter`,
Synopsis: []string{
"-c [-f] [--pk {{.LessThan}}field{{.GreaterThan}}] [--schema {{.LessThan}}file{{.GreaterThan}}] [--map {{.LessThan}}file{{.GreaterThan}}] [--continue] [--ignore-skipped-rows] [--disable-fk-checks] [--file-type {{.LessThan}}type{{.GreaterThan}}] {{.LessThan}}table{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
"-u [--map {{.LessThan}}file{{.GreaterThan}}] [--continue] [--ignore-skipped-rows] [--file-type {{.LessThan}}type{{.GreaterThan}}] {{.LessThan}}table{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
"-c [-f] [--pk {{.LessThan}}field{{.GreaterThan}}] [--schema {{.LessThan}}file{{.GreaterThan}}] [--map {{.LessThan}}file{{.GreaterThan}}] [--continue] [--quiet] [--disable-fk-checks] [--file-type {{.LessThan}}type{{.GreaterThan}}] {{.LessThan}}table{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
"-u [--map {{.LessThan}}file{{.GreaterThan}}] [--continue] [--quiet] [--file-type {{.LessThan}}type{{.GreaterThan}}] {{.LessThan}}table{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
"-r [--map {{.LessThan}}file{{.GreaterThan}}] [--file-type {{.LessThan}}type{{.GreaterThan}}] {{.LessThan}}table{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
},
}
@@ -96,17 +97,17 @@ In create, update, and replace scenarios the file's extension is used to infer t
var bitTypeRegex = regexp.MustCompile(`(?m)b\'(\d+)\'`)
type importOptions struct {
operation mvdata.TableImportOp
destTableName string
contOnErr bool
force bool
schFile string
primaryKeys []string
nameMapper rowconv.NameMapper
src mvdata.DataLocation
srcOptions interface{}
ignoreSkippedRows bool
disableFkChecks bool
operation mvdata.TableImportOp
destTableName string
contOnErr bool
force bool
schFile string
primaryKeys []string
nameMapper rowconv.NameMapper
src mvdata.DataLocation
srcOptions interface{}
quiet bool
disableFkChecks bool
}
func (m importOptions) IsBatched() bool {
@@ -168,7 +169,7 @@ func getImportMoveOptions(ctx context.Context, apr *argparser.ArgParseResults, d
schemaFile, _ := apr.GetValue(schemaParam)
force := apr.Contains(forceParam)
contOnErr := apr.Contains(contOnErrParam)
ignore := apr.Contains(ignoreSkippedRows)
quiet := apr.Contains(quiet)
disableFks := apr.Contains(disableFkChecks)
val, _ := apr.GetValue(primaryKeyParam)
@@ -238,17 +239,17 @@ func getImportMoveOptions(ctx context.Context, apr *argparser.ArgParseResults, d
}
return &importOptions{
operation: moveOp,
destTableName: tableName,
contOnErr: contOnErr,
force: force,
schFile: schemaFile,
nameMapper: colMapper,
primaryKeys: pks,
src: srcLoc,
srcOptions: srcOpts,
ignoreSkippedRows: ignore,
disableFkChecks: disableFks,
operation: moveOp,
destTableName: tableName,
contOnErr: contOnErr,
force: force,
schFile: schemaFile,
nameMapper: colMapper,
primaryKeys: pks,
src: srcLoc,
srcOptions: srcOpts,
quiet: quiet,
disableFkChecks: disableFks,
}, nil
}
@@ -337,7 +338,8 @@ func (cmd ImportCmd) ArgParser() *argparser.ArgParser {
ap.SupportsFlag(forceParam, "f", "If a create operation is being executed, data already exists in the destination, the force flag will allow the target to be overwritten.")
ap.SupportsFlag(replaceParam, "r", "Replace existing table with imported data while preserving the original schema.")
ap.SupportsFlag(contOnErrParam, "", "Continue importing when row import errors are encountered.")
ap.SupportsFlag(ignoreSkippedRows, "", "Ignore the skipped rows printed by the --continue flag.")
ap.SupportsFlag(quiet, "", "Suppress any warning messages about invalid rows when using the --continue flag.")
ap.SupportsAlias(ignoreSkippedRows, quiet)
ap.SupportsFlag(disableFkChecks, "", "Disables foreign key checks.")
ap.SupportsString(schemaParam, "s", "schema_file", "The schema for the output data.")
ap.SupportsString(mappingFileParam, "m", "mapping_file", "A file that lays out how fields should be mapped from input data to output data.")
@@ -524,8 +526,8 @@ func move(ctx context.Context, rd table.SqlRowReader, wr *mvdata.SqlEngineTableW
return true
}
// Don't log the skipped rows when the ignore-skipped-rows param is specified.
if options.ignoreSkippedRows {
// Don't log the skipped rows when asked to suppress warning output
if options.quiet {
return false
}
+2 -2
View File
@@ -1,4 +1,4 @@
// Copyright 2019 Dolthub, Inc.
// Copyright 2019-2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,5 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// dolt is a command line tool for working with dolt data repositories stored in noms.
// dolt is the command line tool for working with Dolt databases.
package main
+2 -2
View File
@@ -57,7 +57,7 @@ import (
)
const (
Version = "0.41.4"
Version = "0.51.3"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -73,7 +73,7 @@ var doltCommand = cli.NewSubCommandHandler("dolt", "it's git for data", []cli.Co
commands.SqlCmd{VersionStr: Version},
admin.Commands,
sqlserver.SqlServerCmd{VersionStr: Version},
sqlserver.SqlClientCmd{},
sqlserver.SqlClientCmd{VersionStr: Version},
commands.LogCmd{},
commands.BranchCmd{},
commands.CheckoutCmd{},
File diff suppressed because it is too large Load Diff
+1
View File
@@ -36,6 +36,7 @@ const TableSchemaFileID = "DSCH"
const ForeignKeyCollectionFileID = "DFKC"
const MergeArtifactsFileID = "ARTM"
const BlobFileID = "BLOB"
const BranchControlFileID = "BRCL"
const MessageTypesKind int = 27
+16 -1
View File
@@ -144,7 +144,19 @@ func (rcv *RootValue) MutateForeignKeyAddr(j int, n byte) bool {
return false
}
const RootValueNumFields = 3
// Collation returns the collation stored in flatbuffer vtable slot 10, or 0
// (the schema default) when the field is absent from the serialized table.
// NOTE: this appears to be flatbuffers-generated code; edits here would be
// lost on regeneration.
func (rcv *RootValue) Collation() Collation {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		return Collation(rcv._tab.GetUint16(o + rcv._tab.Pos))
	}
	return 0
}
// MutateCollation overwrites the collation field in place; it returns false
// when the field is not present in the serialized buffer (defaulted fields
// cannot be mutated).
func (rcv *RootValue) MutateCollation(n Collation) bool {
	return rcv._tab.MutateUint16Slot(10, uint16(n))
}
const RootValueNumFields = 4
func RootValueStart(builder *flatbuffers.Builder) {
builder.StartObject(RootValueNumFields)
@@ -164,6 +176,9 @@ func RootValueAddForeignKeyAddr(builder *flatbuffers.Builder, foreignKeyAddr fla
func RootValueStartForeignKeyAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
// RootValueAddCollation writes the collation into field slot 3 of the
// RootValue currently being built (default 0, in which case it is elided).
func RootValueAddCollation(builder *flatbuffers.Builder, collation Collation) {
	builder.PrependUint16Slot(3, uint16(collation), 0)
}
// RootValueEnd finishes the RootValue table under construction and returns
// its offset within the builder's buffer.
func RootValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
+33 -1
View File
@@ -649,7 +649,33 @@ func (rcv *Index) MutateSystemDefined(n bool) bool {
return rcv._tab.MutateBoolSlot(18, n)
}
const IndexNumFields = 8
// PrefixLengths returns element j of the prefix-lengths vector (flatbuffer
// vtable slot 20), or 0 when the vector is absent. Elements are uint16,
// hence the j*2 byte stride. Bounds on j are not checked here.
func (rcv *Index) PrefixLengths(j int) uint16 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.GetUint16(a + flatbuffers.UOffsetT(j*2))
	}
	return 0
}
// PrefixLengthsLength returns the number of elements in the prefix-lengths
// vector, or 0 when the vector is absent from the serialized buffer.
func (rcv *Index) PrefixLengthsLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}
// MutatePrefixLengths overwrites element j of the prefix-lengths vector in
// place, returning false when the vector is absent. Bounds on j are not
// checked here.
func (rcv *Index) MutatePrefixLengths(j int, n uint16) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(20))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.MutateUint16(a+flatbuffers.UOffsetT(j*2), n)
	}
	return false
}
const IndexNumFields = 9
func IndexStart(builder *flatbuffers.Builder) {
builder.StartObject(IndexNumFields)
@@ -687,6 +713,12 @@ func IndexAddUniqueKey(builder *flatbuffers.Builder, uniqueKey bool) {
func IndexAddSystemDefined(builder *flatbuffers.Builder, systemDefined bool) {
builder.PrependBoolSlot(7, systemDefined, false)
}
// IndexAddPrefixLengths attaches a previously-built prefix-lengths vector to
// field slot 8 of the Index currently being built.
func IndexAddPrefixLengths(builder *flatbuffers.Builder, prefixLengths flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(prefixLengths), 0)
}
// IndexStartPrefixLengthsVector begins a vector of numElems uint16 values
// (element size 2, alignment 2) for the prefix-lengths field.
func IndexStartPrefixLengthsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(2, numElems, 2)
}
// IndexEnd finishes the Index table under construction and returns its
// offset within the builder's buffer.
func IndexEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
@@ -21,11 +21,12 @@
package remotesapi
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
@@ -8,6 +8,7 @@ package remotesapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
+12 -12
View File
@@ -10,14 +10,12 @@ require (
github.com/aws/aws-sdk-go v1.32.6
github.com/bcicen/jstream v1.0.0
github.com/boltdb/bolt v1.3.1
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/denisbrodbeck/machineid v1.0.1
github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi v0.0.0-20201005193433-3ee972b1d078
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99
github.com/dolthub/vitess v0.0.0-20221116234926-60a2fd96afae
github.com/dustin/go-humanize v1.0.0
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -25,7 +23,7 @@ require (
github.com/gocraft/dbr/v2 v2.7.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.1
github.com/google/go-cmp v0.5.8
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.2.0
github.com/jpillora/backoff v1.0.0
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d
@@ -39,16 +37,16 @@ require (
github.com/silvasur/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/sirupsen/logrus v1.8.1
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
github.com/stretchr/testify v1.7.1
github.com/stretchr/testify v1.8.0
github.com/tealeg/xlsx v1.0.5
github.com/tklauser/go-sysconf v0.3.9 // indirect
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261
golang.org/x/sys v0.1.0
google.golang.org/api v0.32.0
google.golang.org/grpc v1.37.0
google.golang.org/grpc v1.49.0
google.golang.org/protobuf v1.27.1
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/src-d/go-errors.v1 v1.0.0
@@ -56,11 +54,14 @@ require (
)
require (
github.com/dolthub/go-mysql-server v0.12.1-0.20220920170952-3c7de0f3b7dd
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/go-mysql-server v0.14.1-0.20221117215505-afa6d1c3c02c
github.com/google/flatbuffers v2.0.6+incompatible
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_golang v1.11.0
github.com/shirou/gopsutil/v3 v3.22.1
github.com/vbauerster/mpb v3.4.0+incompatible
@@ -76,6 +77,7 @@ require (
go.opentelemetry.io/otel/trace v1.7.0
golang.org/x/text v0.3.7
gonum.org/v1/plot v0.11.0
gopkg.in/yaml.v3 v3.0.1
)
require (
@@ -86,7 +88,6 @@ require (
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-fonts/liberation v0.2.0 // indirect
@@ -116,7 +117,6 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/tklauser/numcpus v0.3.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.22.4 // indirect
@@ -127,11 +127,11 @@ require (
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2 // indirect
gopkg.in/yaml.v3 v3.0.0 // indirect
)
replace (
+21 -20
View File
@@ -79,6 +79,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible h1:QoRMR0TCctLDqBCMyOu1eXdZyMw3F7uGA9qPn2J4+R8=
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
@@ -128,8 +130,9 @@ github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -160,6 +163,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creasty/defaults v1.6.0 h1:ltuE9cfphUtlrBeomuu8PEyISTXnxqkBIoQfXgv7BSc=
github.com/creasty/defaults v1.6.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -175,18 +180,16 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220920170952-3c7de0f3b7dd h1:ppijRWae4E9chm1ieNlIMNKLc34P6I1/mbdim0y9Syg=
github.com/dolthub/go-mysql-server v0.12.1-0.20220920170952-3c7de0f3b7dd/go.mod h1:JJtUZL+JLCimxbLiu4SqjgFefbxjvfpY7Z0i7Kcnm20=
github.com/dolthub/go-mysql-server v0.14.1-0.20221117215505-afa6d1c3c02c h1:n7zzhmZ96rv5gMgn1OhODE/m7zADWd+RyxOqwabom1I=
github.com/dolthub/go-mysql-server v0.14.1-0.20221117215505-afa6d1c3c02c/go.mod h1:z8i7fusnVa0hic93c/58X5tyF4lRj0e4yk3BhO7M0JY=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474/go.mod h1:kMz7uXOXq4qRriCEyZ/LUeTqraLJCjf0WVZcUi6TxUY=
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxPmiuOTndT+lUWUeGjx6eoNOK9O4tQQQ=
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99 h1:XF5dT33M4olcTUzA0XynHdT3eISI0ecfVnEhZc+CFtE=
github.com/dolthub/vitess v0.0.0-20220915235715-9064d89c3f99/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dolthub/vitess v0.0.0-20221116234926-60a2fd96afae h1:3S1jX5/x+3S6Mhg9j2gpC4MxwRGw7RMEdJIo1c03g00=
github.com/dolthub/vitess v0.0.0-20221116234926-60a2fd96afae/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -199,7 +202,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
@@ -593,8 +595,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -694,8 +694,8 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -703,8 +703,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tealeg/xlsx v1.0.5 h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE=
@@ -922,7 +923,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -979,8 +979,8 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -996,6 +996,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1168,8 +1169,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1219,8 +1220,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -0,0 +1,379 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package branch_control
import (
"fmt"
"math"
"strings"
"sync"
"github.com/dolthub/go-mysql-server/sql"
flatbuffers "github.com/google/flatbuffers/go"
"github.com/dolthub/dolt/go/gen/fb/serial"
)
// Permissions are a set of flags that denote a user's allowed functionality on a branch.
// The zero value grants nothing; flags are combined with bitwise OR (see
// Access.gatherPermissions) and tested as a bit set by callers.
type Permissions uint64

const (
	Permissions_Admin Permissions = 1 << iota // Permissions_Admin grants unrestricted control over a branch, including modification of table entries
	Permissions_Write                         // Permissions_Write allows for all modifying operations on a branch, but does not allow modification of table entries
)
// Access contains all of the expressions that comprise the "dolt_branch_control" table, which handles write Access to
// branches, along with write access to the branch control system tables.
//
// Databases, Branches, Users, and Hosts are parallel to Values: the match
// expression at position i of each slice belongs to row Values[i] (insert
// appends to all five slices under one shared collection index, and
// Deserialize rejects inputs whose vectors differ in length).
type Access struct {
	binlog *Binlog // mutation log; insert records every row written (see GetBinlog)

	Databases []MatchExpression // parsed match expressions for the Database column
	Branches  []MatchExpression // parsed match expressions for the Branch column
	Users     []MatchExpression // parsed match expressions for the User column
	Hosts     []MatchExpression // parsed match expressions for the Host column
	Values    []AccessValue     // user-facing row values, one per entry
	RWMutex   *sync.RWMutex     // guards all fields; Match/GetIndex expect the caller to hold it
}
// AccessValue contains the user-facing values of a particular row, along with the permissions for a row.
// The strings are stored post-folding (see Access.insert): Database, Branch,
// and Host are lower-cased, while User keeps its original case.
type AccessValue struct {
	Database string // folded match expression for the database name (case-insensitive)
	Branch   string // folded match expression for the branch name (case-insensitive)
	User     string // folded match expression for the user name (case-sensitive)
	Host     string // folded match expression for the host (case-insensitive)

	Permissions Permissions // bit set of permissions granted by this row
}
// newAccess returns a new, empty Access. The entry slices start nil (their
// zero value) and are grown by insert; the binlog starts with no entries.
func newAccess() *Access {
	tbl := &Access{
		binlog:  NewAccessBinlog(nil),
		RWMutex: &sync.RWMutex{},
	}
	return tbl
}
// Match returns whether any entries match the given database, branch, user, and host, along with their permissions.
// Requires external synchronization handling, therefore manually manage the RWMutex.
//
// The candidate set is narrowed one column at a time — users first (binary,
// case-sensitive collation), then hosts, databases, and branches (all
// case-insensitive) — and the permissions of every surviving row are combined.
// Each intermediate slice is returned to its pool (indexPool / matchExprPool)
// immediately after the next filtering step is done with it, so the Put calls
// are order-sensitive.
func (tbl *Access) Match(database string, branch string, user string, host string) (bool, Permissions) {
	// Package-level Match (not this method): filter on the user column.
	filteredIndexes := Match(tbl.Users, user, sql.Collation_utf8mb4_0900_bin)
	// Collect the hosts of the surviving rows, then filter on the host value.
	filteredHosts := tbl.filterHosts(filteredIndexes)
	indexPool.Put(filteredIndexes)
	filteredIndexes = Match(filteredHosts, host, sql.Collation_utf8mb4_0900_ai_ci)
	matchExprPool.Put(filteredHosts)
	// Same narrowing for the database column...
	filteredDatabases := tbl.filterDatabases(filteredIndexes)
	indexPool.Put(filteredIndexes)
	filteredIndexes = Match(filteredDatabases, database, sql.Collation_utf8mb4_0900_ai_ci)
	matchExprPool.Put(filteredDatabases)
	// ...and finally the branch column.
	filteredBranches := tbl.filterBranches(filteredIndexes)
	indexPool.Put(filteredIndexes)
	filteredIndexes = Match(filteredBranches, branch, sql.Collation_utf8mb4_0900_ai_ci)
	matchExprPool.Put(filteredBranches)
	// Any surviving index means a match; OR together the survivors' permissions.
	bRes, pRes := len(filteredIndexes) > 0, tbl.gatherPermissions(filteredIndexes)
	indexPool.Put(filteredIndexes)
	return bRes, pRes
}
// GetIndex returns the index of the given database, branch, user, and host expressions. If the expressions cannot
// be found, returns -1. Assumes that the given expressions have already been folded. Requires external
// synchronization handling, therefore manually manage the RWMutex.
func (tbl *Access) GetIndex(databaseExpr string, branchExpr string, userExpr string, hostExpr string) int {
	for idx := range tbl.Values {
		row := tbl.Values[idx]
		if row.Database != databaseExpr || row.Branch != branchExpr ||
			row.User != userExpr || row.Host != hostExpr {
			continue
		}
		return idx
	}
	return -1
}
// GetBinlog returns the table's binlog, which records every mutation made
// through insert.
func (tbl *Access) GetBinlog() *Binlog {
	binlog := tbl.binlog
	return binlog
}
// Serialize returns the offset for the Access table written to the given builder.
//
// Flatbuffers requires all referenced objects (the binlog, every match
// expression, every value, and the five vectors) to be written before the
// table itself is started, so the method works bottom-up: leaf objects first,
// then vectors, then the table.
func (tbl *Access) Serialize(b *flatbuffers.Builder) flatbuffers.UOffsetT {
	tbl.RWMutex.RLock()
	defer tbl.RWMutex.RUnlock()

	// The binlog is written first, exactly as before.
	binlog := tbl.binlog.Serialize(b)

	// serializeExprs writes each match expression and collects its offset.
	serializeExprs := func(exprs []MatchExpression) []flatbuffers.UOffsetT {
		offsets := make([]flatbuffers.UOffsetT, len(exprs))
		for i := range exprs {
			offsets[i] = exprs[i].Serialize(b)
		}
		return offsets
	}
	// buildVector fills a just-started vector; flatbuffers vectors are built
	// back-to-front, hence the reverse iteration.
	buildVector := func(offsets []flatbuffers.UOffsetT) flatbuffers.UOffsetT {
		for i := len(offsets) - 1; i >= 0; i-- {
			b.PrependUOffsetT(offsets[i])
		}
		return b.EndVector(len(offsets))
	}

	// Leaf objects, in the original field order.
	databaseOffsets := serializeExprs(tbl.Databases)
	branchOffsets := serializeExprs(tbl.Branches)
	userOffsets := serializeExprs(tbl.Users)
	hostOffsets := serializeExprs(tbl.Hosts)
	valueOffsets := make([]flatbuffers.UOffsetT, len(tbl.Values))
	for i := range tbl.Values {
		valueOffsets[i] = tbl.Values[i].Serialize(b)
	}

	// Field vectors.
	serial.BranchControlAccessStartDatabasesVector(b, len(databaseOffsets))
	databases := buildVector(databaseOffsets)
	serial.BranchControlAccessStartBranchesVector(b, len(branchOffsets))
	branches := buildVector(branchOffsets)
	serial.BranchControlAccessStartUsersVector(b, len(userOffsets))
	users := buildVector(userOffsets)
	serial.BranchControlAccessStartHostsVector(b, len(hostOffsets))
	hosts := buildVector(hostOffsets)
	serial.BranchControlAccessStartValuesVector(b, len(valueOffsets))
	values := buildVector(valueOffsets)

	// Finally, the table itself.
	serial.BranchControlAccessStart(b)
	serial.BranchControlAccessAddBinlog(b, binlog)
	serial.BranchControlAccessAddDatabases(b, databases)
	serial.BranchControlAccessAddBranches(b, branches)
	serial.BranchControlAccessAddUsers(b, users)
	serial.BranchControlAccessAddHosts(b, hosts)
	serial.BranchControlAccessAddValues(b, values)
	return serial.BranchControlAccessEnd(b)
}
// Deserialize populates the table with the data from the flatbuffers representation.
//
// The table must already be empty (deserialization does not merge), and the
// serialized field vectors must all share the same length, since the five
// slices are parallel arrays indexed by row. Returns an error when either
// invariant is violated or the nested binlog fails to deserialize.
func (tbl *Access) Deserialize(fb *serial.BranchControlAccess) error {
	tbl.RWMutex.Lock()
	defer tbl.RWMutex.Unlock()
	// Verify that the table is empty
	if len(tbl.Values) != 0 {
		return fmt.Errorf("cannot deserialize to a non-empty access table")
	}
	// Verify that all fields have the same length
	if fb.DatabasesLength() != fb.BranchesLength() ||
		fb.BranchesLength() != fb.UsersLength() ||
		fb.UsersLength() != fb.HostsLength() ||
		fb.HostsLength() != fb.ValuesLength() {
		return fmt.Errorf("cannot deserialize an access table with differing field lengths")
	}
	// Read the binlog
	binlog, err := fb.TryBinlog(nil)
	if err != nil {
		return err
	}
	if err = tbl.binlog.Deserialize(binlog); err != nil {
		return err
	}
	// Initialize every slice up front; lengths were validated equal above.
	tbl.Databases = make([]MatchExpression, fb.DatabasesLength())
	tbl.Branches = make([]MatchExpression, fb.BranchesLength())
	tbl.Users = make([]MatchExpression, fb.UsersLength())
	tbl.Hosts = make([]MatchExpression, fb.HostsLength())
	tbl.Values = make([]AccessValue, fb.ValuesLength())
	// Read the databases
	// NOTE(review): the result of the generated vector accessors (fb.Databases
	// et al.) is ignored here; indexes stay in range because of the length
	// checks above — confirm the accessors report failure only on bad indexes.
	for i := 0; i < fb.DatabasesLength(); i++ {
		serialMatchExpr := &serial.BranchControlMatchExpression{}
		fb.Databases(serialMatchExpr, i)
		tbl.Databases[i] = deserializeMatchExpression(serialMatchExpr)
	}
	// Read the branches
	for i := 0; i < fb.BranchesLength(); i++ {
		serialMatchExpr := &serial.BranchControlMatchExpression{}
		fb.Branches(serialMatchExpr, i)
		tbl.Branches[i] = deserializeMatchExpression(serialMatchExpr)
	}
	// Read the users
	for i := 0; i < fb.UsersLength(); i++ {
		serialMatchExpr := &serial.BranchControlMatchExpression{}
		fb.Users(serialMatchExpr, i)
		tbl.Users[i] = deserializeMatchExpression(serialMatchExpr)
	}
	// Read the hosts
	for i := 0; i < fb.HostsLength(); i++ {
		serialMatchExpr := &serial.BranchControlMatchExpression{}
		fb.Hosts(serialMatchExpr, i)
		tbl.Hosts[i] = deserializeMatchExpression(serialMatchExpr)
	}
	// Read the values, converting the flatbuffers byte slices into strings.
	for i := 0; i < fb.ValuesLength(); i++ {
		serialAccessValue := &serial.BranchControlAccessValue{}
		fb.Values(serialAccessValue, i)
		tbl.Values[i] = AccessValue{
			Database:    string(serialAccessValue.Database()),
			Branch:      string(serialAccessValue.Branch()),
			User:        string(serialAccessValue.User()),
			Host:        string(serialAccessValue.Host()),
			Permissions: Permissions(serialAccessValue.Permissions()),
		}
	}
	return nil
}
// filterDatabases returns the database match expressions belonging to the
// given collection indexes, or nil when there are none. The returned slice
// comes from matchExprPool; the caller is responsible for returning it.
func (tbl *Access) filterDatabases(filters []uint32) []MatchExpression {
	if len(filters) == 0 {
		return nil
	}
	// Reuse a pooled scratch slice instead of allocating per call.
	result := matchExprPool.Get().([]MatchExpression)[:0]
	for _, collectionIdx := range filters {
		result = append(result, tbl.Databases[collectionIdx])
	}
	return result
}
// filterBranches returns the branch match expressions belonging to the given
// collection indexes, or nil when there are none. The returned slice comes
// from matchExprPool; the caller is responsible for returning it.
func (tbl *Access) filterBranches(filters []uint32) []MatchExpression {
	if len(filters) == 0 {
		return nil
	}
	// Reuse a pooled scratch slice instead of allocating per call.
	result := matchExprPool.Get().([]MatchExpression)[:0]
	for _, collectionIdx := range filters {
		result = append(result, tbl.Branches[collectionIdx])
	}
	return result
}
// filterUsers returns the user match expressions belonging to the given
// collection indexes, or nil when there are none. The returned slice comes
// from matchExprPool; the caller is responsible for returning it.
func (tbl *Access) filterUsers(filters []uint32) []MatchExpression {
	if len(filters) == 0 {
		return nil
	}
	// Reuse a pooled scratch slice instead of allocating per call.
	result := matchExprPool.Get().([]MatchExpression)[:0]
	for _, collectionIdx := range filters {
		result = append(result, tbl.Users[collectionIdx])
	}
	return result
}
// filterHosts returns the host match expressions belonging to the given
// collection indexes, or nil when there are none. The returned slice comes
// from matchExprPool; the caller is responsible for returning it.
func (tbl *Access) filterHosts(filters []uint32) []MatchExpression {
	if len(filters) == 0 {
		return nil
	}
	// Reuse a pooled scratch slice instead of allocating per call.
	result := matchExprPool.Get().([]MatchExpression)[:0]
	for _, collectionIdx := range filters {
		result = append(result, tbl.Hosts[collectionIdx])
	}
	return result
}
// gatherPermissions ORs together the permissions of every row named by the
// given collection indexes and returns the combined bit set (zero when the
// index slice is empty).
func (tbl *Access) gatherPermissions(collectionIndexes []uint32) Permissions {
	var combined Permissions
	for _, idx := range collectionIndexes {
		combined |= tbl.Values[idx].Permissions
	}
	return combined
}
// insertDefaultRow adds a row that allows all users to access and modify all branches, but does not allow them to
// modify any branch control tables. This was the default behavior of Dolt before the introduction of branch permissions.
//
// If the all-match ("%","%","%","%") row already exists it is left alone,
// except that a row with zero permissions is upgraded to Permissions_Write.
func (tbl *Access) insertDefaultRow() {
	// Check if the appropriate row already exists.
	// BUGFIX: the original ranged by value (`for _, value := range tbl.Values`)
	// and assigned to the loop-variable copy, so the Write permission was never
	// written back into tbl.Values. Index into the slice so the mutation sticks.
	for i := range tbl.Values {
		value := &tbl.Values[i]
		if value.Database == "%" && value.Branch == "%" && value.User == "%" && value.Host == "%" {
			// Getting to this state will be disallowed in the future, but if the row exists without any perms, then add
			// the Write perm
			if uint64(value.Permissions) == 0 {
				value.Permissions = Permissions_Write
			}
			return
		}
	}
	tbl.insert("%", "%", "%", "%", Permissions_Write)
}
// insert adds the given expressions to the table. This does not perform any sort of validation whatsoever, so it is
// important to ensure that the expressions are valid before insertion.
//
// The row is appended to all five parallel slices (Databases, Branches,
// Users, Hosts, Values) under one shared collection index, and the folded,
// truncated strings are recorded in the binlog before parsing.
func (tbl *Access) insert(database string, branch string, user string, host string, perms Permissions) {
	// Database, Branch, and Host are case-insensitive, while User is case-sensitive
	database = strings.ToLower(FoldExpression(database))
	branch = strings.ToLower(FoldExpression(branch))
	user = FoldExpression(user)
	host = strings.ToLower(FoldExpression(host))
	// Each expression is capped at 2¹⁶-1 values, so we truncate to 2¹⁶-2 and add the any-match character at the end if it's over
	// NOTE(review): truncation is byte-based (len counts bytes), so a multi-byte
	// UTF-8 character straddling the cut would be split — confirm inputs are
	// ASCII or that a trailing '%' makes the split harmless.
	if len(database) > math.MaxUint16 {
		database = string(append([]byte(database[:math.MaxUint16-1]), byte('%')))
	}
	if len(branch) > math.MaxUint16 {
		branch = string(append([]byte(branch[:math.MaxUint16-1]), byte('%')))
	}
	if len(user) > math.MaxUint16 {
		user = string(append([]byte(user[:math.MaxUint16-1]), byte('%')))
	}
	if len(host) > math.MaxUint16 {
		host = string(append([]byte(host[:math.MaxUint16-1]), byte('%')))
	}
	// Add the expression strings to the binlog
	tbl.binlog.Insert(database, branch, user, host, uint64(perms))
	// Parse and insert the expressions; collations mirror the lookup side in
	// Match (binary for User, accent/case-insensitive for the rest).
	databaseExpr := ParseExpression(database, sql.Collation_utf8mb4_0900_ai_ci)
	branchExpr := ParseExpression(branch, sql.Collation_utf8mb4_0900_ai_ci)
	userExpr := ParseExpression(user, sql.Collation_utf8mb4_0900_bin)
	hostExpr := ParseExpression(host, sql.Collation_utf8mb4_0900_ai_ci)
	// All five slices share the same index for a given row.
	nextIdx := uint32(len(tbl.Values))
	tbl.Databases = append(tbl.Databases, MatchExpression{CollectionIndex: nextIdx, SortOrders: databaseExpr})
	tbl.Branches = append(tbl.Branches, MatchExpression{CollectionIndex: nextIdx, SortOrders: branchExpr})
	tbl.Users = append(tbl.Users, MatchExpression{CollectionIndex: nextIdx, SortOrders: userExpr})
	tbl.Hosts = append(tbl.Hosts, MatchExpression{CollectionIndex: nextIdx, SortOrders: hostExpr})
	tbl.Values = append(tbl.Values, AccessValue{
		Database:    database,
		Branch:      branch,
		User:        user,
		Host:        host,
		Permissions: perms,
	})
}
// Serialize returns the offset for the AccessValue written to the given builder.
func (val *AccessValue) Serialize(b *flatbuffers.Builder) flatbuffers.UOffsetT {
	// Flatbuffers requires strings to exist before the table is started.
	databaseOff := b.CreateString(val.Database)
	branchOff := b.CreateString(val.Branch)
	userOff := b.CreateString(val.User)
	hostOff := b.CreateString(val.Host)

	serial.BranchControlAccessValueStart(b)
	serial.BranchControlAccessValueAddDatabase(b, databaseOff)
	serial.BranchControlAccessValueAddBranch(b, branchOff)
	serial.BranchControlAccessValueAddUser(b, userOff)
	serial.BranchControlAccessValueAddHost(b, hostOff)
	serial.BranchControlAccessValueAddPermissions(b, uint64(val.Permissions))
	return serial.BranchControlAccessValueEnd(b)
}

Some files were not shown because too many files have changed in this diff Show More