Merge remote-tracking branch 'origin/main' into aaron/nbs-table-file-finalizer-panic-always-on

Aaron Son
2023-02-28 21:58:40 -08:00
173 changed files with 6638 additions and 3938 deletions
+109
@@ -0,0 +1,109 @@
import os
import shutil
import sys
import random

if len(sys.argv) != 7:
    print("usage: python3 data.py <output-dir> <table-num> <row-num> <add-num> <delete-num> <update-num>")
    sys.exit(1)

table_dir = sys.argv[1]
tables = int(sys.argv[2])
rows = int(sys.argv[3])
adds = int(sys.argv[4])
deletes = int(sys.argv[5])
updates = int(sys.argv[6])

if __name__ == "__main__":
    if deletes + updates > rows:
        raise ValueError(f"deletes({deletes}) + updates({updates}) = {updates+deletes} > total rows({rows})")

    if os.path.exists(table_dir):
        shutil.rmtree(table_dir, ignore_errors=True)
    os.makedirs(table_dir)

    ys = [i for i in range(rows + adds + deletes + updates)]
    random.shuffle(ys)

    with open(f"{table_dir}/create.sql", "w") as f:
        for i in range(tables):
            if i == 0:
                f.write(f"create table table{i} (x int primary key, y int, z int, key y_idx(y));\n")
            else:
                f.write(f"create table table{i} (x int primary key, y int, z int, key y_idx(y), foreign key (y) references table{i-1}(y));\n")

    for j in range(tables):
        with open(f"{table_dir}/table{j}.csv", "w") as f:
            f.write("x,y,z\n")
            for i in range(rows + deletes + updates):
                f.write(f"{i},{ys[i]},{i}\n")

    with open(f"{table_dir}/branch.sql", "w") as f:
        for i in range(tables):
            f.write("set foreign_key_checks = 0;\n")
            f.write("set unique_checks = 0;\n")
            if adds > 0:
                f.write(f"insert into table{i} values\n")
                for j, k in enumerate(ys[rows+deletes+updates : rows+deletes+updates+adds]):
                    if j == 0:
                        f.write("  ")
                    else:
                        f.write(", ")
                    f.write(f"({rows+deletes+updates+j},{k},{rows+deletes+updates+j})")
                f.write(";\n")
            if deletes > 0:
                f.write(f"delete from table{i} where x in\n")
                for j, y in enumerate(ys[:deletes]):
                    if j == 0:
                        f.write("  (")
                    else:
                        f.write(", ")
                    f.write(f"{y}")
                f.write(");\n")
            if updates > 0:
                f.write(f"update table{i} set y=y+1 where x in\n")
                for j, y in enumerate(ys[deletes : deletes+updates]):
                    if j == 0:
                        f.write("  (")
                    else:
                        f.write(", ")
                    f.write(f"{y}")
                f.write(");\n")

    with open(f"{table_dir}/diverge_main.sql", "w") as f:
        for i in range(tables):
            f.write("set foreign_key_checks = 0;\n")
            f.write("set unique_checks = 0;\n")
            if adds > 0:
                # y value is one higher, conflict
                f.write(f"insert into table{i} values\n")
                for j, k in enumerate(ys[rows+deletes+updates : rows+deletes+updates+adds]):
                    if j == 0:
                        f.write("  ")
                    else:
                        f.write(", ")
                    f.write(f"({rows+deletes+updates+j},{k+1},{rows+deletes+updates+j})")
                f.write(";\n")
            if deletes > 0:
                f.write(f"delete from table{i} where y in\n")
                for j, y in enumerate(ys[:deletes]):
                    if j == 0:
                        f.write("  (")
                    else:
                        f.write(", ")
                    f.write(f"{y}")
                f.write(");\n")
            if updates > 0:
                f.write(f"update table{i} set y=y+1 where y in\n")
                for j, y in enumerate(ys[deletes : deletes+updates]):
                    if j == 0:
                        f.write("  (")
                    else:
                        f.write(", ")
                    f.write(f"{y}")
                f.write(");\n")
+37
@@ -0,0 +1,37 @@
#!/bin/bash

if [ "$#" -ne 2 ]; then
    echo "usage: setup.sh <dolt-dir> <data-dir>"
    exit 1
fi

DIR=$1
DATA=$2

rm -rf "$DIR"
mkdir "$DIR"
cd "$DIR" || exit 1

dolt init
dolt sql < "$DATA/create.sql"

i=0
for t in $(ls "$DATA"/ | grep "table"); do
    echo "$t"
    dolt table import --disable-fk-checks -u "table${i}" "$DATA/$t"
    ((i++))
done

dolt commit -Am "add tables"
dolt sql < "$DATA/diverge_main.sql"
dolt commit -Am "add rows to conflict"

dolt checkout -b feature
dolt reset --hard head~1
dolt sql < "$DATA/branch.sql"
dolt commit -Am "new branch"
+1 -1
@@ -53,7 +53,7 @@ jobs:
           fi
       - name: Configure AWS Credentials
         if: ${{ env.use_credentials == 'true' }}
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+2 -2
@@ -85,7 +85,7 @@ jobs:
           fi
       - name: Configure AWS Credentials
         if: ${{ env.use_credentials == 'true' }}
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -159,7 +159,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -19,7 +19,7 @@ jobs:
         uses: ./.github/actions/orm-tests
       - name: Configure AWS Credentials
         if: ${{ failure() }}
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -11,7 +11,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -65,7 +65,7 @@ jobs:
             {
               "version": "${{ steps.comment-branch.outputs.head_sha }}",
               "run_file": "ci.yaml",
-              "summary": "summary.yaml",
+              "summary": "summary.sql",
               "report": "three_way_compare.sql",
               "commit_to_branch": "${{ steps.comment-branch.outputs.head_sha }}",
               "actor": "${{ github.actor }}",
+1 -1
@@ -134,7 +134,7 @@ jobs:
       - name: Configure AWS Credentials
         if: ${{ github.event.client_payload.email_recipient }} != ""
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -24,7 +24,7 @@ jobs:
           sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
           aws-iam-authenticator version
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -18,7 +18,7 @@ jobs:
         with:
           version: 'v1.23.6'
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -17,7 +17,7 @@ jobs:
         with:
           version: 'v1.23.6'
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+70
@@ -0,0 +1,70 @@
name: Run Merge Benchmark on Pull Requests

on:
  pull_request:
    types: [ opened ]
  issue_comment:
    types: [ created ]

jobs:
  validate-commentor:
    runs-on: ubuntu-22.04
    outputs:
      valid: ${{ steps.set_valid.outputs.valid }}
    steps:
      - uses: actions/checkout@v3
      - name: Validate Commentor
        id: set_valid
        run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
        env:
          ACTOR: ${{ github.actor }}

  check-comments:
    runs-on: ubuntu-22.04
    needs: validate-commentor
    if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
    outputs:
      benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
      comment-body: ${{ steps.set_body.outputs.body }}
    steps:
      - name: Check for Deploy Trigger
        uses: dolthub/pull-request-comment-trigger@master
        id: check
        with:
          trigger: '#merge-benchmark'
          reaction: rocket
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Set Benchmark
        if: ${{ steps.check.outputs.triggered == 'true' }}
        id: set_benchmark
        run: |
          echo "benchmark=true" >> $GITHUB_OUTPUT

  performance:
    runs-on: ubuntu-22.04
    needs: [validate-commentor, check-comments]
    if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
    name: Trigger Benchmark Merge Workflow
    steps:
      - uses: dolthub/pull-request-comment-branch@v3
        id: comment-branch
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get pull number
        uses: actions/github-script@v6
        id: get_pull_number
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
      - uses: peter-evans/repository-dispatch@v2.0.0
        with:
          token: ${{ secrets.REPO_ACCESS_TOKEN }}
          event-type: benchmark-merge
          client-payload: |
            {
              "version": "${{ steps.comment-branch.outputs.head_sha }}",
              "commit_to_branch": "${{ steps.comment-branch.outputs.head_sha }}",
              "actor": "${{ github.actor }}",
              "issue_id": "${{ steps.get_pull_number.outputs.pull_number }}"
            }
+207
@@ -0,0 +1,207 @@
name: Merge Benchmarks

on:
  repository_dispatch:
    types: [ benchmark-merge ]

env:
  SCRIPT_DIR: '.github/scripts/merge-perf'
  RESULT_TABLE_NAME: 'merge_perf_results'
  DOLTHUB_DB: 'import-perf/merge-perf'

jobs:
  bench:
    name: Benchmark
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: true
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.x
        id: go
        uses: actions/setup-go@v3
        with:
          go-version: ^1.19
      - name: Setup Python 3.x
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Dolt version
        id: version
        run: |
          version=${{ github.event.client_payload.version }}
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.client_payload.version }}
      - name: Install dolt
        working-directory: ./go
        run: go install ./cmd/dolt
      - name: Config dolt
        id: config
        run: |
          dolt config --global --add user.email "merge-perf@dolthub.com"
          dolt config --global --add user.name "merge-perf"
      - name: Run bench
        id: bench
        run: |
          gw=$GITHUB_WORKSPACE
          DATADIR=$gw/data

          # initialize results sql import
          RESULTS=$gw/results.sql
          echo "CREATE TABLE ${{ env.RESULT_TABLE_NAME }} (name varchar(50) primary key, table_cnt int, run_cnt int, add_cnt int, delete_cnt int, update_cnt int, conflict_cnt int, fks bool, latency float);" >> $RESULTS

          # parameters for testing
          ROW_NUM=1000000
          TABLE_NUM=2
          EDIT_CNT=60000

          names=('adds_only' 'deletes_only' 'updates_only' 'adds_updates_deletes')
          adds=($EDIT_CNT 0 0 $EDIT_CNT)
          deletes=(0 $EDIT_CNT 0 $EDIT_CNT)
          updates=(0 0 $EDIT_CNT $EDIT_CNT)

          wd=$(pwd)
          for i in {0..3}; do
            cd $wd
            echo "${names[$i]}, ${adds[$i]}, ${deletes[$i]}, ${updates[$i]}"

            # data.py creates files for import
            python ${{ env.SCRIPT_DIR }}/data.py $DATADIR $TABLE_NUM $ROW_NUM ${adds[$i]} ${deletes[$i]} ${updates[$i]}

            # setup.sh runs the import and commit process for a set of data files
            TMPDIR=$gw/tmp
            ./${{ env.SCRIPT_DIR }}/setup.sh $TMPDIR $DATADIR

            # small python script times the merge; we suppress error codes but print error messages
            cd $TMPDIR
            python3 -c "import time, subprocess, sys; start = time.time(); res=subprocess.run(['dolt', 'merge', '--squash', 'main'], capture_output=True); err = res.stdout + res.stderr if res.returncode != 0 else ''; latency = time.time() - start; print(latency); sys.stderr.write(str(err))" 1> lat.log 2> err.log
            latency=$(cat lat.log)
            cat err.log

            # count conflicts in first table
            conflicts=$(dolt sql -r csv -q "select count(*) from dolt_conflicts_table0;" | tail -1)
            echo "INSERT INTO ${{ env.RESULT_TABLE_NAME }} values ('"${names[$i]}"', $TABLE_NUM, $ROW_NUM, ${adds[$i]}, ${deletes[$i]}, ${updates[$i]}, $conflicts, true, $latency);" >> $RESULTS
          done
          echo "result_path=$RESULTS" >> $GITHUB_OUTPUT
      - name: Report
        id: report
        run: |
          gw=$GITHUB_WORKSPACE
          in="${{ steps.bench.outputs.result_path }}"
          query="select name, add_cnt, delete_cnt, update_cnt, round(latency, 2) as latency from ${{ env.RESULT_TABLE_NAME }}"
          summaryq="select round(avg(latency), 2) as avg from ${{ env.RESULT_TABLE_NAME }}"
          out="$gw/results.csv"
          dolt_dir="$gw/merge-perf"

          dolt config --global --add user.email "merge-perf@dolthub.com"
          dolt config --global --add user.name "merge-perf"
          echo '${{ secrets.DOLTHUB_IMPORT_PERF_CREDS_VALUE }}' | dolt creds import
          dolt clone ${{ env.DOLTHUB_DB }} "$dolt_dir"
          cd "$dolt_dir"

          branch="${{ github.event.client_payload.commit_to_branch }}"
          # checkout branch
          if [ -z "$(dolt sql -q "select 1 from dolt_branches where name = '$branch';")" ]; then
            dolt checkout -b $branch
          else
            dolt checkout $branch
          fi

          dolt sql -q "drop table if exists ${{ env.RESULT_TABLE_NAME }}"
          # load results
          dolt sql < "$in"

          # push results to dolthub
          dolt add ${{ env.RESULT_TABLE_NAME }}
          dolt commit -m "CI commit"
          dolt push -f origin $branch

          # generate report
          dolt sql -r csv -q "$query" > "$out"
          cat "$out"
          echo "report_path=$out" >> $GITHUB_OUTPUT

          avg=$(dolt sql -r csv -q "$summaryq" | tail -1)
          echo "avg=$avg" >> $GITHUB_OUTPUT
      - name: Format Results
        id: html
        if: ${{ github.event.client_payload.email_recipient != '' }}
        run: |
          gw="$GITHUB_WORKSPACE"
          in="${{ steps.report.outputs.report_path }}"
          out="$gw/results.html"
          echo "<table>" > "$out"
          print_header=true
          while read line; do
            if "$print_header"; then
              echo "  <tr><th>${line//,/</th><th>}</th></tr>" >> "$out"
              print_header=false
              continue
            fi
            echo "  <tr><td>${line//,/</td><td>}</td></tr>" >> "$out"
          done < "$in"
          echo "</table>" >> "$out"
          avg="${{ steps.report.outputs.avg }}"
          echo "<table><tr><th>Average</th></tr><tr><td>$avg</td></tr></table>" >> "$out"
          cat "$out"
          echo "html=$out" >> $GITHUB_OUTPUT
      - name: Configure AWS Credentials
        if: ${{ github.event.client_payload.email_recipient != '' }}
        uses: aws-actions/configure-aws-credentials@v1-node16
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      - name: Send Email
        uses: ./.github/actions/ses-email-action
        if: ${{ github.event.client_payload.email_recipient != '' }}
        with:
          region: us-west-2
          toAddresses: '["${{ github.event.client_payload.email_recipient }}"]'
          subject: 'Merge Performance Benchmarks: ${{ github.event.client_payload.version }}'
          bodyPath: ${{ steps.html.outputs.html }}
          template: 'SysbenchTemplate'
      - name: Read CSV
        if: ${{ github.event.client_payload.issue_id != '' }}
        id: csv
        uses: juliangruber/read-file-action@v1
        with:
          path: "${{ steps.report.outputs.report_path }}"
      - name: Create MD
        if: ${{ github.event.client_payload.issue_id != '' }}
        uses: petems/csv-to-md-table-action@master
        id: md
        with:
          csvinput: ${{ steps.csv.outputs.content }}
      - uses: mshick/add-pr-comment@v2
        if: ${{ github.event.client_payload.issue_id != '' }}
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          issue: ${{ github.event.client_payload.issue_id }}
          message-failure: merge benchmark failed
          message-cancelled: merge benchmark cancelled
          allow-repeats: true
          message: |
            @${{ github.event.client_payload.actor }} __DOLT__
            ${{ steps.md.outputs.markdown-table }}
@@ -52,3 +52,14 @@ jobs:
"commit_to_branch": "nightly",
"actor": "${{ github.actor }}"
}
- uses: peter-evans/repository-dispatch@v2.0.0
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: benchmark-merge
client-payload: |
{
"email_recipient": "${{ secrets.PERF_REPORTS_EMAIL_ADDRESS }}",
"version": "${{ github.sha }}",
"commit_to_branch": "nightly",
"actor": "${{ github.actor }}"
}
+1 -1
@@ -13,7 +13,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v3
       - name: Configure AWS Credentials
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+1 -1
@@ -135,7 +135,7 @@ jobs:
       - name: Configure AWS Credentials
         if: ${{ github.event.client_payload.email_recipient }} != ""
-        uses: aws-actions/configure-aws-credentials@v1-node16
+        uses: aws-actions/configure-aws-credentials@567d4149d67f15f52b09796bea6573fc32952783
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+4 -4
@@ -1,4 +1,4 @@
<img height="100" src="./docs/Dolt-Logo@3x.svg"/>
<img height="100" src="./images/Dolt-Logo@3x.svg"/>
# Dolt is Git for Data!
@@ -526,11 +526,11 @@ Hate the command line? Let's use [Tableplus](https://tableplus.com/) to make som
Now, to connect you must select MySQL as the connection type. Then enter a name for your connection, `getting_started` as your database, and `root` as your user.
![Tableplus Connection](./docs/getting-started-tp-connect.png)
![Tableplus Connection](./images/getting-started-tp-connect.png)
Click connect and you'll be presented with a familiar database workbench GUI.
![Tableplus](./docs/getting-started-tp.png)
![Tableplus](./images/getting-started-tp.png)
## Make changes on a branch
@@ -549,7 +549,7 @@ call dolt_commit('-am', 'Modifications on a branch');
Here's the result in Tableplus.
![New Updates](./docs/getting-started-new-updates.png)
![New Updates](./images/getting-started-new-updates.png)
Back in my terminal, I cannot see the table modifications made in Tableplus because they happened on a different branch than the one I have checked out in my session.
+2 -2
@@ -73,7 +73,7 @@ get_config_file_path_if_exists() {
 }

 # taken from https://github.com/docker-library/mysql/blob/master/8.0/docker-entrypoint.sh
-# this function will run files found in /docker-entrypoint-initdb.d directory AFTER server is started
+# this function will run files found in /docker-entrypoint-initdb.d directory BEFORE server is started
 # usage: docker_process_init_files [file [file [...]]]
 #    ie: docker_process_init_files /always-initdb.d/*
 # process initializer files, based on file extensions
@@ -142,7 +142,7 @@ _main() {
     fi

     if [[ ! -f $INIT_COMPLETED ]]; then
-        # run any file provided in /docker-entrypoint-initdb.d directory after the server starts
+        # run any file provided in /docker-entrypoint-initdb.d directory before the server starts
         docker_process_init_files /docker-entrypoint-initdb.d/*
         touch $INIT_COMPLETED
     fi
-132
@@ -1,132 +0,0 @@
#!/bin/bash

# This script installs and starts a dolt server on your Unix-compatible computer.

if test -z "$BASH_VERSION"; then
  echo "Please run this script using bash, not sh or any other shell. It should be run as root." >&2
  exit 1
fi

_() {

install_dolt() {
  # Install Dolt if it doesn't already exist
  echo "Installing Dolt..."
  if ! command -v dolt &> /dev/null
  then
    sudo bash -c 'curl -L https://github.com/dolthub/dolt/releases/latest/download/install.sh | bash'
  fi
}

setup_configs() {
  # Set up the dolt user along with core dolt configurations
  echo "Setting up Configurations..."

  # Check if the user "dolt" already exists. If it exists, double check that it is okay to continue
  if id -u "dolt" &> /dev/null; then
    echo "The user dolt already exists"
    read -r -p "Do you want to continue adding privileges to the existing user dolt? " response
    response=${response,,} # tolower
    if ! ([[ $response =~ ^(yes|y| ) ]] || [[ -z $response ]]); then
      exit 1
    fi
  else
    # add the user if `dolt` doesn't exist
    useradd -r -m -d /var/lib/doltdb dolt
  fi

  cd /var/lib/doltdb
  read -e -p "Enter an email associated with your user: " -i "dolt-user@dolt.com" email
  read -e -p "Enter a username associated with your user: " -i "Dolt Server Account" username
  sudo -u dolt dolt config --global --add user.email "$email"
  sudo -u dolt dolt config --global --add user.name "$username"
}

# Database creation
database_configuration() {
  echo "Setting up the dolt database..."
  read -e -p "Input the name of your database: " -i "mydb" db_name
  local db_dir="databases/$db_name"
  cd /var/lib/doltdb
  sudo -u dolt mkdir -p "$db_dir"
  cd "$db_dir"
  sudo -u dolt dolt init
}

# Setup and start daemon
start_server() {
  echo "Starting the server"
  cd ~
  cat > dolt_config.yaml <<EOF
log_level: info

behavior:
  read_only: false
  autocommit: true

user:
  name: root
  password: ""

listener:
  host: localhost
  port: 3306
  max_connections: 100
  read_timeout_millis: 28800000
  write_timeout_millis: 28800000
  tls_key: null
  tls_cert: null
  require_secure_transport: null

databases: []

performance:
  query_parallelism: null
EOF

  cat > doltdb.service <<EOF
[Unit]
Description=dolt SQL server
After=network.target

[Install]
WantedBy=multi-user.target

[Service]
User=dolt
Group=dolt
ExecStart=/usr/local/bin/dolt sql-server --config=dolt_config.yaml
WorkingDirectory=/var/lib/doltdb/databases/$db_name
KillSignal=SIGTERM
SendSIGKILL=no
EOF

  sudo chown root:root doltdb.service
  sudo chmod 644 doltdb.service
  sudo mv doltdb.service /etc/systemd/system
  sudo cp dolt_config.yaml /var/lib/doltdb/databases/$db_name
  sudo systemctl daemon-reload
  sudo systemctl enable doltdb.service
  sudo systemctl start doltdb
}

validate_status() {
  if systemctl --state=active | grep "doltdb.service"; then
    echo "Server successfully started..."
  else
    echo "ERROR: Server did not start properly..."
  fi
}

install_dolt
setup_configs
database_configuration
start_server
validate_status

}

_ "$0" "$@"
-1
File diff suppressed because one or more lines are too long (deleted image, 12 KiB)
-10
@@ -1,10 +0,0 @@
<svg width="163" height="56" viewBox="0 0 163 56" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M28.87 7.0459V45.8632C28.8654 46.7997 28.498 47.6965 27.8476 48.3591C27.1971 49.0217 26.316 49.3964 25.3957 49.402H10.4953C9.5713 49.402 8.68489 49.0298 8.0299 48.3666C7.3749 47.7035 7.00462 46.8034 7 45.8632V24.7722C7.00462 23.832 7.3749 22.9319 8.0299 22.2688C8.68489 21.6056 9.5713 21.2334 10.4953 21.2334H22.2115" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M156.3 49.4019H145.283" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M156.026 21.5259H134.174" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M145.336 7.0498V49.4024" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M72.2752 7.68311H59.049C56.6669 7.68311 54.7358 9.64808 54.7358 12.072V44.8074C54.7358 47.2313 56.6669 49.1963 59.049 49.1963H72.2752C74.6573 49.1963 76.5884 47.2313 76.5884 44.8074V12.072C76.5884 9.64808 74.6573 7.68311 72.2752 7.68311Z" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M119.586 49.4019H99.418" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M110.344 7.0498V49.4024" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M109.884 7H98.7939" stroke="#29E3C1" stroke-width="12.6599" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

Binary files not shown. Deleted images: 1.6 KiB (the SVG above), 48 KiB, 22 KiB, 142 KiB, 33 KiB.
-85
@@ -1,85 +0,0 @@
# Dolt FAQ
## Why is it called Dolt? Are you calling me dumb?
It's named `dolt` to pay homage to [how Linus Torvalds named
git](https://en.wikipedia.org/wiki/Git#Naming):
> Torvalds sarcastically quipped about the name git (which means
> "unpleasant person" in British English slang): "I'm an egotistical
> bastard, and I name all my projects after myself. First 'Linux',
> now 'git'."
We wanted a word meaning "idiot", starting with D for Data,
short enough to type on the command line, and
not taken in the standard command line lexicon. So,
`dolt`.
## The MySQL shell gives me an error: `Can't connect to local MySQL server through socket '/tmp/mysql.sock'`
The MySQL shell will try to connect through a socket file on many OSes.
To force it to use TCP instead, give it the loopback address like this:
```bash
% mysql --host 127.0.0.1 ...
```
## What does `@@autocommit` do?
This is a SQL variable that you can turn on for your SQL session like so:
`SET @@autocommit = 1`
It's on by default in the MySQL shell, as well as in most clients. But
some clients (notably the Python MySQL connector) turn it off by
default.
You must commit your changes for them to persist after your session
ends, either by setting `@@autocommit` to on, or by issuing `COMMIT`
statements manually.
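
For example, here is a minimal sketch with the Python MySQL connector (the database `mydb` and table `t` are hypothetical):

```python
import mysql.connector

# mysql-connector-python leaves autocommit off by default
conn = mysql.connector.connect(
    host="127.0.0.1", port=3306, user="root", database="mydb"
)
cur = conn.cursor()
cur.execute("INSERT INTO t VALUES (1)")  # hypothetical table t

# Without one of these, the insert is lost when the session ends:
conn.commit()              # issue a COMMIT manually, or...
# conn.autocommit = True   # ...turn autocommit on for the session
```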
## What's the difference between `COMMIT` and `DOLT_COMMIT()`?
`COMMIT` is a standard SQL statement that commits a transaction. In
dolt, it just flushes any pending changes in the current SQL session
to disk, updating the working set. HEAD stays the same, but your
working set changes. This means your edits will persist after this
session ends.
`DOLT_COMMIT()` commits the current SQL transaction, then creates a
new dolt commit on the current branch. It's the same as if you run
`dolt commit` from the command line.
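
As a minimal illustration of the difference (again with a hypothetical table `t`):

```python
import mysql.connector

conn = mysql.connector.connect(
    host="127.0.0.1", port=3306, user="root", database="mydb"
)
cur = conn.cursor()
cur.execute("UPDATE t SET x = x + 1")  # hypothetical table t

# COMMIT: ends the SQL transaction and updates the working set;
# HEAD stays the same.
conn.commit()

# DOLT_COMMIT: additionally creates a dolt commit on the current
# branch, like `dolt commit` on the command line.
cur.execute("CALL DOLT_COMMIT('-am', 'bump x')")
```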
## I want each of my connected SQL users to get their own branch to make changes on, then merge them back into `main` when they're done making edits. How do I do that?
We are glad you asked! This is a common use case, and giving each user
their own branch is something we've spent a lot of time getting
right. For more details on how to use this pattern effectively, see
[using branches](https://docs.dolthub.com/reference/sql/branches).
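
As a minimal sketch of the pattern, assuming a database `mydb` with an existing branch `alice` (both placeholders), a client can target a branch directly with a branch-qualified database name:

```python
import mysql.connector

# Connect straight to one user's branch using the "<database>/<branch>"
# naming scheme ("mydb" and "alice" are placeholder names).
conn = mysql.connector.connect(
    host="127.0.0.1", port=3306, user="root", database="mydb/alice"
)
cur = conn.cursor()
cur.execute("SELECT active_branch()")
print(cur.fetchone())  # ('alice',)
```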
## Does Dolt support transactions?
Yes, transactions should work exactly the same as in MySQL, but with
fewer locks for competing writes.
It's also possible for different sessions to connect to different
branches on the same server. See [using
branches](https://docs.dolthub.com/reference/sql/branches) for details.
## What SQL features / syntax are supported?
Most of them! Check out [the docs for the full list of supported
features](https://docs.dolthub.com/reference/sql/support).
You can check out what we're working on next on our
[roadmap](roadmap.md). Paying customers get their feature requests
bumped to the front of the line.
## Does Dolt support my favorite SQL workbench / tool?
Probably! Have you tried it? If you try it and it doesn't work, [let
us know with an issue](https://github.com/dolthub/dolt/issues) or in
[our Discord](https://discord.com/invite/RFwfYpu) and we'll see what
we can do. A lot of times we can fix small compatibility issues really
quick, like the same week. And even if we can't, we want to know about
it! Our goal is to be a 100% drop-in replacement for MySQL.
-182
@@ -1,182 +0,0 @@
# Dolt quickstart guide
This is a one-page guide to getting you started with Dolt as quickly
as possible. If you're trying to participate in a
[data bounty](https://www.dolthub.com/bounties), this will get you
up and running. We think bounties are the most engaging way to get
started using Dolt and DoltHub and understand how it all works.
This guide is intended for new data bounty participants, and is geared
to that use case. You can find more complete documentation on how to
use Dolt in the [README](../README.md) and in the [DoltHub
documentation](https://docs.dolthub.com/introduction/installation).
## Install Dolt
```sh
% sudo bash -c 'curl -L https://github.com/dolthub/dolt/releases/latest/download/install.sh | bash'
```
For Windows installation, see [here](windows.md).
## Configure dolt
```sh
% dolt config --global --add user.email YOU@DOMAIN.COM
% dolt config --global --add user.name "YOUR NAME"
```
## Fork the data bounty
Forking a database makes a private copy for you to edit. Find the
database you want to edit, then click the "Fork" button on the top
left.
![Forking a repository](dolthub-fork.png)
## Clone your fork
Cloning your fork of the database downloads it to your local computer
so you can make changes to it. Click
"Clone" to find the command to copy and paste into your terminal. This
clone command will be different for every fork, so you can't just copy
and paste the command in the text below.
![Cloning a repository](dolthub-clone.png)
Run the command, then cd into the database directory.
```sh
% dolt clone dolthub/hospital-price-transparency
% cd hospital-price-transparency
```
## Inspect the data
Get familiar with the tables and their columns. The easiest way to do
this is by using SQL commands. `show tables` and `describe <tablename>` are good commands to use when exploring a new database.
```sql
% dolt sql
# Welcome to the DoltSQL shell.
# Statements must be terminated with ';'.
# "exit" or "quit" (or Ctrl-D) to exit.
hospital_price_transparency> show tables;
+-----------+
| Table |
+-----------+
| cpt_hcpcs |
| hospitals |
| prices |
+-----------+
hospital_price_transparency> describe hospitals;
+----------------+--------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+----------------+--------------+------+-----+---------+-------+
| npi_number | char(16) | NO | PRI | | |
| name | varchar(256) | YES | | | |
| url | varchar(512) | YES | | | |
| street_address | varchar(512) | YES | | | |
| city | varchar(64) | YES | | | |
| state | varchar(32) | YES | | | |
| zip_code | varchar(16) | YES | | | |
| publish_date | date | YES | | | |
+----------------+--------------+------+-----+---------+-------+
hospital_price_transparency> select npi_number, name, street_address from hospitals limit 3;
+------------+------------------------------------+---------------------+
| npi_number | name | street_address |
+------------+------------------------------------+---------------------+
| 1003873225 | The Specialty Hospital Of Meridian | 1314 19th Ave |
| 1023061405 | Grandview Medical Center | 3690 Grandview Pkwy |
| 1023180502 | Medical City Dallas | 7777 Forest Ln |
+------------+------------------------------------+---------------------+
hospital_price_transparency> exit
Bye
```
## Add some data
There are two main ways to add data into your copy of the
database. You can either import from files, or you can add data by
writing scripts and inserting rows with SQL statements.
### Importing files
Use the `dolt table import` command to import CSV or JSON files. Use
the `-u` option to update the table (instead of replacing the
contents).
```sh
% dolt table import -u prices hospital_prices.csv
```
### Starting a SQL server
If you want to write a script to insert data with python or another
programming language, start a SQL server on the command line:
```sh
% dolt sql-server
Starting server with Config HP="localhost:3306"|T="28800000"|R="false"|L="info"
```
Then connect to the database with any standard MySQL connector and
make your edits.
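
For example, a minimal sketch with the Python MySQL connector (the column names below are illustrative, not the real `prices` schema):

```python
import mysql.connector

conn = mysql.connector.connect(
    host="127.0.0.1", port=3306, user="root",
    database="hospital_price_transparency",
)
cur = conn.cursor()
# Illustrative columns only; run `describe prices` for the real schema.
cur.execute(
    "insert into prices (npi_number, code, price) values (%s, %s, %s)",
    ("1003873225", "0001", 42.00),
)
conn.commit()  # this connector leaves autocommit off by default
```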
## See your changes
After you've inserted some data, you can inspect the changes you made
using `dolt diff`. If you added a lot of rows, use the `--summary` flag
to get a summary instead.
```sh
% dolt diff
% dolt diff --summary
```
## Commit your changes
These commands work like `git`, if you know `git`. If you don't know
`git`, don't worry! Most people who know `git` don't actually know
`git` either!
```sh
% dolt add .
% dolt commit -m "This message describes my changes"
```
You can repeat these steps as many times as you have more changes to add:
1. Add data
2. Commit your changes
Every time you commit it creates a checkpoint you can roll back to if
you mess up later.
## Push your changes back to DoltHub and create a PR
When you're done adding data, push the database back to DoltHub and
submit a pull request (PR) to merge them back into the original fork.
```sh
% dolt push origin master
```
![Create new PR](dolthub-pr-1.png)
![Create new PR](dolthub-pr-2.png)
## Respond to PR review feedback
Your PR will be reviewed by the people running the bounty, and they
may ask you to make changes. If they do, then go ahead and make your
changes on your machine, then `dolt push` those new commits back to
DoltHub and your existing PR will automatically be updated with them.
## Questions? Still need help?
Come hang out with us on [our
Discord](https://discord.com/invite/RFwfYpu), where the team that
builds Dolt and lots of other customers are available to chat and ask
questions. If this guide is missing something obvious, come tell us
there!
-58
@@ -1,58 +0,0 @@
# Dolt Feature Roadmap
Full details on [supported SQL
features](https://docs.dolthub.com/reference/sql/support) are
available on the docs site.
This is a selection of unimplemented features we're working on. Don't
see what you need on here? [Let us
know!](https://github.com/dolthub/dolt/issues) Paying customers get
their feature requests implemented first.
Roadmap last updated Apr 2022, next update Jun 2022.
## Upcoming features
| Feature | Estimate |
| ------- | --- |
| 99.9% SQL correctness | Q2 2022 |
| Hosted Dolt v1 | Q2 2022 |
| Hash join strategy | Q2 2022 |
| Storage performance | Q2 2022 |
| Lock / unlock tables | Q2 2022 |
| SQL GUI support tests | Q2 2022 |
| `JSON_TABLE()` | Q2 2022 |
| Table / index statistics | Q2 2022 |
| Universal SQL path for CLI | Q2 2022 |
| Pipeline query processing | Q3 2022 |
| Row-level locking (`SELECT FOR UPDATE`) | Q3 2022 |
| All transaction isolation levels | Q3 2022 |
| Postgres Support | 2023 |
| Automatic garbage collection | Unscheduled |
| Collation and charset support | Unscheduled |
| Virtual columns and json indexing | Unscheduled |
| Full text indexes | Unscheduled |
| Spatial indexes | Unscheduled |
| Multiple DBs in one repo | Unscheduled |
| Embedded dolt | Unscheduled |
| Signed commits | Unscheduled |
| Cross-database joins with indexes | Unscheduled |
| More function coverage | Ongoing |
## Recently launched features
| Feature | Launch Date |
| ------- | --- |
| Join for update | Oct 2021 |
| Backup and replication | Nov 2021 |
| Commit graph performance | Nov 2021 |
| Persistent SQL configuration | Dec 2021 |
| CREATE / DROP DATABASE | Dec 2021 |
| Hosted Dolt Alpha | Jan 2022 |
| `ROWS` window definitions | Jan 2022 |
| `RANGE` window definitions | Jan 2022 |
| DoltLab (on-prem DoltHub) | Jan 2022 |
| Users / grants | Feb 2022 |
| Geometry types and functions | Feb 2022 |
| Better `dolt_diff` table experience | Mar 2022 |
-24
@@ -1,24 +0,0 @@
# Windows support
Dolt is tested and supported on Windows! If you find any problems
specific to Windows, please file an
[issue](https://github.com/dolthub/dolt/issues/) and let us know.
## Installation
Download the latest Microsoft Installer (`.msi` file) in
[releases](https://github.com/dolthub/dolt/releases) and run it.
Package manager releases coming soon!
## Environment
Dolt runs best under the Windows Subsystem for Linux, or WSL. But it
should also work fine with `cmd.exe` or `powershell`. If you find this
isn't true, please file an
[issue](https://github.com/dolthub/dolt/issues/) and let us know.
WSL 2 currently has [known
bugs](https://github.com/dolthub/dolt/issues/992), so we recommend
using WSL 1 for now. Or if you do use WSL 2, we recommend using the
Linux `dolt` binary, rather than the Windows `dolt.exe` binary.
+209
@@ -4522,6 +4522,215 @@ SOFTWARE.
= LICENSE 24dd21670b04d159a0d6c283884da9d23f8761460c300d4702ccffa5 =
================================================================================
================================================================================
= github.com/kylelemons/godebug licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/lestrrat-go/strftime licensed under: =
+4 -2
@@ -125,14 +125,16 @@ func ResolveTable(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue
return err
}
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return err
}
sqlCtx, err := engine.NewLocalSqlContext(ctx, eng)
sqlCtx, err := eng.NewLocalContext(ctx)
if err != nil {
return err
}
sqlCtx.SetCurrentDatabase(dbName)
v, err := getFirstColumn(sqlCtx, eng, "SELECT @@DOLT_ALLOW_COMMIT_CONFLICTS;")
if err != nil {
+3 -2
@@ -140,7 +140,7 @@ func printConflicts(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootVal
}
}
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
@@ -179,10 +179,11 @@ func printConflicts(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootVal
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
sqlCtx, err := engine.NewLocalSqlContext(ctx, eng)
sqlCtx, err := eng.NewLocalContext(ctx)
if err != nil {
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
sqlCtx.SetCurrentDatabase(dbName)
confSqlSch, rowItr, err := eng.Query(sqlCtx, buildConflictQuery(baseSch, sch, mergeSch, tblName))
if err != nil {
@@ -119,7 +119,7 @@ func (cmd VerifyConstraintsCmd) Exec(ctx context.Context, commandStr string, arg
if tablesWithViolations.Size() > 0 {
cli.PrintErrln("All constraints are not satisfied.")
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return commands.HandleVErrAndExitCode(errhand.BuildDError("Failed to build sql engine.").AddCause(err).Build(), nil)
}
@@ -134,7 +134,7 @@ func (cmd VerifyConstraintsCmd) Exec(ctx context.Context, commandStr string, arg
}
cli.Println("")
cli.Println(doltdb.DoltConstViolTablePrefix + tableName)
dErr := printViolationsForTable(ctx, tableName, tbl, eng)
dErr := printViolationsForTable(ctx, dbName, tableName, tbl, eng)
if dErr != nil {
return commands.HandleVErrAndExitCode(dErr, nil)
}
@@ -153,7 +153,7 @@ func (cmd VerifyConstraintsCmd) Exec(ctx context.Context, commandStr string, arg
return 0
}
func printViolationsForTable(ctx context.Context, tblName string, tbl *doltdb.Table, eng *engine.SqlEngine) errhand.VerboseError {
func printViolationsForTable(ctx context.Context, dbName, tblName string, tbl *doltdb.Table, eng *engine.SqlEngine) errhand.VerboseError {
sch, err := tbl.GetSchema(ctx)
if err != nil {
return errhand.BuildDError("Error loading table schema").AddCause(err).Build()
@@ -162,10 +162,12 @@ func printViolationsForTable(ctx context.Context, tblName string, tbl *doltdb.Ta
colNames := strings.Join(sch.GetAllCols().GetColumnNames(), ", ")
query := fmt.Sprintf("SELECT violation_type, %s, violation_info from dolt_constraint_violations_%s", colNames, tblName)
sCtx, err := engine.NewLocalSqlContext(ctx, eng)
sCtx, err := eng.NewLocalContext(ctx)
if err != nil {
return errhand.BuildDError("Error making sql context").AddCause(err).Build()
}
sCtx.SetCurrentDatabase(dbName)
sqlSch, sqlItr, err := eng.Query(sCtx, query)
if err != nil {
return errhand.BuildDError("Error querying constraint violations").AddCause(err).Build()
+110 -37
@@ -23,6 +23,7 @@ import (
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
@@ -35,17 +36,21 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/tabular"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/libraries/utils/set"
)
type diffOutput int
type diffPart int
type diffMode int
const (
SchemaOnlyDiff diffPart = 1 // 0b0001
DataOnlyDiff diffPart = 2 // 0b0010
Summary diffPart = 4 // 0b0100
Stat diffPart = 4 // 0b0100
Summary diffPart = 8 // 0b1000
SchemaAndDataDiff = SchemaOnlyDiff | DataOnlyDiff
@@ -55,6 +60,7 @@ const (
DataFlag = "data"
SchemaFlag = "schema"
StatFlag = "stat"
SummaryFlag = "summary"
whereParam = "where"
limitParam = "limit"
@@ -62,6 +68,7 @@ const (
CachedFlag = "cached"
SkinnyFlag = "skinny"
MergeBase = "merge-base"
DiffMode = "diff-mode"
)
var diffDocs = cli.CommandDocumentationContent{
@@ -87,6 +94,8 @@ Show changes between the working and staged tables, changes between the working
The diffs displayed can be limited to show the first N by providing the parameter {{.EmphasisLeft}}--limit N{{.EmphasisRight}} where {{.EmphasisLeft}}N{{.EmphasisRight}} is the number of diffs to display.
To filter which data rows are displayed, use {{.EmphasisLeft}}--where <SQL expression>{{.EmphasisRight}}. Table column names in the filter expression must be prefixed with {{.EmphasisLeft}}from_{{.EmphasisRight}} or {{.EmphasisLeft}}to_{{.EmphasisRight}}, e.g. {{.EmphasisLeft}}to_COLUMN_NAME > 100{{.EmphasisRight}} or {{.EmphasisLeft}}from_COLUMN_NAME + to_COLUMN_NAME = 0{{.EmphasisRight}}.
The {{.EmphasisLeft}}--diff-mode{{.EmphasisRight}} argument controls how modified rows are presented when the format output is set to {{.EmphasisLeft}}tabular{{.EmphasisRight}}. When set to {{.EmphasisLeft}}row{{.EmphasisRight}}, modified rows are presented as old and new rows. When set to {{.EmphasisLeft}}line{{.EmphasisRight}}, modified rows are presented as a single row, and changes are presented using "+" and "-" within the column. When set to {{.EmphasisLeft}}in-place{{.EmphasisRight}}, modified rows are presented as a single row, and changes are presented side-by-side with a color distinction (requires a color-enabled terminal). When set to {{.EmphasisLeft}}context{{.EmphasisRight}}, rows that contain at least one column that spans multiple lines uses {{.EmphasisLeft}}line{{.EmphasisRight}}, while all other rows use {{.EmphasisLeft}}row{{.EmphasisRight}}. The default value is {{.EmphasisLeft}}context{{.EmphasisRight}}.
`,
Synopsis: []string{
`[options] [{{.LessThan}}commit{{.GreaterThan}}] [{{.LessThan}}tables{{.GreaterThan}}...]`,
@@ -97,6 +106,7 @@ To filter which data rows are displayed, use {{.EmphasisLeft}}--where <SQL expre
type diffArgs struct {
diffParts diffPart
diffOutput diffOutput
diffMode diff.Mode
fromRoot *doltdb.RootValue
toRoot *doltdb.RootValue
fromRef string
@@ -133,13 +143,15 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(DataFlag, "d", "Show only the data changes, do not show the schema changes (Both shown by default).")
ap.SupportsFlag(SchemaFlag, "s", "Show only the schema changes, do not show the data changes (Both shown by default).")
ap.SupportsFlag(SummaryFlag, "", "Show summary of data changes")
ap.SupportsFlag(StatFlag, "", "Show stats of data changes")
ap.SupportsFlag(SummaryFlag, "", "Show summary of data and schema changes")
ap.SupportsString(FormatFlag, "r", "result output format", "How to format diff output. Valid values are tabular, sql, json. Defaults to tabular.")
ap.SupportsString(whereParam, "", "column", "filters columns based on values in the diff. See {{.EmphasisLeft}}dolt diff --help{{.EmphasisRight}} for details.")
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.")
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
ap.SupportsString(DiffMode, "", "diff mode", "Determines how to display modified rows with tabular output. Valid values are row, line, in-place, context. Defaults to context.")
return ap
}
@@ -167,9 +179,9 @@ func (cmd DiffCmd) Exec(ctx context.Context, commandStr string, args []string, d
}
func (cmd DiffCmd) validateArgs(apr *argparser.ArgParseResults) errhand.VerboseError {
if apr.Contains(SummaryFlag) {
if apr.Contains(StatFlag) || apr.Contains(SummaryFlag) {
if apr.Contains(SchemaFlag) || apr.Contains(DataFlag) {
return errhand.BuildDError("invalid Arguments: --summary cannot be combined with --schema or --data").Build()
return errhand.BuildDError("invalid Arguments: --stat and --summary cannot be combined with --schema or --data").Build()
}
}
@@ -191,6 +203,8 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
dArgs.diffParts = DataOnlyDiff
} else if apr.Contains(SchemaFlag) && !apr.Contains(DataFlag) {
dArgs.diffParts = SchemaOnlyDiff
} else if apr.Contains(StatFlag) {
dArgs.diffParts = Stat
} else if apr.Contains(SummaryFlag) {
dArgs.diffParts = Summary
}
@@ -201,6 +215,16 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
switch strings.ToLower(f) {
case "tabular":
dArgs.diffOutput = TabularDiffOutput
switch strings.ToLower(apr.GetValueOrDefault(DiffMode, "context")) {
case "row":
dArgs.diffMode = diff.ModeRow
case "line":
dArgs.diffMode = diff.ModeLine
case "in-place":
dArgs.diffMode = diff.ModeInPlace
case "context":
dArgs.diffMode = diff.ModeContext
}
case "sql":
dArgs.diffOutput = SQLDiffOutput
case "json":
@@ -232,6 +256,10 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
if err != nil {
return nil, err
}
if ok {
dArgs.tableSet.Add(tableName)
continue
}
if !ok {
return nil, fmt.Errorf("table %s does not exist in either revision", tableName)
}
@@ -451,6 +479,45 @@ func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.
return root, true
}
var diffSummarySchema = sql.Schema{
&sql.Column{Name: "Table name", Type: types.Text, Nullable: false},
&sql.Column{Name: "Diff type", Type: types.Text, Nullable: false},
&sql.Column{Name: "Data change", Type: types.Boolean, Nullable: false},
&sql.Column{Name: "Schema change", Type: types.Boolean, Nullable: false},
}
func printDiffSummary(ctx context.Context, tds []diff.TableDelta, dArgs *diffArgs) errhand.VerboseError {
cliWR := iohelp.NopWrCloser(cli.OutStream)
wr := tabular.NewFixedWidthTableWriter(diffSummarySchema, cliWR, 100)
defer wr.Close(ctx)
for _, td := range tds {
if !dArgs.tableSet.Contains(td.FromName) && !dArgs.tableSet.Contains(td.ToName) {
continue
}
if td.FromTable == nil && td.ToTable == nil {
return errhand.BuildDError("error: both tables in tableDelta are nil").Build()
}
summ, err := td.GetSummary(ctx)
if err != nil {
return errhand.BuildDError("could not get table delta summary").AddCause(err).Build()
}
tableName := summ.TableName
if summ.DiffType == "renamed" {
tableName = fmt.Sprintf("%s -> %s", summ.FromTableName, summ.ToTableName)
}
err = wr.WriteSqlRow(ctx, sql.Row{tableName, summ.DiffType, summ.DataChange, summ.SchemaChange})
if err != nil {
return errhand.BuildDError("could not write table delta summary").AddCause(err).Build()
}
}
return nil
}
func diffUserTables(ctx context.Context, dEnv *env.DoltEnv, dArgs *diffArgs) errhand.VerboseError {
var err error
@@ -459,22 +526,32 @@ func diffUserTables(ctx context.Context, dEnv *env.DoltEnv, dArgs *diffArgs) err
return errhand.BuildDError("error: unable to diff tables").AddCause(err).Build()
}
engine, err := engine.NewSqlEngineForEnv(ctx, dEnv)
engine, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
sqlCtx, err := engine.NewLocalContext(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
sqlCtx.SetCurrentDatabase(dbName)
sort.Slice(tableDeltas, func(i, j int) bool {
return strings.Compare(tableDeltas[i].ToName, tableDeltas[j].ToName) < 0
})
if dArgs.diffParts&Summary != 0 {
return printDiffSummary(ctx, tableDeltas, dArgs)
}
dw, err := newDiffWriter(dArgs.diffOutput)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
for _, td := range tableDeltas {
verr := diffUserTable(ctx, td, engine, dArgs, dw)
verr := diffUserTable(sqlCtx, td, engine, dArgs, dw)
if verr != nil {
return verr
}
@@ -489,7 +566,7 @@ func diffUserTables(ctx context.Context, dEnv *env.DoltEnv, dArgs *diffArgs) err
}
func diffUserTable(
ctx context.Context,
ctx *sql.Context,
td diff.TableDelta,
engine *engine.SqlEngine,
dArgs *diffArgs,
@@ -516,8 +593,8 @@ func diffUserTable(
return errhand.BuildDError("cannot retrieve schema for table %s", td.ToName).AddCause(err).Build()
}
if dArgs.diffParts&Summary != 0 {
return printDiffSummary(ctx, td, fromSch.GetAllCols().Size(), toSch.GetAllCols().Size())
if dArgs.diffParts&Stat != 0 {
return printDiffStat(ctx, td, fromSch.GetAllCols().Size(), toSch.GetAllCols().Size())
}
if dArgs.diffParts&SchemaOnlyDiff != 0 {
@@ -648,14 +725,12 @@ func sqlSchemaDiff(ctx context.Context, td diff.TableDelta, toSchemas map[string
}
func diffRows(
ctx context.Context,
ctx *sql.Context,
se *engine.SqlEngine,
td diff.TableDelta,
dArgs *diffArgs,
dw diffWriter,
) errhand.VerboseError {
from, to := dArgs.fromRef, dArgs.toRef
diffable := schema.ArePrimaryKeySetsDiffable(td.Format(), td.FromSch, td.ToSch)
canSqlDiff := !(td.ToSch == nil || (td.FromSch != nil && !schema.SchemasAreEqual(td.FromSch, td.ToSch)))
@@ -667,7 +742,6 @@ func diffRows(
}
fromSch = pkSch.Schema
}
if td.ToSch != nil {
pkSch, err := sqlutil.FromDoltSchema(td.ToName, td.ToSch)
if err != nil {
@@ -719,7 +793,7 @@ func diffRows(
}
columns := getColumnNamesString(td.FromSch, td.ToSch)
query := fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columns, "diff_type", from, to, tableName)
query := fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columns, "diff_type", dArgs.fromRef, dArgs.toRef, tableName)
if len(dArgs.where) > 0 {
query += " where " + dArgs.where
@@ -729,24 +803,19 @@ func diffRows(
query += " limit " + strconv.Itoa(dArgs.limit)
}
sqlCtx, err := engine.NewLocalSqlContext(ctx, se)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
sch, rowIter, err := se.Query(sqlCtx, query)
sch, rowIter, err := se.Query(ctx, query)
if sql.ErrSyntaxError.Is(err) {
return errhand.BuildDError("Failed to parse diff query. Invalid where clause?\nDiff query: %s", query).AddCause(err).Build()
} else if err != nil {
return errhand.BuildDError("Error running diff query:\n%s", query).AddCause(err).Build()
}
defer rowIter.Close(sqlCtx)
defer rowIter.Close(ctx)
defer rowWriter.Close(ctx)
var modifiedColNames map[string]bool
if dArgs.skinny {
modifiedColNames, err = getModifiedCols(sqlCtx, rowIter, unionSch, sch)
modifiedColNames, err = getModifiedCols(ctx, rowIter, unionSch, sch)
if err != nil {
return errhand.BuildDError("Error running diff query:\n%s", query).AddCause(err).Build()
}
@@ -769,12 +838,12 @@ func diffRows(
defer rowWriter.Close(ctx)
// reset the row iterator
err = rowIter.Close(sqlCtx)
err = rowIter.Close(ctx)
if err != nil {
return errhand.BuildDError("Error closing row iterator:\n%s", query).AddCause(err).Build()
}
_, rowIter, err = se.Query(sqlCtx, query)
defer rowIter.Close(sqlCtx)
_, rowIter, err = se.Query(ctx, query)
defer rowIter.Close(ctx)
if sql.ErrSyntaxError.Is(err) {
return errhand.BuildDError("Failed to parse diff query. Invalid where clause?\nDiff query: %s", query).AddCause(err).Build()
} else if err != nil {
@@ -782,7 +851,7 @@ func diffRows(
}
}
err = writeDiffResults(sqlCtx, sch, unionSch, rowIter, rowWriter, modifiedColNames, dArgs.skinny)
err = writeDiffResults(ctx, sch, unionSch, rowIter, rowWriter, modifiedColNames, dArgs)
if err != nil {
return errhand.BuildDError("Error running diff query:\n%s", query).AddCause(err).Build()
}
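For a concrete illustration of the query assembled above, with hypothetical refs 'main' and 'feature', table t1, a where clause of x > 10, and a limit of 100 (the exact column list comes from getColumnNamesString), the generated query would look like:
select from_x, to_x, diff_type from dolt_diff('main', 'feature', 't1') where x > 10 limit 100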
@@ -827,7 +896,7 @@ func writeDiffResults(
iter sql.RowIter,
writer diff.SqlRowDiffWriter,
modifiedColNames map[string]bool,
filterChangedCols bool,
dArgs *diffArgs,
) error {
ds, err := newDiffSplitter(diffQuerySch, targetSch)
if err != nil {
@@ -847,7 +916,7 @@ func writeDiffResults(
return err
}
if filterChangedCols {
if dArgs.skinny {
var filteredOldRow, filteredNewRow rowDiff
for i, changeType := range newRow.colDiffs {
if changeType == diff.Added || changeType == diff.Removed || modifiedColNames[targetSch[i].Name] {
@@ -869,17 +938,21 @@ func writeDiffResults(
newRow = filteredNewRow
}
if oldRow.row != nil {
err := writer.WriteRow(ctx, oldRow.row, oldRow.rowDiff, oldRow.colDiffs)
if err != nil {
// We are guaranteed to have "ModeRow" for writers that do not support combined rows
if dArgs.diffMode != diff.ModeRow && oldRow.rowDiff == diff.ModifiedOld && newRow.rowDiff == diff.ModifiedNew {
if err = writer.WriteCombinedRow(ctx, oldRow.row, newRow.row, dArgs.diffMode); err != nil {
return err
}
}
if newRow.row != nil {
err := writer.WriteRow(ctx, newRow.row, newRow.rowDiff, newRow.colDiffs)
if err != nil {
return err
} else {
if oldRow.row != nil {
if err = writer.WriteRow(ctx, oldRow.row, oldRow.rowDiff, oldRow.colDiffs); err != nil {
return err
}
}
if newRow.row != nil {
if err = writer.WriteRow(ctx, newRow.row, newRow.rowDiff, newRow.colDiffs); err != nil {
return err
}
}
}
}
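A minimal standalone sketch of the dispatch rule above, with hypothetical mode constants standing in for diff.Mode*: a ModifiedOld/ModifiedNew pair is merged into one combined row in every tabular mode except row mode, where the two halves are written separately.
package main

import "fmt"

type mode int

const (
	modeRow mode = iota
	modeLine
	modeInPlace
	modeContext
)

// combine reports whether a modified-old/modified-new pair should be
// rendered as a single combined row.
func combine(m mode, oldModified, newModified bool) bool {
	return m != modeRow && oldModified && newModified
}

func main() {
	fmt.Println(combine(modeContext, true, true)) // true: one merged row
	fmt.Println(combine(modeRow, true, true))     // false: two separate rows
}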
+8 -8
View File
@@ -65,18 +65,18 @@ func newDiffWriter(diffOutput diffOutput) (diffWriter, error) {
}
}
func printDiffSummary(ctx context.Context, td diff.TableDelta, oldColLen, newColLen int) errhand.VerboseError {
func printDiffStat(ctx context.Context, td diff.TableDelta, oldColLen, newColLen int) errhand.VerboseError {
// todo: use errgroup.Group
ae := atomicerr.New()
ch := make(chan diff.DiffSummaryProgress)
ch := make(chan diff.DiffStatProgress)
go func() {
defer close(ch)
err := diff.SummaryForTableDelta(ctx, ch, td)
err := diff.StatForTableDelta(ctx, ch, td)
ae.SetIfError(err)
}()
acc := diff.DiffSummaryProgress{}
acc := diff.DiffStatProgress{}
var count int64
var pos int
eP := cli.NewEphemeralPrinter()
@@ -119,15 +119,15 @@ func printDiffSummary(ctx context.Context, td diff.TableDelta, oldColLen, newCol
}
if keyless {
printKeylessSummary(acc)
printKeylessStat(acc)
} else {
printSummary(acc, oldColLen, newColLen)
printStat(acc, oldColLen, newColLen)
}
return nil
}
func printSummary(acc diff.DiffSummaryProgress, oldColLen, newColLen int) {
func printStat(acc diff.DiffStatProgress, oldColLen, newColLen int) {
numCellInserts, numCellDeletes := sqle.GetCellsAddedAndDeleted(acc, newColLen)
rowsUnmodified := uint64(acc.OldRowSize - acc.Changes - acc.Removes)
unmodified := pluralize("Row Unmodified", "Rows Unmodified", rowsUnmodified)
@@ -161,7 +161,7 @@ func printSummary(acc diff.DiffSummaryProgress, oldColLen, newColLen int) {
cli.Printf("(%s vs %s)\n\n", oldValues, newValues)
}
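Worked example for the arithmetic above: with OldRowSize = 100, Changes = 10, and Removes = 5, rowsUnmodified = 100 - 10 - 5 = 85.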
func printKeylessSummary(acc diff.DiffSummaryProgress) {
func printKeylessStat(acc diff.DiffStatProgress) {
insertions := pluralize("Row Added", "Rows Added", acc.Adds)
deletions := pluralize("Row Deleted", "Rows Deleted", acc.Removes)
+3 -3
View File
@@ -85,17 +85,17 @@ func (cmd DiffCmd) Exec(ctx context.Context, commandStr string, args []string, d
}
func diffDoltDoc(ctx context.Context, dEnv *env.DoltEnv, docName string) error {
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return err
}
working, err := readDocFromTable(ctx, eng, docName)
working, err := readDocFromTable(ctx, eng, dbName, docName)
if err != nil {
return err
}
head, err := readDocFromTableAsOf(ctx, eng, docName, "HEAD")
head, err := readDocFromTableAsOf(ctx, eng, dbName, docName, "HEAD")
if err != nil {
return err
}
+6 -4
View File
@@ -114,12 +114,12 @@ func readDoltDoc(ctx context.Context, dEnv *env.DoltEnv, docName, fileName strin
return err
}
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return err
}
err = writeDocToTable(ctx, eng, docName, string(update))
err = writeDocToTable(ctx, eng, dbName, docName, string(update))
if err != nil {
return err
}
@@ -131,17 +131,19 @@ const (
writeDocTemplate = `REPLACE INTO dolt_docs VALUES ("%s", "%s")`
)
func writeDocToTable(ctx context.Context, eng *engine.SqlEngine, docName, content string) error {
func writeDocToTable(ctx context.Context, eng *engine.SqlEngine, dbName, docName, content string) error {
var (
sctx *sql.Context
err error
)
sctx, err = eng.NewContext(ctx)
sctx, err = eng.NewDefaultContext(ctx)
if err != nil {
return err
}
sctx.SetCurrentDatabase(dbName)
err = sctx.Session.SetSessionVariable(sctx, sql.AutoCommitSessionVar, 1)
if err != nil {
return err
+7 -7
View File
@@ -88,12 +88,12 @@ func (cmd PrintCmd) Exec(ctx context.Context, commandStr string, args []string,
}
func writeDoltDoc(ctx context.Context, dEnv *env.DoltEnv, docName string) error {
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
eng, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return err
}
doc, err := readDocFromTable(ctx, eng, docName)
doc, err := readDocFromTable(ctx, eng, dbName, docName)
if err != nil {
return err
}
@@ -107,11 +107,11 @@ const (
"FROM dolt_docs %s WHERE " + doltdb.DocPkColumnName + " = '%s'"
)
func readDocFromTable(ctx context.Context, eng *engine.SqlEngine, docName string) (string, error) {
return readDocFromTableAsOf(ctx, eng, docName, "")
func readDocFromTable(ctx context.Context, eng *engine.SqlEngine, dbName, docName string) (string, error) {
return readDocFromTableAsOf(ctx, eng, dbName, docName, "")
}
func readDocFromTableAsOf(ctx context.Context, eng *engine.SqlEngine, docName, asOf string) (doc string, err error) {
func readDocFromTableAsOf(ctx context.Context, eng *engine.SqlEngine, dbName, docName, asOf string) (doc string, err error) {
var (
sctx *sql.Context
iter sql.RowIter
@@ -123,11 +123,11 @@ func readDocFromTableAsOf(ctx context.Context, eng *engine.SqlEngine, docName, a
}
query := fmt.Sprintf(readDocTemplate, asOf, docName)
sctx, err = eng.NewContext(ctx)
sctx, err = eng.NewLocalContext(ctx)
if err != nil {
return "", err
}
sctx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
sctx.SetCurrentDatabase(dbName)
_, iter, err = eng.Query(sctx, query)
if sql.ErrTableNotFound.Is(err) {
+214 -16
View File
@@ -17,13 +17,18 @@ package commands
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/parse"
"github.com/dolthub/go-mysql-server/sql/plan"
"github.com/fatih/color"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -141,22 +146,6 @@ func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, d
return HandleVErrAndExitCode(vErr, usage)
}
// Look for schemas and procedures table, and add to tblNames only for sql dumps
if !schemaOnly && (resFormat == emptyFileExt || resFormat == sqlFileExt) {
sysTblNames, err := doltdb.GetSystemTableNames(ctx, root)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
for _, tblName := range sysTblNames {
switch tblName {
case doltdb.SchemasTableName:
tblNames = append(tblNames, doltdb.SchemasTableName)
case doltdb.ProceduresTableName:
tblNames = append(tblNames, doltdb.ProceduresTableName)
}
}
}
switch resFormat {
case emptyFileExt, sqlFileExt:
var defaultName string
@@ -203,6 +192,11 @@ func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, d
return HandleVErrAndExitCode(err, usage)
}
}
err = dumpSchemaElements(ctx, dEnv, fPath)
if err != nil {
return HandleVErrAndExitCode(err, usage)
}
case csvFileExt, jsonFileExt, parquetFileExt:
err = dumpNonSqlTables(ctx, root, dEnv, force, tblNames, resFormat, outputFileOrDirName, false)
if err != nil {
@@ -217,6 +211,210 @@ func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, d
return 0
}
// dumpSchemaElements writes the non-table schema elements (views, triggers, procedures) to the file path given
func dumpSchemaElements(ctx context.Context, dEnv *env.DoltEnv, path string) errhand.VerboseError {
writer, err := dEnv.FS.OpenForWriteAppend(path, os.ModePerm)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
engine, dbName, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
sqlCtx, err := engine.NewLocalContext(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
sqlCtx.SetCurrentDatabase(dbName)
root, err := dEnv.WorkingRoot(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = dumpViews(sqlCtx, engine, root, writer)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = dumpTriggers(sqlCtx, engine, root, writer)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = dumpProcedures(sqlCtx, engine, root, writer)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = writer.Close()
if err != nil {
return errhand.VerboseErrorFromError(err)
}
return nil
}
func dumpProcedures(sqlCtx *sql.Context, engine *engine.SqlEngine, root *doltdb.RootValue, writer io.WriteCloser) (rerr error) {
_, _, ok, err := root.GetTableInsensitive(sqlCtx, doltdb.ProceduresTableName)
if err != nil {
return err
}
if !ok {
return nil
}
sch, iter, err := engine.Query(sqlCtx, "select * from "+doltdb.ProceduresTableName)
if err != nil {
return err
}
stmtColIdx := sch.IndexOfColName(doltdb.ProceduresTableCreateStmtCol)
defer func(iter sql.RowIter, context *sql.Context) {
err := iter.Close(context)
if rerr == nil && err != nil {
rerr = err
}
}(iter, sqlCtx)
for {
row, err := iter.Next(sqlCtx)
if err == io.EOF {
break
} else if err != nil {
return err
}
err = iohelp.WriteLine(writer, fmt.Sprintf("delimiter END_PROCEDURE"))
if err != nil {
return err
}
err = iohelp.WriteLine(writer, fmt.Sprintf("%s;", row[stmtColIdx]))
if err != nil {
return err
}
err = iohelp.WriteLine(writer, fmt.Sprintf("END_PROCEDURE\ndelimiter ;"))
if err != nil {
return err
}
}
return nil
}
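Illustrative output of the three WriteLine calls above for a hypothetical stored procedure p1 (the delimiter lines guard procedure bodies that contain ';' internally):
delimiter END_PROCEDURE
CREATE PROCEDURE p1() SELECT 1;
END_PROCEDURE
delimiter ;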
func dumpTriggers(sqlCtx *sql.Context, engine *engine.SqlEngine, root *doltdb.RootValue, writer io.WriteCloser) (rerr error) {
_, _, ok, err := root.GetTableInsensitive(sqlCtx, doltdb.SchemasTableName)
if err != nil {
return err
}
if !ok {
return nil
}
sch, iter, err := engine.Query(sqlCtx, "select * from "+doltdb.SchemasTableName)
if err != nil {
return err
}
typeColIdx := sch.IndexOfColName(doltdb.SchemasTablesTypeCol)
fragColIdx := sch.IndexOfColName(doltdb.SchemasTablesFragmentCol)
defer func(iter sql.RowIter, context *sql.Context) {
err := iter.Close(context)
if rerr == nil && err != nil {
rerr = err
}
}(iter, sqlCtx)
for {
row, err := iter.Next(sqlCtx)
if err == io.EOF {
break
} else if err != nil {
return err
}
if row[typeColIdx] != "trigger" {
continue
}
err = iohelp.WriteLine(writer, fmt.Sprintf("%s;", row[fragColIdx]))
if err != nil {
return err
}
}
return nil
}
func dumpViews(ctx *sql.Context, engine *engine.SqlEngine, root *doltdb.RootValue, writer io.WriteCloser) (rerr error) {
_, _, ok, err := root.GetTableInsensitive(ctx, doltdb.SchemasTableName)
if err != nil {
return err
}
if !ok {
return nil
}
sch, iter, err := engine.Query(ctx, "select * from "+doltdb.SchemasTableName)
if err != nil {
return err
}
typeColIdx := sch.IndexOfColName(doltdb.SchemasTablesTypeCol)
fragColIdx := sch.IndexOfColName(doltdb.SchemasTablesFragmentCol)
nameColIdx := sch.IndexOfColName(doltdb.SchemasTablesNameCol)
defer func(iter sql.RowIter, context *sql.Context) {
err := iter.Close(context)
if rerr == nil && err != nil {
rerr = err
}
}(iter, ctx)
for {
row, err := iter.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return err
}
if row[typeColIdx] != "view" {
continue
}
// We used to store just the SELECT part of a view, but now we store the entire CREATE VIEW statement
cv, err := parse.Parse(ctx, row[fragColIdx].(string))
if err != nil {
return err
}
if _, ok := cv.(*plan.CreateView); ok {
err := iohelp.WriteLine(writer, fmt.Sprintf("%s;", row[fragColIdx]))
if err != nil {
return err
}
} else {
err := iohelp.WriteLine(writer, fmt.Sprintf("CREATE VIEW %s AS %s;", row[nameColIdx], row[fragColIdx]))
if err != nil {
return err
}
}
}
return nil
}
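For example (hypothetical fragment), a legacy row storing only the query body select 2+2 from dual for a view v1 fails the *plan.CreateView assertion and is emitted as CREATE VIEW v1 AS select 2+2 from dual;, while a modern fragment that already parses as a full CREATE VIEW statement is written verbatim.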
type dumpOptions struct {
format string
schemaOnly bool
+74 -90
View File
@@ -38,6 +38,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/mysql_file_handler"
"github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/store/types"
)
// SqlEngine packages up the context necessary to run sql queries against dsqle.
@@ -49,11 +50,10 @@ type SqlEngine struct {
resultFormat PrintResultFormat
}
type sessionFactory func(ctx *sql.Context, mysqlSess *sql.BaseSession, pro sql.DatabaseProvider) (*dsess.DoltSession, error)
type contextFactory func(ctx context.Context) (*sql.Context, error)
type sessionFactory func(mysqlSess *sql.BaseSession, pro sql.DatabaseProvider) (*dsess.DoltSession, error)
type contextFactory func(ctx context.Context, session sql.Session) (*sql.Context, error)
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
IsServerLocked bool
DoltCfgDirPath string
@@ -80,13 +80,20 @@ func NewSqlEngine(
config.IsServerLocked = true
}
parallelism := runtime.GOMAXPROCS(0)
dbs, locations, err := CollectDBs(ctx, mrEnv, config.Bulk)
if err != nil {
return nil, err
}
nbf := types.Format_Default
if len(dbs) > 0 {
nbf = dbs[0].DbData().Ddb.Format()
}
parallelism := runtime.GOMAXPROCS(0)
if types.IsFormat_DOLT(nbf) {
parallelism = 1
}
bThreads := sql.NewBackgroundThreads()
dbs, err = dsqle.ApplyReplicationConfig(ctx, bThreads, mrEnv, cli.CliOut, dbs...)
if err != nil {
@@ -155,28 +162,29 @@ func NewSqlEngine(
}
}
sess, err := dsess.NewDoltSession(sql.NewEmptyContext(), sql.NewBaseSession(), pro, mrEnv.Config(), bcController)
if err != nil {
return nil, err
}
// this is overwritten only for server sessions
for _, db := range dbs {
db.DbData().Ddb.SetCommitHookLogger(ctx, cli.CliOut)
}
// TODO: this should just be the session default like it is with MySQL
err = sess.SetSessionVariable(sql.NewContext(ctx), sql.AutoCommitSessionVar, config.Autocommit)
if err != nil {
return nil, err
}
sessionFactory := doltSessionFactory(pro, mrEnv.Config(), bcController, config.Autocommit)
configureBinlogReplicaController(config, engine, sess)
if config.BinlogReplicaController != nil {
binLogSession, err := sessionFactory(sql.NewBaseSession(), pro)
if err != nil {
return nil, err
}
err = configureBinlogReplicaController(config, engine, binLogSession)
if err != nil {
return nil, err
}
}
return &SqlEngine{
provider: pro,
contextFactory: newSqlContext(sess, config.InitialDb),
dsessFactory: newDoltSession(pro, mrEnv.Config(), config.Autocommit, bcController),
contextFactory: sqlContextFactory(),
dsessFactory: sessionFactory,
engine: engine,
resultFormat: format,
}, nil
@@ -190,7 +198,7 @@ func NewRebasedSqlEngine(engine *gms.Engine, dbs map[string]dsqle.SqlDatabase) *
}
}
// Databases() returns a list of all databases in the engine
// Databases returns a slice of all databases in the engine
func (se *SqlEngine) Databases(ctx *sql.Context) []dsqle.SqlDatabase {
databases := se.provider.AllDatabases(ctx)
dbs := make([]dsqle.SqlDatabase, len(databases))
@@ -201,19 +209,34 @@ func (se *SqlEngine) Databases(ctx *sql.Context) []dsqle.SqlDatabase {
return nil
}
// NewContext converts a context.Context to a sql.Context.
// TODO: investigate uses of this
func (se *SqlEngine) NewContext(ctx context.Context) (*sql.Context, error) {
return se.contextFactory(ctx)
// NewContext returns a new sql.Context with the given session.
func (se *SqlEngine) NewContext(ctx context.Context, session sql.Session) (*sql.Context, error) {
return se.contextFactory(ctx, session)
}
func (se *SqlEngine) NewDoltSession(ctx context.Context, mysqlSess *sql.BaseSession) (*dsess.DoltSession, error) {
// TODO: this seems wasteful, we are creating a context for very little work here
sqlCtx, err := se.NewContext(ctx)
// NewDefaultContext returns a new sql.Context with a new default dolt session.
func (se *SqlEngine) NewDefaultContext(ctx context.Context) (*sql.Context, error) {
session, err := se.NewDoltSession(ctx, sql.NewBaseSession())
if err != nil {
return nil, err
}
return se.dsessFactory(sqlCtx, mysqlSess, se.provider)
return se.contextFactory(ctx, session)
}
// NewLocalContext returns a new |sql.Context| with its client set to |root|
func (se *SqlEngine) NewLocalContext(ctx context.Context) (*sql.Context, error) {
sqlCtx, err := se.NewDefaultContext(ctx)
if err != nil {
return nil, err
}
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
return sqlCtx, nil
}
// NewDoltSession creates a new DoltSession from a BaseSession
func (se *SqlEngine) NewDoltSession(_ context.Context, mysqlSess *sql.BaseSession) (*dsess.DoltSession, error) {
return se.dsessFactory(mysqlSess, se.provider)
}
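A minimal sketch of the call-site pattern these helpers enable, mirroring dumpSchemaElements and the diff command elsewhere in this commit; the helper name localSqlContext is hypothetical, and the package's existing imports are assumed:
func localSqlContext(ctx context.Context, dEnv *env.DoltEnv) (*SqlEngine, *sql.Context, error) {
	eng, dbName, err := NewSqlEngineForEnv(ctx, dEnv)
	if err != nil {
		return nil, nil, err
	}
	sqlCtx, err := eng.NewLocalContext(ctx) // default dolt session + root client
	if err != nil {
		return nil, nil, err
	}
	sqlCtx.SetCurrentDatabase(dbName)
	return eng, sqlCtx, nil
}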
// GetResultFormat returns the printing format of the engine. The format isn't used by the engine internally, only
@@ -276,61 +299,44 @@ func (se *SqlEngine) Close() error {
return nil
}
// configureBinlogReplicaController examines the specified |config| and if a binlog replica controller is provided,
// it creates a new context from the specified |sess| for the replica's applier to use, and it configures the
// binlog replica controller with the |engine|.
func configureBinlogReplicaController(config *SqlEngineConfig, engine *gms.Engine, sess *dsess.DoltSession) error {
if config.BinlogReplicaController == nil {
return nil
}
// configureBinlogReplicaController configures the binlog replication controller with the |engine|.
func configureBinlogReplicaController(config *SqlEngineConfig, engine *gms.Engine, session *dsess.DoltSession) error {
contextFactory := sqlContextFactory()
contextFactory := newSqlContext(sess, config.InitialDb)
newCtx, err := contextFactory(context.Background())
executionCtx, err := contextFactory(context.Background(), session)
if err != nil {
return err
}
newCtx.SetClient(sql.Client{
executionCtx.SetClient(sql.Client{
User: "root",
Address: "localhost",
})
dblr.DoltBinlogReplicaController.SetExecutionContext(newCtx)
dblr.DoltBinlogReplicaController.SetExecutionContext(executionCtx)
engine.Analyzer.BinlogReplicaController = config.BinlogReplicaController
return nil
}
func newSqlContext(sess *dsess.DoltSession, initialDb string) func(ctx context.Context) (*sql.Context, error) {
return func(ctx context.Context) (*sql.Context, error) {
sqlCtx := sql.NewContext(ctx, sql.WithSession(sess))
// If the session was already updated with a database then continue using it in the new session. Otherwise
// use the initial one.
if sessionDB := sess.GetCurrentDatabase(); sessionDB != "" {
sqlCtx.SetCurrentDatabase(sessionDB)
} else {
sqlCtx.SetCurrentDatabase(initialDb)
}
// sqlContextFactory returns a contextFactory that creates a new sql.Context for the session provided
func sqlContextFactory() contextFactory {
return func(ctx context.Context, session sql.Session) (*sql.Context, error) {
sqlCtx := sql.NewContext(ctx, sql.WithSession(session))
return sqlCtx, nil
}
}
// TODO: this should not require autocommit, that should be handled by the session default
func newDoltSession(
pro dsqle.DoltDatabaseProvider,
config config.ReadWriteConfig,
autocommit bool,
bc *branch_control.Controller,
) sessionFactory {
return func(ctx *sql.Context, mysqlSess *sql.BaseSession, provider sql.DatabaseProvider) (*dsess.DoltSession, error) {
dsess, err := dsess.NewDoltSession(sql.NewEmptyContext(), mysqlSess, pro, config, bc)
// doltSessionFactory returns a sessionFactory that creates a new DoltSession
func doltSessionFactory(pro dsqle.DoltDatabaseProvider, config config.ReadWriteConfig, bc *branch_control.Controller, autocommit bool) sessionFactory {
return func(mysqlSess *sql.BaseSession, provider sql.DatabaseProvider) (*dsess.DoltSession, error) {
dsess, err := dsess.NewDoltSession(mysqlSess, pro, config, bc)
if err != nil {
return nil, err
}
// TODO: this should just be the session default like it is with MySQL
err = dsess.SetSessionVariable(sql.NewContext(ctx), sql.AutoCommitSessionVar, autocommit)
// A nil ctx is fine here: it isn't used when setting a session variable. Creating a new sql.Context isn't
// free and would be throwaway work, since a session must exist before a sql.Context can be created for user work.
err = dsess.SetSessionVariable(nil, sql.AutoCommitSessionVar, autocommit)
if err != nil {
return nil, err
}
@@ -339,45 +345,23 @@ func newDoltSession(
}
}
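A minimal sketch (assuming this package's imports) of how the two factories compose, in the same order NewDefaultContext uses: build the session first, then wrap it in a sql.Context. The helper name buildLocalContext is hypothetical:
func buildLocalContext(ctx context.Context, pro dsqle.DoltDatabaseProvider, cfg config.ReadWriteConfig, bc *branch_control.Controller) (*sql.Context, error) {
	sess, err := doltSessionFactory(pro, cfg, bc, true)(sql.NewBaseSession(), pro)
	if err != nil {
		return nil, err
	}
	return sqlContextFactory()(ctx, sess)
}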
// NewSqlEngineForEnv returns a SqlEngine configured for the environment provided, with a single root user
func NewSqlEngineForEnv(ctx context.Context, dEnv *env.DoltEnv) (*SqlEngine, error) {
// NewSqlEngineForEnv returns a SqlEngine configured for the environment provided, with a single root user.
// Returns the new engine, the first database name, and any error that occurred.
func NewSqlEngineForEnv(ctx context.Context, dEnv *env.DoltEnv) (*SqlEngine, string, error) {
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
return nil, err
return nil, "", err
}
// Choose the first DB as the current one. This will be the DB in the working dir if there was one there
var dbName string
err = mrEnv.Iter(func(name string, _ *env.DoltEnv) (stop bool, err error) {
dbName = name
return true, nil
})
if err != nil {
return nil, err
}
return NewSqlEngine(
engine, err := NewSqlEngine(
ctx,
mrEnv,
FormatCsv,
&SqlEngineConfig{
InitialDb: dbName,
IsReadOnly: false,
ServerUser: "root",
ServerPass: "",
ServerHost: "localhost",
Autocommit: false,
},
)
}
// NewLocalSqlContext returns a new |sql.Context| using the engine provided, with its client set to |root|
func NewLocalSqlContext(ctx context.Context, se *SqlEngine) (*sql.Context, error) {
sqlCtx, err := se.NewContext(ctx)
if err != nil {
return nil, err
}
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
return sqlCtx, nil
return engine, mrEnv.GetFirstDatabase(), err
}
+5 -5
View File
@@ -43,7 +43,7 @@ import (
)
const (
dbName = "filterDB"
filterDbName = "filterDB"
branchesFlag = "branches"
)
@@ -256,7 +256,7 @@ func processFilterQuery(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commi
}
sess := dsess.DSessFromSess(sqlCtx.Session)
ws, err := sess.WorkingSet(sqlCtx, dbName)
ws, err := sess.WorkingSet(sqlCtx, filterDbName)
if err != nil {
return nil, err
}
@@ -274,7 +274,7 @@ func rebaseSqlEngine(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commit)
return nil, nil, err
}
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
db, err := dsqle.NewDatabase(ctx, dbName, dEnv.DbData(), opts)
db, err := dsqle.NewDatabase(ctx, filterDbName, dEnv.DbData(), opts)
if err != nil {
return nil, nil, err
}
@@ -340,9 +340,9 @@ func rebaseSqlEngine(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commit)
return nil, nil, err
}
sqlCtx.SetCurrentDatabase(dbName)
sqlCtx.SetCurrentDatabase(filterDbName)
se := engine.NewRebasedSqlEngine(sqle.New(azr, &sqle.Config{IsReadOnly: false}), map[string]dsqle.SqlDatabase{dbName: db})
se := engine.NewRebasedSqlEngine(sqle.New(azr, &sqle.Config{IsReadOnly: false}), map[string]dsqle.SqlDatabase{filterDbName: db})
return sqlCtx, se, nil
}
+184 -265
View File
@@ -45,7 +45,6 @@ import (
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
@@ -199,7 +198,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
username = user
}
mrEnv, verr := getMultiRepoEnv(ctx, apr, dEnv, cmd)
mrEnv, verr := getMultiRepoEnv(ctx, apr, dEnv)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
@@ -268,44 +267,21 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
}
initialRoots, err := mrEnv.GetWorkingRoots(ctx)
se, sqlCtx, err := newEngine(ctx, apr, cfgDirPath, privsFp, branchControlFilePath, username, mrEnv)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
// Choose the first DB as the current one. This will be the DB in the working dir if there was one there
var currentDb string
mrEnv.Iter(func(name string, _ *env.DoltEnv) (stop bool, err error) {
currentDb = name
return true, nil
})
format := engine.FormatTabular
if formatSr, ok := apr.GetValue(FormatFlag); ok {
var verr errhand.VerboseError
format, verr = GetResultFormat(formatSr)
if verr != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(verr), usage)
}
}
config := &engine.SqlEngineConfig{
InitialDb: currentDb,
IsReadOnly: false,
DoltCfgDirPath: cfgDirPath,
PrivFilePath: privsFp,
BranchCtrlFilePath: branchControlFilePath,
ServerUser: username,
ServerHost: DefaultHost,
Autocommit: true,
}
defer se.Close()
if query, queryOK := apr.GetValue(QueryFlag); queryOK {
return queryMode(ctx, mrEnv, initialRoots, apr, query, format, usage, config)
if apr.Contains(saveFlag) {
return execSaveQuery(sqlCtx, dEnv, se, apr, query, usage)
}
return queryMode(sqlCtx, se, apr, query, usage)
} else if savedQueryName, exOk := apr.GetValue(executeFlag); exOk {
return savedQueryMode(ctx, mrEnv, initialRoots, savedQueryName, format, usage, config)
return executeSavedQuery(sqlCtx, se, dEnv, savedQueryName, usage)
} else if apr.Contains(listSavedFlag) {
return listSavedQueriesMode(ctx, mrEnv, initialRoots, format, usage, config)
return listSavedQueries(sqlCtx, se, dEnv, usage)
} else {
// Run in either batch mode for piped input, or shell mode for interactive
isTty := false
@@ -340,19 +316,19 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
if isTty {
verr := execShell(ctx, mrEnv, format, config)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
err := execShell(sqlCtx, se)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
} else if runInBatchMode {
verr := execBatch(ctx, continueOnError, mrEnv, input, format, config)
verr = execBatch(sqlCtx, se, input, continueOnError)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
} else {
verr := execMultiStatements(ctx, continueOnError, mrEnv, input, format, config)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
err := execMultiStatements(sqlCtx, se, input, continueOnError)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
}
}
@@ -360,15 +336,74 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return 0
}
func listSavedQueriesMode(
func newEngine(
ctx context.Context,
apr *argparser.ArgParseResults,
cfgDirPath string,
privsFp string,
branchControlFilePath string,
username string,
mrEnv *env.MultiRepoEnv,
initialRoots map[string]*doltdb.RootValue,
format engine.PrintResultFormat,
usage cli.UsagePrinter,
config *engine.SqlEngineConfig,
) int {
hasQC, err := initialRoots[config.InitialDb].HasTable(ctx, doltdb.DoltQueryCatalogTableName)
) (*engine.SqlEngine, *sql.Context, error) {
format := engine.FormatTabular
if formatSr, ok := apr.GetValue(FormatFlag); ok {
var verr errhand.VerboseError
format, verr = GetResultFormat(formatSr)
if verr != nil {
return nil, nil, verr
}
}
config := &engine.SqlEngineConfig{
DoltCfgDirPath: cfgDirPath,
PrivFilePath: privsFp,
BranchCtrlFilePath: branchControlFilePath,
ServerUser: username,
ServerHost: DefaultHost,
Autocommit: true,
}
se, err := engine.NewSqlEngine(
ctx,
mrEnv,
format,
config,
)
if err != nil {
return nil, nil, err
}
sqlCtx, err := se.NewDefaultContext(ctx)
if err != nil {
return nil, nil, err
}
// Whether we're running in shell mode or some other mode, sql commands from the command line always have a current
// database set when you begin using them.
sqlCtx.SetCurrentDatabase(mrEnv.GetFirstDatabase())
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.AddSuperUser(config.ServerUser, config.ServerHost, config.ServerPass)
}
// Set client to specified user
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
return se, sqlCtx, nil
}
func listSavedQueries(ctx *sql.Context, se *engine.SqlEngine, dEnv *env.DoltEnv, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", listSavedFlag).Build(), usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
hasQC, err := workingRoot.HasTable(ctx, doltdb.DoltQueryCatalogTableName)
if err != nil {
verr := errhand.BuildDError("error: Failed to read from repository.").AddCause(err).Build()
@@ -380,85 +415,93 @@ func listSavedQueriesMode(
}
query := "SELECT * FROM " + doltdb.DoltQueryCatalogTableName
return HandleVErrAndExitCode(execQuery(ctx, mrEnv, query, format, config), usage)
return HandleVErrAndExitCode(execQuery(ctx, se, query), usage)
}
func savedQueryMode(
ctx context.Context,
mrEnv *env.MultiRepoEnv,
initialRoots map[string]*doltdb.RootValue,
savedQueryName string,
format engine.PrintResultFormat,
usage cli.UsagePrinter,
config *engine.SqlEngineConfig,
) int {
sq, err := dtables.RetrieveFromQueryCatalog(ctx, initialRoots[config.InitialDb], savedQueryName)
func executeSavedQuery(ctx *sql.Context, se *engine.SqlEngine, dEnv *env.DoltEnv, savedQueryName string, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", executeFlag).Build(), usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
sq, err := dtables.RetrieveFromQueryCatalog(ctx, workingRoot, savedQueryName)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
cli.PrintErrf("Executing saved query '%s':\n%s\n", savedQueryName, sq.Query)
return HandleVErrAndExitCode(execQuery(ctx, mrEnv, sq.Query, format, config), usage)
return HandleVErrAndExitCode(execQuery(ctx, se, sq.Query), usage)
}
func queryMode(
ctx context.Context,
mrEnv *env.MultiRepoEnv,
initialRoots map[string]*doltdb.RootValue,
ctx *sql.Context,
se *engine.SqlEngine,
apr *argparser.ArgParseResults,
query string,
format engine.PrintResultFormat,
usage cli.UsagePrinter,
config *engine.SqlEngineConfig,
) int {
// query mode has 3 sub modes:
// query mode has 2 sub modes:
// If --batch is provided, run with batch optimizations
// If --save is provided, run a single query and save it
// Otherwise, attempt to parse and execute multiple queries separated by ';'
batchMode := apr.Contains(BatchFlag)
saveName := apr.GetValueOrDefault(saveFlag, "")
_, continueOnError := apr.GetValue(continueFlag)
if saveName != "" {
verr := execQuery(ctx, mrEnv, query, format, config)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
if saveName != "" {
saveMessage := apr.GetValueOrDefault(messageFlag, "")
newRoot, verr := saveQuery(ctx, initialRoots[config.InitialDb], query, saveName, saveMessage)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
verr = UpdateWorkingWithVErr(mrEnv.GetEnv(config.InitialDb), newRoot)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
}
} else if batchMode {
if batchMode {
batchInput := strings.NewReader(query)
verr := execBatch(ctx, continueOnError, mrEnv, batchInput, format, config)
verr := execBatch(ctx, se, batchInput, continueOnError)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
} else {
input := strings.NewReader(query)
verr := execMultiStatements(ctx, continueOnError, mrEnv, input, format, config)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
err := execMultiStatements(ctx, se, input, continueOnError)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
}
return 0
}
func execSaveQuery(ctx *sql.Context, dEnv *env.DoltEnv, se *engine.SqlEngine, apr *argparser.ArgParseResults, query string, usage cli.UsagePrinter) int {
if !dEnv.Valid() {
return HandleVErrAndExitCode(errhand.BuildDError("error: --%s must be used in a dolt database directory.", saveFlag).Build(), usage)
}
saveName := apr.GetValueOrDefault(saveFlag, "")
verr := execQuery(ctx, se, query)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("error: failed to get working root").AddCause(err).Build(), usage)
}
saveMessage := apr.GetValueOrDefault(messageFlag, "")
newRoot, verr := saveQuery(ctx, workingRoot, query, saveName, saveMessage)
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
}
err = dEnv.UpdateWorkingRoot(ctx, newRoot)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("error: failed to update working root").AddCause(err).Build(), usage)
}
return 0
}
// getMultiRepoEnv returns an appropriate MultiRepoEnv for this invocation of the command
func getMultiRepoEnv(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv, cmd SqlCmd) (*env.MultiRepoEnv, errhand.VerboseError) {
func getMultiRepoEnv(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) (*env.MultiRepoEnv, errhand.VerboseError) {
var err error
fs := dEnv.FS
@@ -480,65 +523,15 @@ func getMultiRepoEnv(ctx context.Context, apr *argparser.ArgParseResults, dEnv *
return mrEnv, nil
}
func execShell(
ctx context.Context,
mrEnv *env.MultiRepoEnv,
format engine.PrintResultFormat,
config *engine.SqlEngineConfig,
) errhand.VerboseError {
se, err := engine.NewSqlEngine(
ctx,
mrEnv,
format,
config,
)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
defer se.Close()
err = runShell(ctx, se, mrEnv, config)
if err != nil {
return errhand.BuildDError(err.Error()).Build()
}
return nil
}
func execBatch(
ctx context.Context,
continueOnErr bool,
mrEnv *env.MultiRepoEnv,
sqlCtx *sql.Context,
se *engine.SqlEngine,
batchInput io.Reader,
format engine.PrintResultFormat,
config *engine.SqlEngineConfig,
continueOnErr bool,
) errhand.VerboseError {
se, err := engine.NewSqlEngine(
ctx,
mrEnv,
format,
config,
)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
defer se.Close()
sqlCtx, err := se.NewContext(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.AddSuperUser(config.ServerUser, config.ServerHost, config.ServerPass)
}
// Set client to specified user
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
// In batch mode, we need to set a couple flags on the session to prevent constant flushes to disk
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
err = runBatchMode(sqlCtx, se, batchInput, continueOnErr)
err := runBatchMode(sqlCtx, se, batchInput, continueOnErr)
if err != nil {
// If we encounter an error, attempt to flush what we have so far to disk before exiting
flushErr := flushBatchedEdits(sqlCtx, se)
@@ -552,72 +545,11 @@ func execBatch(
return nil
}
func execMultiStatements(
ctx context.Context,
continueOnErr bool,
mrEnv *env.MultiRepoEnv,
batchInput io.Reader,
format engine.PrintResultFormat,
config *engine.SqlEngineConfig,
) errhand.VerboseError {
se, err := engine.NewSqlEngine(
ctx,
mrEnv,
format,
config,
)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
defer se.Close()
sqlCtx, err := se.NewContext(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.AddSuperUser(config.ServerUser, config.ServerHost, config.ServerPass)
}
// Set client to specified user
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
err = runMultiStatementMode(sqlCtx, se, batchInput, continueOnErr)
return errhand.VerboseErrorFromError(err)
}
func execQuery(
ctx context.Context,
mrEnv *env.MultiRepoEnv,
sqlCtx *sql.Context,
se *engine.SqlEngine,
query string,
format engine.PrintResultFormat,
config *engine.SqlEngineConfig,
) errhand.VerboseError {
se, err := engine.NewSqlEngine(
ctx,
mrEnv,
format,
config,
)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
defer se.Close()
sqlCtx, err := se.NewContext(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.AddSuperUser(config.ServerUser, config.ServerHost, config.ServerPass)
}
// Set client to specified user
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
sqlSch, rowIter, err := processQuery(sqlCtx, query, se)
if err != nil {
@@ -787,7 +719,7 @@ func validateSqlArgs(apr *argparser.ArgParseResults) error {
}
// Saves the query given to the catalog with the name and message given.
func saveQuery(ctx context.Context, root *doltdb.RootValue, query string, name string, message string) (*doltdb.RootValue, errhand.VerboseError) {
func saveQuery(ctx *sql.Context, root *doltdb.RootValue, query string, name string, message string) (*doltdb.RootValue, errhand.VerboseError) {
_, newRoot, err := dtables.NewQueryCatalogEntryWithNameAsID(ctx, root, name, query, message)
if err != nil {
return nil, errhand.BuildDError("Couldn't save query").AddCause(err).Build()
@@ -796,8 +728,8 @@ func saveQuery(ctx context.Context, root *doltdb.RootValue, query string, name s
return newRoot, nil
}
// runMultiStatementMode allows for the execution of more than one query, but it doesn't attempt any batch optimizations
func runMultiStatementMode(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, continueOnErr bool) error {
// execMultiStatements runs all the queries in the input reader without any batch optimizations
func execMultiStatements(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, continueOnErr bool) error {
scanner := NewSqlStatementScanner(input)
var query string
for scanner.Scan() {
@@ -900,25 +832,9 @@ func runBatchMode(ctx *sql.Context, se *engine.SqlEngine, input io.Reader, conti
// execShell starts a SQL shell. Returns when the user exits the shell. The Root of the sqlEngine may
// be updated by any queries which were processed.
func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv, config *engine.SqlEngineConfig) error {
func execShell(sqlCtx *sql.Context, se *engine.SqlEngine) error {
_ = iohelp.WriteLine(cli.CliOut, welcomeMsg)
sqlCtx, err := se.NewContext(ctx)
if err != nil {
return err
}
// Add specified user as new superuser, if it doesn't already exist
if user := se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.GetUser(config.ServerUser, config.ServerHost, false); user == nil {
se.GetUnderlyingEngine().Analyzer.Catalog.MySQLDb.AddSuperUser(config.ServerUser, config.ServerHost, config.ServerPass)
}
// Add root client
sqlCtx.Session.SetClient(sql.Client{User: config.ServerUser, Address: config.ServerHost, Capabilities: 0})
currentDB := sqlCtx.Session.GetCurrentDatabase()
currEnv := mrEnv.GetEnv(currentDB)
historyFile := filepath.Join(".sqlhistory") // history file written to working dir
initialPrompt := fmt.Sprintf("%s> ", sqlCtx.GetCurrentDatabase())
initialMultilinePrompt := fmt.Sprintf(fmt.Sprintf("%%%ds", len(initialPrompt)), "-> ")
@@ -947,7 +863,7 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
shell := ishell.NewUninterpreted(&shellConf)
shell.SetMultiPrompt(initialMultilinePrompt)
// TODO: update completer on create / drop / alter statements
completer, err := newCompleter(ctx, currEnv)
completer, err := newCompleter(sqlCtx, se)
if err != nil {
return err
}
@@ -968,6 +884,8 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
}
})
initialCtx := sqlCtx.Context
shell.Uninterpreted(func(c *ishell.Context) {
query := c.Args[0]
if len(strings.TrimSpace(query)) == 0 {
@@ -1001,10 +919,10 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
var rowIter sql.RowIter
cont := func() bool {
subCtx, stop := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
subCtx, stop := signal.NotifyContext(initialCtx, os.Interrupt, syscall.SIGTERM)
defer stop()
sqlCtx, err = se.NewContext(subCtx)
sqlCtx, err = se.NewContext(subCtx, sqlCtx.Session)
if err != nil {
shell.Println(color.RedString(err.Error()))
return false
@@ -1045,49 +963,50 @@ func runShell(ctx context.Context, se *engine.SqlEngine, mrEnv *env.MultiRepoEnv
}
// Returns a new auto completer with table names, column names, and SQL keywords.
func newCompleter(ctx context.Context, dEnv *env.DoltEnv) (*sqlCompleter, error) {
// TODO: change the sqlCompleter based on the current database and change it when the database changes.
if dEnv == nil {
return &sqlCompleter{}, nil
}
var completionWords []string
root, err := dEnv.WorkingRoot(ctx)
if err != nil {
return &sqlCompleter{}, nil
}
tableNames, err := root.GetTableNames(ctx)
// TODO: update the completer on DDL, branch change, etc.
func newCompleter(
ctx *sql.Context,
se *engine.SqlEngine,
) (completer *sqlCompleter, rerr error) {
subCtx, stop := signal.NotifyContext(ctx.Context, os.Interrupt, syscall.SIGTERM)
defer stop()
sqlCtx, err := se.NewContext(subCtx, ctx.Session)
if err != nil {
return nil, err
}
completionWords = append(completionWords, tableNames...)
_, iter, err := se.Query(sqlCtx, "select table_schema, table_name, column_name from information_schema.columns;")
if err != nil {
return nil, err
}
defer func(iter sql.RowIter, context *sql.Context) {
err := iter.Close(context)
if err != nil && rerr == nil {
rerr = err
}
}(iter, sqlCtx)
identifiers := make(map[string]struct{})
var columnNames []string
for _, tableName := range tableNames {
tbl, _, err := root.GetTable(ctx, tableName)
if err != nil {
for {
r, err := iter.Next(sqlCtx)
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
sch, err := tbl.GetSchema(ctx)
identifiers[r[0].(string)] = struct{}{}
identifiers[r[1].(string)] = struct{}{}
identifiers[r[2].(string)] = struct{}{}
columnNames = append(columnNames, r[2].(string))
}
if err != nil {
return nil, err
}
err = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
completionWords = append(completionWords, col.Name)
columnNames = append(columnNames, col.Name)
return false, nil
})
if err != nil {
return nil, err
}
var completionWords []string
for k := range identifiers {
completionWords = append(completionWords, k)
}
completionWords = append(completionWords, dsqle.CommonKeywords...)
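For a hypothetical database mydb containing t1(x, y), the information_schema query above yields rows like (mydb, t1, x) and (mydb, t1, y), so the identifiers set, and hence completionWords, contains mydb, t1, x, and y before the common SQL keywords are appended.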
+3 -4
View File
@@ -141,7 +141,6 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
BranchCtrlFilePath: serverConfig.BranchControlFilePath(),
@@ -231,7 +230,7 @@ func Serve(
var remoteSrv *remotesrv.Server
if serverConfig.RemotesapiPort() != nil {
port := *serverConfig.RemotesapiPort()
if remoteSrvSqlCtx, err := sqlEngine.NewContext(ctx); err == nil {
if remoteSrvSqlCtx, err := sqlEngine.NewDefaultContext(ctx); err == nil {
listenaddr := fmt.Sprintf(":%d", port)
args := sqle.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
Logger: logrus.NewEntry(lgr),
@@ -263,7 +262,7 @@ func Serve(
var clusterRemoteSrv *remotesrv.Server
if clusterController != nil {
if remoteSrvSqlCtx, err := sqlEngine.NewContext(ctx); err == nil {
if remoteSrvSqlCtx, err := sqlEngine.NewDefaultContext(ctx); err == nil {
args := clusterController.RemoteSrvServerArgs(remoteSrvSqlCtx, remotesrv.ServerArgs{
Logger: logrus.NewEntry(lgr),
})
@@ -398,7 +397,7 @@ func newSessionBuilder(se *engine.SqlEngine, config ServerConfig) server.Session
varsForUser := userToSessionVars[conn.User]
if len(varsForUser) > 0 {
sqlCtx, err := se.NewContext(ctx)
sqlCtx, err := se.NewContext(ctx, dsess)
if err != nil {
return nil, err
}
@@ -446,12 +446,12 @@ func TestReadReplica(t *testing.T) {
readReplicaDbName := multiSetup.DbNames[0]
sourceDbName := multiSetup.DbNames[1]
localCfg, ok := multiSetup.MrEnv.GetEnv(readReplicaDbName).Config.GetConfig(env.LocalConfig)
localCfg, ok := multiSetup.GetEnv(readReplicaDbName).Config.GetConfig(env.LocalConfig)
if !ok {
t.Fatal("local config does not exist")
}
config.NewPrefixConfig(localCfg, env.SqlServerGlobalsPrefix).SetStrings(map[string]string{dsess.ReadReplicaRemote: "remote1", dsess.ReplicateHeads: "main,feature"})
dsess.InitPersistedSystemVars(multiSetup.MrEnv.GetEnv(readReplicaDbName))
dsess.InitPersistedSystemVars(multiSetup.GetEnv(readReplicaDbName))
// start server as read replica
sc := NewServerController()
@@ -463,7 +463,7 @@ func TestReadReplica(t *testing.T) {
func() {
os.Chdir(multiSetup.DbPaths[readReplicaDbName])
go func() {
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, multiSetup.MrEnv.GetEnv(readReplicaDbName))
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, multiSetup.GetEnv(readReplicaDbName))
}()
err = sc.WaitForStart()
require.NoError(t, err)
+2 -15
View File
@@ -16,7 +16,6 @@ package sqlserver
import (
"context"
"errors"
"fmt"
"path/filepath"
"strconv"
@@ -406,23 +405,11 @@ func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResult
}
if dataDir, ok := apr.GetValue(commands.MultiDBDirFlag); ok {
dbNamesAndPaths, err := env.DBNamesAndPathsFromDir(dEnv.FS, dataDir)
if err != nil {
return nil, errors.New("failed to read databases in path specified by --data-dir. error: " + err.Error())
}
serverConfig.withDBNamesAndPaths(dbNamesAndPaths).withDataDir(dataDir)
serverConfig.withDataDir(dataDir)
}
if dataDir, ok := apr.GetValue(commands.DataDirFlag); ok {
dbNamesAndPaths, err := env.DBNamesAndPathsFromDir(dEnv.FS, dataDir)
if err != nil {
return nil, errors.New("failed to read databases in path specified by --data-dir. error: " + err.Error())
}
serverConfig.withDBNamesAndPaths(dbNamesAndPaths).withDataDir(dataDir)
serverConfig.withDataDir(dataDir)
}
if queryParallelism, ok := apr.GetInt(queryParallelismFlag); ok {
+1 -1
View File
@@ -57,7 +57,7 @@ import (
)
const (
Version = "0.53.0"
Version = "0.53.2"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+16 -1
View File
@@ -675,7 +675,19 @@ func (rcv *Index) MutatePrefixLengths(j int, n uint16) bool {
return false
}
const IndexNumFields = 9
func (rcv *Index) SpatialKey() bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(22))
if o != 0 {
return rcv._tab.GetBool(o + rcv._tab.Pos)
}
return false
}
func (rcv *Index) MutateSpatialKey(n bool) bool {
return rcv._tab.MutateBoolSlot(22, n)
}
const IndexNumFields = 10
func IndexStart(builder *flatbuffers.Builder) {
builder.StartObject(IndexNumFields)
@@ -719,6 +731,9 @@ func IndexAddPrefixLengths(builder *flatbuffers.Builder, prefixLengths flatbuffe
func IndexStartPrefixLengthsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(2, numElems, 2)
}
func IndexAddSpatialKey(builder *flatbuffers.Builder, spatialKey bool) {
builder.PrependBoolSlot(9, spatialKey, false)
}
func IndexEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
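The 22/9 pairing above follows from the FlatBuffers vtable layout used by generated Go accessors: field slot n lives at vtable byte offset 4 + 2*n, so slot 9 gives 4 + 2*9 = 22, matching Offset(22) in SpatialKey and PrependBoolSlot(9, ...) in IndexAddSpatialKey; IndexNumFields grows from 9 to 10 to account for the new field.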
+3 -4
View File
@@ -3,10 +3,9 @@ module github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi
go 1.13
require (
github.com/golang/protobuf v1.4.2
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect
golang.org/x/sys v0.0.0-20200620081246-981b61492c35 // indirect
golang.org/x/text v0.3.3 // indirect
github.com/golang/protobuf v1.4.2 // indirect
golang.org/x/text v0.3.8 // indirect
google.golang.org/genproto v0.0.0-20200622133129-d0ee0c36e670 // indirect
google.golang.org/grpc v1.29.1
google.golang.org/protobuf v1.24.0
)
+23 -13
View File
@@ -10,7 +10,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -24,43 +23,56 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200620081246-981b61492c35 h1:wb/9mP8eUAmHfkM8RmpeLq6nUA7c2i5+bQOtcDftjaE=
golang.org/x/sys v0.0.0-20200620081246-981b61492c35/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -68,8 +80,6 @@ google.golang.org/genproto v0.0.0-20200622133129-d0ee0c36e670 h1:v/N9fZIfu6jopNI
google.golang.org/genproto v0.0.0-20200622133129-d0ee0c36e670/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
+8 -7
View File
@@ -15,7 +15,7 @@ require (
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20230216234925-189ffe819e56
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad
github.com/dustin/go-humanize v1.0.0
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -31,7 +31,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
github.com/rivo/uniseg v0.2.0
github.com/sergi/go-diff v1.1.0 // indirect
github.com/sergi/go-diff v1.1.0
github.com/shopspring/decimal v1.2.0
github.com/silvasur/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/sirupsen/logrus v1.8.1
@@ -41,9 +41,9 @@ require (
github.com/tklauser/go-sysconf v0.3.9 // indirect
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.2.0
golang.org/x/net v0.7.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.2.0
golang.org/x/sys v0.5.0
google.golang.org/api v0.32.0
google.golang.org/grpc v1.49.0
google.golang.org/protobuf v1.28.1
@@ -58,10 +58,11 @@ require (
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/go-mysql-server v0.14.1-0.20230217225532-09205e0f234f
github.com/dolthub/go-mysql-server v0.14.1-0.20230228211838-a57e71ebf386
github.com/google/flatbuffers v2.0.6+incompatible
github.com/jmoiron/sqlx v1.3.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/kylelemons/godebug v1.1.0
github.com/mitchellh/go-ps v1.0.0
github.com/prometheus/client_golang v1.13.0
github.com/rs/zerolog v1.28.0
@@ -77,7 +78,7 @@ require (
go.opentelemetry.io/otel/exporters/jaeger v1.7.0
go.opentelemetry.io/otel/sdk v1.7.0
go.opentelemetry.io/otel/trace v1.7.0
golang.org/x/text v0.4.0
golang.org/x/text v0.7.0
gonum.org/v1/plot v0.11.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -130,7 +131,7 @@ require (
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
golang.org/x/tools v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
+22 -14
View File
@@ -166,16 +166,16 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.14.1-0.20230217225532-09205e0f234f h1:yuOrpt0Gwf8aYe7SmimteAjt/eNyUvBeaNRCq0RCMfA=
github.com/dolthub/go-mysql-server v0.14.1-0.20230217225532-09205e0f234f/go.mod h1:BRFyf6PUuoR+iSLZ+JdpjtqgHzo5cT+tF7oHIpVdytY=
github.com/dolthub/go-mysql-server v0.14.1-0.20230228211838-a57e71ebf386 h1:bycLf5/gWzTx6IuoxUCDz5tmkeL5f4C1IO4H79p0Ms4=
github.com/dolthub/go-mysql-server v0.14.1-0.20230228211838-a57e71ebf386/go.mod h1:I2Mu8LSpwUII53EyBXqJMEKTQH5DUetV4ulP88JVsKA=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474/go.mod h1:kMz7uXOXq4qRriCEyZ/LUeTqraLJCjf0WVZcUi6TxUY=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20230216234925-189ffe819e56 h1:dHuKfUwaDUe847BVN3Wo+4GUGUNdlhuUif4RWkvG3Go=
github.com/dolthub/vitess v0.0.0-20230216234925-189ffe819e56/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad h1:9FPQtKoqyREEsHfGKNU2DImktOusXTXklLtvTxtIuZ0=
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -412,6 +412,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is=
github.com/lestrrat-go/strftime v1.0.4 h1:T1Rb9EPkAhgxKqbcMIPguPq8glqXTA1koF8n9BHElA8=
@@ -636,6 +638,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
@@ -742,6 +745,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -776,7 +780,6 @@ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -787,8 +790,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -808,6 +812,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -851,7 +856,6 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200620081246-981b61492c35/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -874,14 +878,16 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -890,8 +896,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -952,6 +959,7 @@ golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4X
golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+13
View File
@@ -40,6 +40,16 @@ const (
ModifiedNew
)
// Mode is an enum that represents the presentation of a diff
type Mode int
const (
ModeRow Mode = 0
ModeLine Mode = 1
ModeInPlace Mode = 2
ModeContext Mode = 3
)
type RowDiffer interface {
// Start starts the RowDiffer.
Start(ctx context.Context, from, to types.Map)
@@ -63,6 +73,9 @@ type SqlRowDiffWriter interface {
// the input row.
WriteRow(ctx context.Context, row sql.Row, diffType ChangeType, colDiffTypes []ChangeType) error
// WriteCombinedRow writes the diff of the rows given as a single, combined row.
WriteCombinedRow(ctx context.Context, oldRow, newRow sql.Row, mode Mode) error
// Close finalizes the work of this writer.
Close(ctx context.Context) error
}
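As a rough illustration of the new Mode enum and the WriteCombinedRow hook (not part of this patch), a caller might translate a user-facing flag into a Mode before driving a writer; the flag spellings and the parseDiffMode helper below are assumptions:

package main

import "fmt"

// Mode mirrors the enum introduced above: it selects how a combined
// old/new row pair is presented.
type Mode int

const (
	ModeRow     Mode = 0
	ModeLine    Mode = 1
	ModeInPlace Mode = 2
	ModeContext Mode = 3
)

// parseDiffMode translates a hypothetical CLI flag value into a Mode.
func parseDiffMode(s string) (Mode, error) {
	switch s {
	case "row":
		return ModeRow, nil
	case "line":
		return ModeLine, nil
	case "in-place":
		return ModeInPlace, nil
	case "context":
		return ModeContext, nil
	default:
		return ModeRow, fmt.Errorf("unknown diff mode %q", s)
	}
}

func main() {
	m, err := parseDiffMode("line")
	fmt.Println(m, err) // prints: 1 <nil>
}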
@@ -34,16 +34,16 @@ import (
var ErrPrimaryKeySetChanged = errors.New("primary key set changed")
type DiffSummaryProgress struct {
type DiffStatProgress struct {
Adds, Removes, Changes, CellChanges, NewRowSize, OldRowSize, NewCellSize, OldCellSize uint64
}
type prollyReporter func(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffSummaryProgress) error
type nomsReporter func(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error
type prollyReporter func(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffStatProgress) error
type nomsReporter func(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffStatProgress) error
// Summary reports a summary of diff changes between two values
// Stat reports statistics of diff changes between two values
// todo: make package private once dolthub is migrated
func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.Index, fromSch, toSch schema.Schema) (err error) {
func Stat(ctx context.Context, ch chan DiffStatProgress, from, to durable.Index, fromSch, toSch schema.Schema) (err error) {
fc, err := from.Count()
if err != nil {
return err
@@ -52,7 +52,7 @@ func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.
if err != nil {
return err
}
ch <- DiffSummaryProgress{OldRowSize: fc, NewRowSize: tc}
ch <- DiffStatProgress{OldRowSize: fc, NewRowSize: tc}
fk, tk := schema.IsKeyless(fromSch), schema.IsKeyless(toSch)
var keyless bool
@@ -69,15 +69,15 @@ func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.
return diffNomsMaps(ctx, ch, keyless, from, to, fromSch, toSch)
}
// SummaryForTableDelta pushes diff summary progress messages for the table delta given to the channel given
func SummaryForTableDelta(ctx context.Context, ch chan DiffSummaryProgress, td TableDelta) error {
// StatForTableDelta pushes diff stat progress messages for the table delta given to the channel given
func StatForTableDelta(ctx context.Context, ch chan DiffStatProgress, td TableDelta) error {
fromSch, toSch, err := td.GetSchemas(ctx)
if err != nil {
return errhand.BuildDError("cannot retrieve schema for table %s", td.ToName).AddCause(err).Build()
}
if !schema.ArePrimaryKeySetsDiffable(td.Format(), fromSch, toSch) {
return fmt.Errorf("failed to compute diff summary for table %s: %w", td.CurName(), ErrPrimaryKeySetChanged)
return fmt.Errorf("failed to compute diff stat for table %s: %w", td.CurName(), ErrPrimaryKeySetChanged)
}
keyless, err := td.IsKeyless(ctx)
@@ -97,7 +97,7 @@ func SummaryForTableDelta(ctx context.Context, ch chan DiffSummaryProgress, td T
}
}
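A minimal sketch of consuming the renamed API, mirroring the calcTableMergeStats call site later in this patch; the aggregateStats name, the error channel, and the diff import path are assumptions:

import (
	"context"

	"github.com/dolthub/dolt/go/libraries/doltcore/diff"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
)

// aggregateStats runs diff.Stat in a goroutine and drains its
// DiffStatProgress channel into a single tally.
func aggregateStats(ctx context.Context, from, to durable.Index, fromSch, toSch schema.Schema) (diff.DiffStatProgress, error) {
	ch := make(chan diff.DiffStatProgress)
	errCh := make(chan error, 1)
	go func() {
		defer close(ch)
		errCh <- diff.Stat(ctx, ch, from, to, fromSch, toSch)
	}()

	var acc diff.DiffStatProgress
	for p := range ch {
		acc.Adds += p.Adds
		acc.Removes += p.Removes
		acc.Changes += p.Changes
		acc.CellChanges += p.CellChanges
		acc.OldRowSize += p.OldRowSize
		acc.NewRowSize += p.NewRowSize
		acc.OldCellSize += p.OldCellSize
		acc.NewCellSize += p.NewCellSize
	}
	return acc, <-errCh
}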
func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless bool, from, to durable.Index, fromSch, toSch schema.Schema) error {
func diffProllyTrees(ctx context.Context, ch chan DiffStatProgress, keyless bool, from, to durable.Index, fromSch, toSch schema.Schema) error {
_, vMapping, err := schema.MapSchemaBasedOnTagAndName(fromSch, toSch)
if err != nil {
return err
@@ -123,7 +123,7 @@ func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless b
}
ctc := uint64(len(toSch.GetAllCols().GetColumns())) * tc
rpr = reportPkChanges
ch <- DiffSummaryProgress{
ch <- DiffStatProgress{
OldRowSize: fc,
NewRowSize: tc,
OldCellSize: cfc,
@@ -140,7 +140,7 @@ func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless b
return nil
}
func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool, fromRows durable.Index, toRows durable.Index, fromSch, toSch schema.Schema) error {
func diffNomsMaps(ctx context.Context, ch chan DiffStatProgress, keyless bool, fromRows durable.Index, toRows durable.Index, fromSch, toSch schema.Schema) error {
var rpr nomsReporter
if keyless {
rpr = reportNomsKeylessChanges
@@ -156,7 +156,7 @@ func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool
}
ctc := uint64(len(toSch.GetAllCols().GetColumns())) * tc
rpr = reportNomsPkChanges
ch <- DiffSummaryProgress{
ch <- DiffStatProgress{
OldRowSize: fc,
NewRowSize: tc,
OldCellSize: cfc,
@@ -164,10 +164,10 @@ func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool
}
}
return summaryWithReporter(ctx, ch, durable.NomsMapFromIndex(fromRows), durable.NomsMapFromIndex(toRows), rpr, fromSch, toSch)
return statWithReporter(ctx, ch, durable.NomsMapFromIndex(fromRows), durable.NomsMapFromIndex(toRows), rpr, fromSch, toSch)
}
func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Map, rpr nomsReporter, fromSch, toSch schema.Schema) (err error) {
func statWithReporter(ctx context.Context, ch chan DiffStatProgress, from, to types.Map, rpr nomsReporter, fromSch, toSch schema.Schema) (err error) {
ad := NewAsyncDiffer(1024)
ad.Start(ctx, from, to)
defer func() {
@@ -199,50 +199,50 @@ func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from,
return nil
}
func reportPkChanges(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffSummaryProgress) error {
var sum DiffSummaryProgress
func reportPkChanges(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffStatProgress) error {
var stat DiffStatProgress
switch change.Type {
case tree.AddedDiff:
sum.Adds++
stat.Adds++
case tree.RemovedDiff:
sum.Removes++
stat.Removes++
case tree.ModifiedDiff:
sum.CellChanges = prollyCountCellDiff(vMapping, fromD, toD, val.Tuple(change.From), val.Tuple(change.To))
sum.Changes++
stat.CellChanges = prollyCountCellDiff(vMapping, fromD, toD, val.Tuple(change.From), val.Tuple(change.To))
stat.Changes++
default:
return errors.New("unknown change type")
}
select {
case ch <- sum:
case ch <- stat:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func reportKeylessChanges(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffSummaryProgress) error {
var sum DiffSummaryProgress
func reportKeylessChanges(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffStatProgress) error {
var stat DiffStatProgress
var n, n2 uint64
switch change.Type {
case tree.AddedDiff:
n, _ = toD.GetUint64(0, val.Tuple(change.To))
sum.Adds += n
stat.Adds += n
case tree.RemovedDiff:
n, _ = fromD.GetUint64(0, val.Tuple(change.From))
sum.Removes += n
stat.Removes += n
case tree.ModifiedDiff:
n, _ = fromD.GetUint64(0, val.Tuple(change.From))
n2, _ = toD.GetUint64(0, val.Tuple(change.To))
if n < n2 {
sum.Adds += n2 - n
stat.Adds += n2 - n
} else {
sum.Removes += n - n2
stat.Removes += n - n2
}
default:
return errors.New("unknown change type")
}
select {
case ch <- sum:
case ch <- stat:
return nil
case <-ctx.Done():
return ctx.Err()
@@ -280,13 +280,13 @@ func prollyCountCellDiff(mapping val.OrdinalMapping, fromD, toD val.TupleDesc, f
return changed
}
func reportNomsPkChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error {
var summary DiffSummaryProgress
func reportNomsPkChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffStatProgress) error {
var stat DiffStatProgress
switch change.ChangeType {
case types.DiffChangeAdded:
summary = DiffSummaryProgress{Adds: 1}
stat = DiffStatProgress{Adds: 1}
case types.DiffChangeRemoved:
summary = DiffSummaryProgress{Removes: 1}
stat = DiffStatProgress{Removes: 1}
case types.DiffChangeModified:
oldTuple := change.OldValue.(types.Tuple)
newTuple := change.NewValue.(types.Tuple)
@@ -294,19 +294,19 @@ func reportNomsPkChanges(ctx context.Context, change *diff.Difference, fromSch,
if err != nil {
return err
}
summary = DiffSummaryProgress{Changes: 1, CellChanges: cellChanges}
stat = DiffStatProgress{Changes: 1, CellChanges: cellChanges}
default:
return errors.New("unknown change type")
}
select {
case ch <- summary:
case ch <- stat:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func reportNomsKeylessChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffSummaryProgress) error {
func reportNomsKeylessChanges(ctx context.Context, change *diff.Difference, fromSch, toSch schema.Schema, ch chan<- DiffStatProgress) error {
var oldCard uint64
if change.OldValue != nil {
v, err := change.OldValue.(types.Tuple).Get(row.KeylessCardinalityValIdx)
@@ -325,18 +325,18 @@ func reportNomsKeylessChanges(ctx context.Context, change *diff.Difference, from
newCard = uint64(v.(types.Uint))
}
var summary DiffSummaryProgress
var stat DiffStatProgress
delta := int64(newCard) - int64(oldCard)
if delta > 0 {
summary = DiffSummaryProgress{Adds: uint64(delta)}
stat = DiffStatProgress{Adds: uint64(delta)}
} else if delta < 0 {
summary = DiffSummaryProgress{Removes: uint64(-delta)}
stat = DiffStatProgress{Removes: uint64(-delta)}
} else {
return fmt.Errorf("diff with delta = 0 for key: %s", change.KeyValue.HumanReadableString())
}
select {
case ch <- summary:
case ch <- stat:
return nil
case <-ctx.Done():
return ctx.Err()
@@ -57,6 +57,15 @@ type TableDelta struct {
FromFksParentSch map[string]schema.Schema
}
type TableDeltaSummary struct {
DiffType string
DataChange bool
SchemaChange bool
TableName string
FromTableName string
ToTableName string
}
// GetStagedUnstagedTableDeltas represents staged and unstaged changes as TableDelta slices.
func GetStagedUnstagedTableDeltas(ctx context.Context, roots doltdb.Roots) (staged, unstaged []TableDelta, err error) {
staged, err = GetTableDeltas(ctx, roots.Head, roots.Staged)
@@ -387,6 +396,89 @@ func (td TableDelta) IsKeyless(ctx context.Context) (bool, error) {
}
}
// isTableDataEmpty returns true if the table does not contain any data
func isTableDataEmpty(ctx context.Context, table *doltdb.Table) (bool, error) {
rowData, err := table.GetRowData(ctx)
if err != nil {
return false, err
}
return rowData.Empty()
}
// GetSummary returns a summary of the table delta.
func (td TableDelta) GetSummary(ctx context.Context) (*TableDeltaSummary, error) {
// Dropping a table is always a schema change, and also a data change if the table contained data
if td.IsDrop() {
isEmpty, err := isTableDataEmpty(ctx, td.FromTable)
if err != nil {
return nil, err
}
return &TableDeltaSummary{
TableName: td.FromName,
FromTableName: td.FromName,
DataChange: !isEmpty,
SchemaChange: true,
DiffType: "dropped",
}, nil
}
// Renaming a table is always a schema change, and also a data change if the table data differs
if td.IsRename() {
dataChanged, err := td.HasHashChanged()
if err != nil {
return nil, err
}
return &TableDeltaSummary{
TableName: td.ToName,
FromTableName: td.FromName,
ToTableName: td.ToName,
DataChange: dataChanged,
SchemaChange: true,
DiffType: "renamed",
}, nil
}
// Creating a table is always a schema change, and also a data change if data was inserted
if td.IsAdd() {
isEmpty, err := isTableDataEmpty(ctx, td.ToTable)
if err != nil {
return nil, err
}
return &TableDeltaSummary{
TableName: td.ToName,
ToTableName: td.ToName,
DataChange: !isEmpty,
SchemaChange: true,
DiffType: "added",
}, nil
}
// TODO: Renamed columns without a data change are not accounted for here;
// `dataChanged` is true when it should be false
dataChanged, err := td.HasHashChanged()
if err != nil {
return nil, err
}
schemaChanged, err := td.HasSchemaChanged(ctx)
if err != nil {
return nil, err
}
return &TableDeltaSummary{
TableName: td.FromName,
FromTableName: td.FromName,
ToTableName: td.ToName,
DataChange: dataChanged,
SchemaChange: schemaChanged,
DiffType: "modified",
}, nil
}
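A sketch of a call site for the new GetSummary API (an illustration, not part of this patch); describeDelta, its output format, and the diff import path are assumptions:

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/libraries/doltcore/diff"
)

// describeDelta prints a one-line classification of a table delta.
func describeDelta(ctx context.Context, td diff.TableDelta) error {
	s, err := td.GetSummary(ctx)
	if err != nil {
		return err
	}
	if s.DiffType == "renamed" {
		fmt.Printf("renamed: %s -> %s\n", s.FromTableName, s.ToTableName)
		return nil
	}
	fmt.Printf("%s: %s (data=%t, schema=%t)\n", s.DiffType, s.TableName, s.DataChange, s.SchemaChange)
	return nil
}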
// GetRowData returns the table's row data at the fromRoot and toRoot, or an empty map if the table did not exist.
func (td TableDelta) GetRowData(ctx context.Context) (from, to durable.Index, err error) {
if td.FromTable == nil && td.ToTable == nil {
+3 -12
View File
@@ -227,23 +227,14 @@ func setupSqlEngine(t *testing.T, ctx context.Context) (eng *engine.SqlEngine, d
}
eng, err = engine.NewSqlEngine(ctx, mrEnv, engine.FormatNull, &engine.SqlEngineConfig{
InitialDb: testDB,
IsReadOnly: false,
PrivFilePath: "",
ServerUser: "root",
ServerPass: "",
ServerHost: "localhost",
Autocommit: true,
ServerUser: "root",
ServerHost: "localhost",
Autocommit: true,
})
if err != nil {
panic(err)
}
sqlCtx, err := eng.NewContext(ctx)
require.NoError(t, err)
sqlCtx.Session.SetClient(sql.Client{
User: "root", Address: "%",
})
return
}
@@ -39,7 +39,7 @@ const (
)
type MultiRepoTestSetup struct {
MrEnv env.MultiRepoEnv
envs map[string]*env.DoltEnv
Remote string
DoltDBs map[string]*doltdb.DoltDB
DbNames []string
@@ -56,7 +56,6 @@ const (
defaultBranch = "main"
)
// TODO this is not a proper builder, dbs need to be added before remotes
func NewMultiRepoTestSetup(errhand func(args ...interface{})) *MultiRepoTestSetup {
dir, err := os.MkdirTemp("", "")
if err != nil {
@@ -69,7 +68,7 @@ func NewMultiRepoTestSetup(errhand func(args ...interface{})) *MultiRepoTestSetu
}
return &MultiRepoTestSetup{
MrEnv: env.MultiRepoEnv{},
envs: make(map[string]*env.DoltEnv),
Remotes: make(map[string]env.Remote),
DoltDBs: make(map[string]*doltdb.DoltDB, 0),
DbNames: make([]string, 0),
@@ -80,6 +79,10 @@ func NewMultiRepoTestSetup(errhand func(args ...interface{})) *MultiRepoTestSetu
}
}
func (mr *MultiRepoTestSetup) GetEnv(dbName string) *env.DoltEnv {
return mr.envs[dbName]
}
func (mr *MultiRepoTestSetup) homeProv() (string, error) {
return mr.Home, nil
}
@@ -130,7 +133,7 @@ func (mr *MultiRepoTestSetup) NewDB(dbName string) {
dEnv = env.Load(context.Background(), mr.homeProv, filesys.LocalFS, doltdb.LocalDirDoltDB, "test")
mr.MrEnv.AddEnv(dbName, dEnv)
mr.envs[dbName] = dEnv
mr.DoltDBs[dbName] = ddb
mr.DbNames = append(mr.DbNames, dbName)
mr.DbPaths[dbName] = repo
@@ -143,17 +146,16 @@ func (mr *MultiRepoTestSetup) NewRemote(remoteName string) {
rem := env.NewRemote(remoteName, remotePath, nil)
mr.MrEnv.Iter(func(name string, dEnv *env.DoltEnv) (stop bool, err error) {
for _, dEnv := range mr.envs {
dEnv.RepoState.AddRemote(rem)
dEnv.RepoState.Save(filesys.LocalFS)
return false, nil
})
}
mr.Remotes[remoteName] = rem
}
func (mr *MultiRepoTestSetup) NewBranch(dbName, branchName string) {
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
err := actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), branchName, "head", false)
if err != nil {
mr.Errhand(err)
@@ -161,7 +163,7 @@ func (mr *MultiRepoTestSetup) NewBranch(dbName, branchName string) {
}
func (mr *MultiRepoTestSetup) CheckoutBranch(dbName, branchName string) {
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
err := actions.CheckoutBranch(context.Background(), dEnv, branchName, false)
if err != nil {
mr.Errhand(err)
@@ -173,7 +175,7 @@ func (mr *MultiRepoTestSetup) CloneDB(fromRemote, dbName string) {
cloneDir := filepath.Join(mr.Root, dbName)
r := mr.GetRemote(fromRemote)
srcDB, err := r.GetRemoteDB(ctx, types.Format_Default, mr.MrEnv.GetEnv(dbName))
srcDB, err := r.GetRemoteDB(ctx, types.Format_Default, mr.envs[dbName])
if err != nil {
mr.Errhand(err)
}
@@ -206,7 +208,7 @@ func (mr *MultiRepoTestSetup) CloneDB(fromRemote, dbName string) {
dEnv = env.Load(context.Background(), mr.homeProv, filesys.LocalFS, doltdb.LocalDirDoltDB, "test")
mr.MrEnv.AddEnv(dbName, dEnv)
mr.envs[dbName] = dEnv
mr.DoltDBs[dbName] = ddb
mr.DbNames = append(mr.DbNames, dbName)
mr.DbPaths[dbName] = cloneDir
@@ -230,7 +232,7 @@ func (mr *MultiRepoTestSetup) GetDB(dbName string) *doltdb.DoltDB {
func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit {
ctx := context.Background()
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
panic("couldn't get working set: " + err.Error())
@@ -297,7 +299,7 @@ func createTestDataTable() (*table.InMemTable, schema.Schema) {
}
func (mr *MultiRepoTestSetup) CreateTable(dbName, tblName string) {
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
imt, sch := createTestDataTable()
rows := make([]row.Row, imt.NumRows())
@@ -314,7 +316,7 @@ func (mr *MultiRepoTestSetup) CreateTable(dbName, tblName string) {
}
func (mr *MultiRepoTestSetup) StageAll(dbName string) {
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
ctx := context.Background()
roots, err := dEnv.Roots(ctx)
@@ -334,7 +336,7 @@ func (mr *MultiRepoTestSetup) StageAll(dbName string) {
func (mr *MultiRepoTestSetup) PushToRemote(dbName, remoteName, branchName string) {
ctx := context.Background()
dEnv := mr.MrEnv.GetEnv(dbName)
dEnv := mr.envs[dbName]
ap := cli.CreatePushArgParser()
apr, err := ap.Parse([]string{remoteName, branchName})
@@ -346,7 +348,7 @@ func (mr *MultiRepoTestSetup) PushToRemote(dbName, remoteName, branchName string
mr.Errhand(fmt.Sprintf("Failed to push remote: %s", err.Error()))
}
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), mr.MrEnv.GetEnv(dbName))
remoteDB, err := opts.Remote.GetRemoteDB(ctx, dEnv.DoltDB.ValueReadWriter().Format(), mr.envs[dbName])
if err != nil {
mr.Errhand(actions.HandleInitRemoteStorageClientErr(opts.Remote.Name, opts.Remote.Url, err))
}
+1 -1
View File
@@ -192,7 +192,7 @@ func GetDefaultInitBranch(cfg config.ReadableConfig) string {
// Valid returns whether this environment has been properly initialized. This is useful because although every command
// gets a DoltEnv, not all of them require it, and we allow invalid dolt envs to be passed around for this reason.
func (dEnv *DoltEnv) Valid() bool {
return dEnv.CfgLoadErr == nil && dEnv.DBLoadError == nil && dEnv.HasDoltDir() && dEnv.HasDoltDataDir()
return dEnv != nil && dEnv.CfgLoadErr == nil && dEnv.DBLoadError == nil && dEnv.HasDoltDir() && dEnv.HasDoltDataDir()
}
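The added nil check makes Valid safe to call on a nil receiver, which matters because MultiEnvForDirectory (below) now calls dEnv.Valid() on whatever env it is handed. A minimal sketch, assuming the doltcore/env import path:

package main

import "github.com/dolthub/dolt/go/libraries/doltcore/env"

func main() {
	var dEnv *env.DoltEnv // nil: no repository loaded yet
	// Before this change, Valid() dereferenced a nil receiver here;
	// now it simply reports an invalid environment.
	if !dEnv.Valid() {
		println("not a valid dolt environment")
	}
}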
// initWorkingSetFromRepoState sets the working set for the env's head to mirror the contents of the repo state file.
+153 -206
View File
@@ -44,6 +44,11 @@ type EnvNameAndPath struct {
Path string
}
type NamedEnv struct {
name string
env *DoltEnv
}
// MultiRepoEnv is a type used to store multiple environments which can be retrieved by name
type MultiRepoEnv struct {
envs []NamedEnv
@@ -52,9 +57,144 @@ type MultiRepoEnv struct {
ignoreLockFile bool
}
type NamedEnv struct {
name string
env *DoltEnv
// MultiEnvForDirectory returns a MultiRepoEnv for the directory rooted at the file system given. If the doltEnv from
// the invoking context is non-nil and valid, it is included in the returned MultiRepoEnv and will be the first
// database in all iterations.
func MultiEnvForDirectory(
ctx context.Context,
config config.ReadWriteConfig,
fs filesys.Filesys,
version string,
ignoreLockFile bool,
dEnv *DoltEnv,
) (*MultiRepoEnv, error) {
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: fs,
cfg: config,
ignoreLockFile: ignoreLockFile,
}
// Load current fs and put into mr env
var dbName string
if _, ok := fs.(*filesys.InMemFS); ok {
dbName = "dolt"
} else {
path, err := fs.Abs("")
if err != nil {
return nil, err
}
envName := getRepoRootDir(path, string(os.PathSeparator))
dbName = dirToDBName(envName)
}
envSet := map[string]*DoltEnv{}
if dEnv.Valid() {
envSet[dbName] = dEnv
}
// If the directory contains other directories, try to load them as additional databases
fs.Iter(".", false, func(path string, size int64, isDir bool) (stop bool) {
if !isDir {
return false
}
dir := filepath.Base(path)
newFs, err := fs.WithWorkingDir(dir)
if err != nil {
return false
}
newEnv := Load(ctx, GetCurrentUserHomeDir, newFs, doltdb.LocalDirDoltDB, dEnv.Version)
if newEnv.Valid() {
envSet[dirToDBName(dir)] = newEnv
}
return false
})
enforceSingleFormat(envSet)
// if the current directory database is in our set, add it first so it will be the current database
var ok bool
if dEnv, ok = envSet[dbName]; ok {
mrEnv.addEnv(dbName, dEnv)
delete(envSet, dbName)
}
for dbName, dEnv = range envSet {
mrEnv.addEnv(dbName, dEnv)
}
return mrEnv, nil
}
// MultiEnvForPaths takes a variable list of EnvNameAndPath objects loads each of the environments, and returns a new
// MultiRepoEnv
func MultiEnvForPaths(
ctx context.Context,
hdp HomeDirProvider,
cfg config.ReadWriteConfig,
fs filesys.Filesys,
version string,
ignoreLockFile bool,
envNamesAndPaths ...EnvNameAndPath,
) (*MultiRepoEnv, error) {
nameToPath := make(map[string]string)
for _, nameAndPath := range envNamesAndPaths {
existingPath, ok := nameToPath[nameAndPath.Name]
if ok {
if existingPath == nameAndPath.Path {
continue
}
return nil, fmt.Errorf("databases at paths '%s' and '%s' both attempted to load with the name '%s'", existingPath, nameAndPath.Path, nameAndPath.Name)
}
nameToPath[nameAndPath.Name] = nameAndPath.Path
}
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: fs,
cfg: cfg,
ignoreLockFile: ignoreLockFile,
}
envSet := map[string]*DoltEnv{}
for name, path := range nameToPath {
absPath, err := fs.Abs(path)
if err != nil {
return nil, err
}
fsForEnv, err := filesys.LocalFilesysWithWorkingDir(absPath)
if err != nil {
return nil, err
}
urlStr := earl.FileUrlFromPath(filepath.Join(absPath, dbfactory.DoltDataDir), os.PathSeparator)
dEnv := Load(ctx, hdp, fsForEnv, urlStr, version)
if dEnv.RSLoadErr != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.RSLoadErr.Error())
} else if dEnv.DBLoadError != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.DBLoadError.Error())
} else if dEnv.CfgLoadErr != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.CfgLoadErr.Error())
}
envSet[name] = dEnv
}
enforceSingleFormat(envSet)
for dbName, dEnv := range envSet {
mrEnv.addEnv(dbName, dEnv)
}
return mrEnv, nil
}
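A minimal sketch of loading databases by explicit name/path pairs with MultiEnvForPaths; the paths, version string, and the config/filesys import paths are assumptions based on the repository layout seen elsewhere in this patch:

import (
	"context"

	"github.com/dolthub/dolt/go/libraries/doltcore/env"
	"github.com/dolthub/dolt/go/libraries/utils/config"
	"github.com/dolthub/dolt/go/libraries/utils/filesys"
)

// loadPair loads two sibling repositories under distinct database names.
func loadPair(ctx context.Context, cfg config.ReadWriteConfig) (*env.MultiRepoEnv, error) {
	return env.MultiEnvForPaths(ctx, env.GetCurrentUserHomeDir, cfg, filesys.LocalFS, "test", false,
		env.EnvNameAndPath{Name: "db1", Path: "/var/dolt/db1"},
		env.EnvNameAndPath{Name: "db2", Path: "/var/dolt/db2"},
	)
}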
func (mrEnv *MultiRepoEnv) FileSystem() filesys.Filesys {
@@ -72,30 +212,14 @@ func (mrEnv *MultiRepoEnv) Config() config.ReadWriteConfig {
return mrEnv.cfg
}
// TODO: un export
// AddEnv adds an environment to the MultiRepoEnv by name
func (mrEnv *MultiRepoEnv) AddEnv(name string, dEnv *DoltEnv) {
// addEnv adds an environment to the MultiRepoEnv by name
func (mrEnv *MultiRepoEnv) addEnv(name string, dEnv *DoltEnv) {
mrEnv.envs = append(mrEnv.envs, NamedEnv{
name: name,
env: dEnv,
})
}
// AddOrReplaceEnv adds the specified DoltEnv to this MultiRepoEnv, replacing
// any existing environment in the MultiRepoEnv with the same name.
func (mrEnv *MultiRepoEnv) AddOrReplaceEnv(name string, dEnv *DoltEnv) {
// TODO: Modeling NamedEnvs as a map could probably simplify this file
newNamedEnvs := make([]NamedEnv, 0, len(mrEnv.envs))
for _, namedEnv := range mrEnv.envs {
if namedEnv.name != name {
newNamedEnvs = append(newNamedEnvs, namedEnv)
}
}
newNamedEnvs = append(newNamedEnvs, NamedEnv{name: name, env: dEnv})
mrEnv.envs = newNamedEnvs
}
// GetEnv returns the env with the name given, or nil if no such env exists
func (mrEnv *MultiRepoEnv) GetEnv(name string) *DoltEnv {
var found *DoltEnv
@@ -126,26 +250,16 @@ func (mrEnv *MultiRepoEnv) Iter(cb func(name string, dEnv *DoltEnv) (stop bool,
return nil
}
// GetWorkingRoots returns a map with entries for each environment name with a value equal to the working root
// for that environment
func (mrEnv *MultiRepoEnv) GetWorkingRoots(ctx context.Context) (map[string]*doltdb.RootValue, error) {
roots := make(map[string]*doltdb.RootValue)
err := mrEnv.Iter(func(name string, dEnv *DoltEnv) (stop bool, err error) {
root, err := dEnv.WorkingRoot(ctx)
if err != nil {
return true, err
}
roots[name] = root
return false, nil
// GetFirstDatabase returns the name of the first database in the MultiRepoEnv. This will be the database in the
// current working directory if applicable, or the first database alphabetically otherwise.
func (mrEnv *MultiRepoEnv) GetFirstDatabase() string {
var currentDb string
_ = mrEnv.Iter(func(name string, _ *DoltEnv) (stop bool, err error) {
currentDb = name
return true, nil
})
if err != nil {
return nil, err
}
return roots, err
return currentDb
}
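GetFirstDatabase replaces GetWorkingRoots for callers that only need a default database name. A sketch of the intended use, mirroring the engineFromEnvironment change later in this patch; seedSession is an assumption:

import (
	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/env"
)

// seedSession points a SQL session at the MultiRepoEnv's first database,
// typically the one in the current working directory.
func seedSession(mrEnv *env.MultiRepoEnv, sqlCtx *sql.Context) {
	sqlCtx.SetCurrentDatabase(mrEnv.GetFirstDatabase())
}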
// IsLocked returns true if any env is locked
@@ -236,150 +350,6 @@ func getRepoRootDir(path, pathSeparator string) string {
return name
}
// MultiEnvForDirectory returns a MultiRepoEnv for the directory rooted at the file system given
func MultiEnvForDirectory(
ctx context.Context,
config config.ReadWriteConfig,
fs filesys.Filesys,
version string,
ignoreLockFile bool,
oldDEnv *DoltEnv, // TODO: eventually get rid of this
) (*MultiRepoEnv, error) {
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: fs,
cfg: config,
ignoreLockFile: ignoreLockFile,
}
// Load current fs and put into mr env
var dEnv *DoltEnv
var dbName string
// Only directly copy the oldDEnv for in-memory filesystems; something is wrong with loading them
if _, ok := fs.(*filesys.InMemFS); ok {
dbName = "dolt"
dEnv = oldDEnv
} else {
path, err := fs.Abs("")
if err != nil {
return nil, err
}
envName := getRepoRootDir(path, string(os.PathSeparator))
dbName = dirToDBName(envName)
dEnv = oldDEnv
// TODO: idk how or why, but this breaks docs.bats
//dEnv = Load(ctx, GetCurrentUserHomeDir, fs, doltdb.LocalDirDoltDB, version)
}
envSet := map[string]*DoltEnv{}
if dEnv.Valid() {
envSet[dbName] = dEnv
}
// If the directory contains other directories, try to load them as additional databases
fs.Iter(".", false, func(path string, size int64, isDir bool) (stop bool) {
if !isDir {
return false
}
dir := filepath.Base(path)
newFs, err := fs.WithWorkingDir(dir)
if err != nil {
return false
}
newEnv := Load(ctx, GetCurrentUserHomeDir, newFs, doltdb.LocalDirDoltDB, dEnv.Version)
if newEnv.Valid() {
envSet[dirToDBName(dir)] = newEnv
}
return false
})
enforceSingleFormat(envSet)
// if the current directory database is in our set,
// add it first so it will be the current database
var ok bool
if dEnv, ok = envSet[dbName]; ok {
mrEnv.AddEnv(dbName, dEnv)
delete(envSet, dbName)
}
for dbName, dEnv = range envSet {
mrEnv.AddEnv(dbName, dEnv)
}
return mrEnv, nil
}
// MultiEnvForPaths takes a variable list of EnvNameAndPath objects loads each of the environments, and returns a new
// MultiRepoEnv
func MultiEnvForPaths(
ctx context.Context,
hdp HomeDirProvider,
cfg config.ReadWriteConfig,
fs filesys.Filesys,
version string,
ignoreLockFile bool,
envNamesAndPaths ...EnvNameAndPath,
) (*MultiRepoEnv, error) {
nameToPath := make(map[string]string)
for _, nameAndPath := range envNamesAndPaths {
existingPath, ok := nameToPath[nameAndPath.Name]
if ok {
if existingPath == nameAndPath.Path {
continue
}
return nil, fmt.Errorf("databases at paths '%s' and '%s' both attempted to load with the name '%s'", existingPath, nameAndPath.Path, nameAndPath.Name)
}
nameToPath[nameAndPath.Name] = nameAndPath.Path
}
mrEnv := &MultiRepoEnv{
envs: make([]NamedEnv, 0),
fs: fs,
cfg: cfg,
ignoreLockFile: ignoreLockFile,
}
envSet := map[string]*DoltEnv{}
for name, path := range nameToPath {
absPath, err := fs.Abs(path)
if err != nil {
return nil, err
}
fsForEnv, err := filesys.LocalFilesysWithWorkingDir(absPath)
if err != nil {
return nil, err
}
urlStr := earl.FileUrlFromPath(filepath.Join(absPath, dbfactory.DoltDataDir), os.PathSeparator)
dEnv := Load(ctx, hdp, fsForEnv, urlStr, version)
if dEnv.RSLoadErr != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.RSLoadErr.Error())
} else if dEnv.DBLoadError != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.DBLoadError.Error())
} else if dEnv.CfgLoadErr != nil {
return nil, fmt.Errorf("error loading environment '%s' at path '%s': %s", name, absPath, dEnv.CfgLoadErr.Error())
}
envSet[name] = dEnv
}
enforceSingleFormat(envSet)
for dbName, dEnv := range envSet {
mrEnv.AddEnv(dbName, dEnv)
}
return mrEnv, nil
}
// enforceSingleFormat enforces the constraint that all databases in
// a multi-database environment have the same NomsBinFormat.
// Databases are removed from the MultiRepoEnv to ensure this is true.
@@ -410,29 +380,6 @@ func enforceSingleFormat(envSet map[string]*DoltEnv) {
}
}
func DBNamesAndPathsFromDir(fs filesys.Filesys, path string) ([]EnvNameAndPath, error) {
var envNamesAndPaths []EnvNameAndPath
err := fs.Iter(path, false, func(path string, size int64, isDir bool) (stop bool) {
if isDir {
dirName := filepath.Base(path)
if dirName[0] == '.' {
return false
}
name := dirToDBName(dirName)
envNamesAndPaths = append(envNamesAndPaths, EnvNameAndPath{Name: name, Path: path})
}
return false
})
if err != nil {
return nil, err
}
return envNamesAndPaths, nil
}
func dirToDBName(dirName string) string {
dbName := strings.TrimSpace(dirName)
dbName = strings.Map(func(r rune) rune {
+11 -15
View File
@@ -284,7 +284,7 @@ func TestMergeConcurrency(t *testing.T) {
ctx := context.Background()
dEnv := setupConcurrencyTest(t, ctx)
defer dEnv.DoltDB.Close()
eng := engineFromEnvironment(ctx, dEnv)
_, eng := engineFromEnvironment(ctx, dEnv)
eg, ctx := errgroup.WithContext(ctx)
for i := 0; i < concurrentThreads; i++ {
@@ -342,37 +342,33 @@ func runConcurrentTxs(ctx context.Context, eng *engine.SqlEngine, seed int) erro
func setupConcurrencyTest(t *testing.T, ctx context.Context) (dEnv *env.DoltEnv) {
dEnv = dtu.CreateTestEnv()
eng := engineFromEnvironment(ctx, dEnv)
sqlCtx, err := eng.NewContext(ctx)
dbName, eng := engineFromEnvironment(ctx, dEnv)
sqlCtx, err := eng.NewLocalContext(ctx)
require.NoError(t, err)
sqlCtx.Session.SetClient(sql.Client{
User: "root", Address: "%",
})
sqlCtx.SetCurrentDatabase(dbName)
require.NoError(t, executeQuery(sqlCtx, eng, concurrentTable))
require.NoError(t, executeQuery(sqlCtx, eng, generateTestData()))
return
}
func engineFromEnvironment(ctx context.Context, dEnv *env.DoltEnv) (eng *engine.SqlEngine) {
func engineFromEnvironment(ctx context.Context, dEnv *env.DoltEnv) (dbName string, eng *engine.SqlEngine) {
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
panic(err)
}
eng, err = engine.NewSqlEngine(ctx, mrEnv, engine.FormatNull, &engine.SqlEngineConfig{
InitialDb: "dolt",
IsReadOnly: false,
PrivFilePath: "",
ServerUser: "root",
ServerPass: "",
ServerHost: "localhost",
Autocommit: true,
IsReadOnly: false,
ServerUser: "root",
ServerHost: "localhost",
Autocommit: true,
})
if err != nil {
panic(err)
}
return
return mrEnv.GetFirstDatabase(), eng
}
func executeQuery(ctx *sql.Context, eng *engine.SqlEngine, query string) error {
@@ -60,6 +60,7 @@ func mergeNomsTableData(
changeChan, mergeChangeChan := make(chan types.ValueChanged, 32), make(chan types.ValueChanged, 32)
originalCtx := ctx
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
@@ -197,7 +198,7 @@ func mergeNomsTableData(
return nil, types.EmptyMap, nil, err
}
mergedTable, err := tblEdit.Table(ctx)
mergedTable, err := tblEdit.Table(originalCtx)
if err != nil {
return nil, types.EmptyMap, nil, err
}
+2 -2
View File
@@ -486,10 +486,10 @@ func calcTableMergeStats(ctx context.Context, tbl *doltdb.Table, mergeTbl *doltd
}
ae := atomicerr.New()
ch := make(chan diff.DiffSummaryProgress)
ch := make(chan diff.DiffStatProgress)
go func() {
defer close(ch)
err := diff.Summary(ctx, ch, rows, mergeRows, sch, mergeSch)
err := diff.Stat(ctx, ch, rows, mergeRows, sch, mergeSch)
ae.SetIfError(err)
}()
@@ -77,6 +77,10 @@ func TestMigration(t *testing.T) {
query: "SELECT count(*) FROM dolt_log",
expected: []sql.Row{{int64(2)}},
},
{
query: "SELECT count(*) FROM `dolt/dolt_migrated_commits`.dolt_commit_mapping",
expected: []sql.Row{{int64(2)}},
},
},
},
{
@@ -109,6 +113,36 @@ func TestMigration(t *testing.T) {
},
},
},
{
name: "create more commits",
setup: []string{
"CREATE TABLE test (pk int primary key)",
"INSERT INTO test VALUES (1),(2),(3)",
"CALL dolt_commit('-Am', 'new table')",
"INSERT INTO test VALUES (4)",
"CALL dolt_commit('-am', 'added row 4')",
"INSERT INTO test VALUES (5)",
"CALL dolt_commit('-am', 'added row 5')",
},
asserts: []assertion{
{
query: "SELECT count(*) FROM dolt_log",
expected: []sql.Row{{int64(4)}},
},
{
query: "SELECT count(*) FROM `dolt/dolt_migrated_commits`.dolt_commit_mapping",
expected: []sql.Row{{int64(4)}},
},
{
query: "SELECT count(*) FROM `dolt/dolt_migrated_commits`.dolt_commit_mapping WHERE new_commit_hash IN (SELECT commit_hash FROM dolt_log)",
expected: []sql.Row{{int64(4)}},
},
{
query: "SELECT count(*) FROM `dolt/dolt_migrated_commits`.dolt_commit_mapping WHERE new_commit_hash NOT IN (SELECT commit_hash FROM dolt_log)",
expected: []sql.Row{{int64(0)}},
},
},
},
}
for _, test := range tests {
+193 -61
View File
@@ -17,8 +17,14 @@ package migrate
import (
"context"
"fmt"
"io"
"time"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/store/chunks"
@@ -31,37 +37,34 @@ import (
"github.com/dolthub/dolt/go/store/val"
)
type ChunkMapping interface {
Has(ctx context.Context, addr hash.Hash) (bool, error)
Get(ctx context.Context, addr hash.Hash) (hash.Hash, error)
Put(ctx context.Context, old, new hash.Hash) error
Close(ctx context.Context) error
}
const (
MigratedCommitsBranch = "dolt_migrated_commits"
MigratedCommitsTable = "dolt_commit_mapping"
)
type CommitStack interface {
Push(ctx context.Context, cm *doltdb.Commit) error
Pop(ctx context.Context) (*doltdb.Commit, error)
}
var (
mappingSchema, _ = schema.SchemaFromCols(schema.NewColCollection(
schema.NewColumn("old_commit_hash", 0, types.StringKind, true),
schema.NewColumn("new_commit_hash", 1, types.StringKind, false),
))
desc = val.NewTupleDescriptor(val.Type{Enc: val.StringEnc, Nullable: false})
)
type Progress interface {
ChunkMapping
CommitStack
// progress maintains the state of migration.
type progress struct {
stack []*doltdb.Commit
Log(ctx context.Context, format string, args ...any)
Close(ctx context.Context) error
}
// A memory stack with a persisted commit mapping.
type memoryStackProgress struct {
stack []*doltdb.Commit
// mapping tracks migrated commits;
// it maps old commit hashes to new hashes
mapping *prolly.MutableMap
kb, vb *val.TupleBuilder
buffPool pool.BuffPool
vs *types.ValueStore
cs chunks.ChunkStore
vs *types.ValueStore
cs chunks.ChunkStore
}
func newProgress(ctx context.Context, cs chunks.ChunkStore) (Progress, error) {
func newProgress(ctx context.Context, cs chunks.ChunkStore) (*progress, error) {
kd := val.NewTupleDescriptor(val.Type{
Enc: val.ByteStringEnc,
Nullable: false,
@@ -83,7 +86,7 @@ func newProgress(ctx context.Context, cs chunks.ChunkStore) (Progress, error) {
kb := val.NewTupleBuilder(kd)
vb := val.NewTupleBuilder(vd)
return &memoryStackProgress{
return &progress{
stack: make([]*doltdb.Commit, 0, 128),
mapping: mut,
kb: kb,
@@ -94,18 +97,18 @@ func newProgress(ctx context.Context, cs chunks.ChunkStore) (Progress, error) {
}, nil
}
func (mem *memoryStackProgress) Has(ctx context.Context, addr hash.Hash) (ok bool, err error) {
mem.kb.PutByteString(0, addr[:])
k := mem.kb.Build(mem.buffPool)
return mem.mapping.Has(ctx, k)
func (p *progress) Has(ctx context.Context, addr hash.Hash) (ok bool, err error) {
p.kb.PutByteString(0, addr[:])
k := p.kb.Build(p.buffPool)
return p.mapping.Has(ctx, k)
}
func (mem *memoryStackProgress) Get(ctx context.Context, old hash.Hash) (new hash.Hash, err error) {
mem.kb.PutByteString(0, old[:])
k := mem.kb.Build(mem.buffPool)
err = mem.mapping.Get(ctx, k, func(_, v val.Tuple) error {
func (p *progress) Get(ctx context.Context, old hash.Hash) (new hash.Hash, err error) {
p.kb.PutByteString(0, old[:])
k := p.kb.Build(p.buffPool)
err = p.mapping.Get(ctx, k, func(_, v val.Tuple) error {
if len(v) > 0 {
n, ok := mem.vb.Desc.GetBytes(0, v)
n, ok := p.vb.Desc.GetBytes(0, v)
if !ok {
return fmt.Errorf("failed to get string address from commit mapping value")
}
@@ -116,56 +119,185 @@ func (mem *memoryStackProgress) Get(ctx context.Context, old hash.Hash) (new has
return
}
func (mem *memoryStackProgress) Put(ctx context.Context, old, new hash.Hash) (err error) {
mem.kb.PutByteString(0, old[:])
k := mem.kb.Build(mem.buffPool)
mem.vb.PutByteString(0, new[:])
v := mem.vb.Build(mem.buffPool)
err = mem.mapping.Put(ctx, k, v)
func (p *progress) Put(ctx context.Context, old, new hash.Hash) (err error) {
p.kb.PutByteString(0, old[:])
k := p.kb.Build(p.buffPool)
p.vb.PutByteString(0, new[:])
v := p.vb.Build(p.buffPool)
err = p.mapping.Put(ctx, k, v)
return
}
func (mem *memoryStackProgress) Push(ctx context.Context, cm *doltdb.Commit) (err error) {
mem.stack = append(mem.stack, cm)
func (p *progress) Push(ctx context.Context, cm *doltdb.Commit) (err error) {
p.stack = append(p.stack, cm)
return
}
func (mem *memoryStackProgress) Pop(ctx context.Context) (cm *doltdb.Commit, err error) {
if len(mem.stack) == 0 {
func (p *progress) Pop(ctx context.Context) (cm *doltdb.Commit, err error) {
if len(p.stack) == 0 {
return nil, nil
}
top := len(mem.stack) - 1
cm = mem.stack[top]
mem.stack = mem.stack[:top]
top := len(p.stack) - 1
cm = p.stack[top]
p.stack = p.stack[:top]
return
}
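Note the empty-stack contract: Pop returns (nil, nil) rather than an error, so traversal code can simply loop until it sees nil. A self-contained toy illustrating the same contract (types invented for the example):

package main

import "fmt"

type commit struct{ id string }

type stack struct{ items []*commit }

func (s *stack) Push(c *commit) { s.items = append(s.items, c) }

// Pop mirrors the nil empty-stack return above: callers loop until nil.
func (s *stack) Pop() *commit {
	if len(s.items) == 0 {
		return nil
	}
	top := len(s.items) - 1
	c := s.items[top]
	s.items = s.items[:top]
	return c
}

func main() {
	s := &stack{}
	s.Push(&commit{"parent"})
	s.Push(&commit{"child"})
	for c := s.Pop(); c != nil; c = s.Pop() {
		fmt.Println(c.id) // prints "child" then "parent": last in, first out
	}
}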
func (mem *memoryStackProgress) Log(ctx context.Context, format string, args ...any) {
func (p *progress) Log(ctx context.Context, format string, args ...any) {
cli.Println(time.Now().UTC().String() + " " + fmt.Sprintf(format, args...))
}
func (mem *memoryStackProgress) Close(ctx context.Context) error {
m, err := mem.mapping.Map(ctx)
func (p *progress) Finalize(ctx context.Context) (prolly.Map, error) {
m, err := p.mapping.Map(ctx)
if err != nil {
return err
return prolly.Map{}, err
}
v := shim.ValueFromMap(m)
ref, err := mem.vs.WriteValue(ctx, v)
ref, err := p.vs.WriteValue(ctx, v)
if err != nil {
return err
return prolly.Map{}, err
}
last, err := mem.vs.Root(ctx)
last, err := p.vs.Root(ctx)
if err != nil {
return err
return prolly.Map{}, err
}
ok, err := mem.vs.Commit(ctx, last, last)
ok, err := p.vs.Commit(ctx, last, last)
if err != nil {
return err
}
if !ok {
return fmt.Errorf("failed to commit, manifest swapped out beneath us")
return prolly.Map{}, err
} else if !ok {
return prolly.Map{}, fmt.Errorf("failed to commit, manifest swapped out beneath us")
}
mem.Log(ctx, "Wrote commit mapping!! [commit_mapping_ref: %s]", ref.TargetHash().String())
return nil
p.Log(ctx, "Wrote commit mapping!! [commit_mapping_ref: %s]", ref.TargetHash().String())
p.Log(ctx, "Commit mapping allow mapping pre-migration commit hashes to post-migration commit hashes, "+
"it is available on branch '%s' in table '%s'", MigratedCommitsBranch, MigratedCommitsTable)
return m, nil
}
func persistMigratedCommitMapping(ctx context.Context, ddb *doltdb.DoltDB, mapping prolly.Map) error {
// create a new branch to persist the migrated commit mapping
init, err := ddb.ResolveCommitRef(ctx, ref.NewInternalRef(doltdb.CreationBranch))
if err != nil {
return err
}
br := ref.NewBranchRef(MigratedCommitsBranch)
err = ddb.NewBranchAtCommit(ctx, br, init)
if err != nil {
return err
}
ns, vrw := ddb.NodeStore(), ddb.ValueReadWriter()
m, err := prolly.NewMapFromTuples(ctx, ns, desc, desc)
if err != nil {
return err
}
rows := m.Mutate()
bld := val.NewTupleBuilder(desc)
// convert |mapping| values from hash.Hash to string
iter, err := mapping.IterAll(ctx)
if err != nil {
return err
}
var k, v val.Tuple
kd, vd := mapping.Descriptors()
for {
k, v, err = iter.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return err
}
o, _ := kd.GetBytes(0, k)
bld.PutString(0, hash.New(o).String())
key := bld.Build(ddb.NodeStore().Pool())
n, _ := vd.GetBytes(0, v)
bld.PutString(0, hash.New(n).String())
value := bld.Build(ddb.NodeStore().Pool())
if err = rows.Put(ctx, key, value); err != nil {
return err
}
}
m, err = rows.Map(ctx)
if err != nil {
return err
}
idx := durable.IndexFromProllyMap(m)
tbl, err := doltdb.NewTable(ctx, vrw, ns, mappingSchema, idx, nil, nil)
if err != nil {
return err
}
root, err := init.GetRootValue(ctx)
if err != nil {
return err
}
root, err = root.PutTable(ctx, MigratedCommitsTable, tbl)
if err != nil {
return err
}
return commitRoot(ctx, ddb, br, root, init)
}
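Once this branch and table exist, the mapping can be read back through any MySQL-compatible client. A minimal sketch, assuming a running dolt sql-server; the server address, database name, and example hash are placeholders, and 'mydb/dolt_migrated_commits' uses Dolt's database/branch revision syntax to read from the branch written above:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	oldHash := "0123456789abcdefghij0123456789ab" // placeholder pre-migration hash
	var newHash string
	err = db.QueryRow(
		"SELECT new_commit_hash FROM `mydb/dolt_migrated_commits`.dolt_commit_mapping WHERE old_commit_hash = ?",
		oldHash,
	).Scan(&newHash)
	if err != nil {
		panic(err)
	}
	fmt.Println(newHash) // the post-migration hash for the same commit
}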
func commitRoot(
ctx context.Context,
ddb *doltdb.DoltDB,
br ref.BranchRef,
root *doltdb.RootValue,
parent *doltdb.Commit,
) error {
roots := doltdb.Roots{
Head: root,
Working: root,
Staged: root,
}
parents := []*doltdb.Commit{parent}
meta, err := parent.GetCommitMeta(ctx)
if err != nil {
return err
}
meta, err = datas.NewCommitMeta(meta.Name, meta.Email, meta.Description)
if err != nil {
return err
}
pcm, err := ddb.NewPendingCommit(ctx, roots, parents, meta)
if err != nil {
return err
}
wsr, err := ref.WorkingSetRefForHead(br)
if err != nil {
return err
}
ws, err := ddb.ResolveWorkingSet(ctx, wsr)
if err != nil {
return err
}
prev, err := ws.HashOf()
if err != nil {
return err
}
ws = ws.WithWorkingRoot(root).WithStagedRoot(root)
_, err = ddb.CommitWithWorkingSet(ctx, br, wsr, pcm, ws, prev, &datas.WorkingSetMeta{
Name: meta.Name,
Email: meta.Email,
Timestamp: uint64(time.Now().Unix()),
})
return err
}
+6 -5
View File
@@ -96,7 +96,7 @@ func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRe
return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta())
}
func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog *progress) error {
oldHash, err := oldCm.HashOf()
if err != nil {
return err
@@ -204,7 +204,7 @@ func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit,
return nil
}
func migrateInitCommit(ctx context.Context, cm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
func migrateInitCommit(ctx context.Context, cm *doltdb.Commit, new *doltdb.DoltDB, prog *progress) error {
oldHash, err := cm.HashOf()
if err != nil {
return err
@@ -244,7 +244,7 @@ func migrateInitCommit(ctx context.Context, cm *doltdb.Commit, new *doltdb.DoltD
return prog.Put(ctx, oldHash, newHash)
}
func migrateCommitOptions(ctx context.Context, oldCm *doltdb.Commit, prog Progress) (datas.CommitOptions, error) {
func migrateCommitOptions(ctx context.Context, oldCm *doltdb.Commit, prog *progress) (datas.CommitOptions, error) {
parents, err := oldCm.ParentHashes(ctx)
if err != nil {
return datas.CommitOptions{}, err
@@ -414,6 +414,7 @@ func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTb
var newRows durable.Index
var newSet durable.IndexSet
originalCtx := ctx
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
@@ -433,13 +434,13 @@ func migrateTable(ctx context.Context, newSch schema.Schema, oldParentTbl, oldTb
return nil, err
}
ai, err := oldTbl.GetAutoIncrementValue(ctx)
ai, err := oldTbl.GetAutoIncrementValue(originalCtx)
if err != nil {
return nil, err
}
autoInc := types.Uint(ai)
return doltdb.NewTable(ctx, vrw, ns, newSch, newRows, newSet, autoInc)
return doltdb.NewTable(originalCtx, vrw, ns, newSch, newRows, newSet, autoInc)
}
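The originalCtx change above guards against a subtlety of errgroup.WithContext: the derived context is cancelled as soon as Wait returns, even when every goroutine succeeds, so follow-up calls like GetAutoIncrementValue and NewTable must use the caller's context. A self-contained illustration:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	originalCtx := context.Background()
	eg, ctx := errgroup.WithContext(originalCtx)
	eg.Go(func() error { return nil })
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(ctx.Err())         // context.Canceled: the derived ctx is done
	fmt.Println(originalCtx.Err()) // <nil>: the original is still usable
}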
func migrateSchema(ctx context.Context, tableName string, existing schema.Schema) (schema.Schema, error) {
+15 -12
View File
@@ -28,7 +28,7 @@ import (
// TraverseDAG traverses |old|, migrating values to |new|.
func TraverseDAG(ctx context.Context, menv Environment, old, new *doltdb.DoltDB) (err error) {
var heads []ref.DoltRef
var prog Progress
var prog *progress
heads, err = old.GetHeadRefs(ctx)
if err != nil {
@@ -42,12 +42,6 @@ func TraverseDAG(ctx context.Context, menv Environment, old, new *doltdb.DoltDB)
if err != nil {
return err
}
defer func() {
cerr := prog.Close(ctx)
if err == nil {
err = cerr
}
}()
for i := range heads {
if err = traverseRefHistory(ctx, menv, heads[i], old, new, prog); err != nil {
@@ -58,10 +52,19 @@ func TraverseDAG(ctx context.Context, menv Environment, old, new *doltdb.DoltDB)
if err = validateBranchMapping(ctx, old, new); err != nil {
return err
}
// write the migrated commit mapping to a special branch
m, err := prog.Finalize(ctx)
if err != nil {
return err
}
if err = persistMigratedCommitMapping(ctx, new, m); err != nil {
return err
}
return nil
}
func traverseRefHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
func traverseRefHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog *progress) error {
switch r.GetType() {
case ref.BranchRefType:
if err := traverseBranchHistory(ctx, menv, r, old, new, prog); err != nil {
@@ -87,7 +90,7 @@ func traverseRefHistory(ctx context.Context, menv Environment, r ref.DoltRef, ol
}
}
func traverseBranchHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
func traverseBranchHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog *progress) error {
cm, err := old.ResolveCommitRef(ctx, r)
if err != nil {
return err
@@ -108,7 +111,7 @@ func traverseBranchHistory(ctx context.Context, menv Environment, r ref.DoltRef,
return new.SetHead(ctx, r, newHash)
}
func traverseTagHistory(ctx context.Context, menv Environment, r ref.TagRef, old, new *doltdb.DoltDB, prog Progress) error {
func traverseTagHistory(ctx context.Context, menv Environment, r ref.TagRef, old, new *doltdb.DoltDB, prog *progress) error {
t, err := old.ResolveTag(ctx, r)
if err != nil {
return err
@@ -133,7 +136,7 @@ func traverseTagHistory(ctx context.Context, menv Environment, r ref.TagRef, old
return new.NewTagAtCommit(ctx, r, cm, t.Meta)
}
func traverseCommitHistory(ctx context.Context, menv Environment, cm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
func traverseCommitHistory(ctx context.Context, menv Environment, cm *doltdb.Commit, new *doltdb.DoltDB, prog *progress) error {
ch, err := cm.HashOf()
if err != nil {
return err
@@ -180,7 +183,7 @@ func traverseCommitHistory(ctx context.Context, menv Environment, cm *doltdb.Com
}
}
func firstAbsent(ctx context.Context, p Progress, addrs []hash.Hash) (int, error) {
func firstAbsent(ctx context.Context, p *progress, addrs []hash.Hash) (int, error) {
for i := range addrs {
ok, err := p.Has(ctx, addrs[i])
if err != nil {
@@ -44,20 +44,9 @@ func NewSqlEngineReader(ctx context.Context, dEnv *env.DoltEnv, tableName string
return nil, err
}
// Choose the first DB as the current one. This will be the DB in the working directory, if one exists there.
var dbName string
mrEnv.Iter(func(name string, _ *env.DoltEnv) (stop bool, err error) {
dbName = name
return true, nil
})
config := &engine.SqlEngineConfig{
InitialDb: dbName,
IsReadOnly: false,
PrivFilePath: "",
ServerUser: "root",
ServerPass: "",
Autocommit: true,
ServerUser: "root",
Autocommit: true,
}
se, err := engine.NewSqlEngine(
ctx,
@@ -69,13 +58,11 @@ func NewSqlEngineReader(ctx context.Context, dEnv *env.DoltEnv, tableName string
return nil, err
}
sqlCtx, err := se.NewContext(ctx)
sqlCtx, err := se.NewLocalContext(ctx)
if err != nil {
return nil, err
}
// Add root client
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
sqlCtx.SetCurrentDatabase(mrEnv.GetFirstDatabase())
sch, iter, err := se.Query(sqlCtx, fmt.Sprintf("SELECT * FROM `%s`", tableName))
if err != nil {
@@ -73,22 +73,11 @@ func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTable
return nil, err
}
// Choose the first DB as the current one. This will be the DB in the working directory, if one exists there.
var dbName string
mrEnv.Iter(func(name string, _ *env.DoltEnv) (stop bool, err error) {
dbName = name
return true, nil
})
// The simplest path would have our import path be a layer over LOAD DATA
config := &engine.SqlEngineConfig{
InitialDb: dbName,
IsReadOnly: false,
PrivFilePath: "",
ServerUser: "root",
ServerPass: "",
Autocommit: false, // We set autocommit to false to improve performance; bulk import should not commit on each row.
Bulk: true,
ServerUser: "root",
Autocommit: false, // We set autocommit to false to improve performance; bulk import should not commit on each row.
Bulk: true,
}
se, err := engine.NewSqlEngine(
ctx,
@@ -101,18 +90,18 @@ func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTable
}
defer se.Close()
dbName := mrEnv.GetFirstDatabase()
if se.GetUnderlyingEngine().IsReadOnly {
// SqlEngineTableWriter does not respect read only mode
return nil, analyzer.ErrReadOnlyDatabase.New(dbName)
}
sqlCtx, err := se.NewContext(ctx)
sqlCtx, err := se.NewLocalContext(ctx)
if err != nil {
return nil, err
}
// Add root client
sqlCtx.Session.SetClient(sql.Client{User: "root", Address: "%", Capabilities: 0})
sqlCtx.SetCurrentDatabase(dbName)
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
@@ -17,6 +17,7 @@ package encoding
import (
"context"
"errors"
"fmt"
"sync"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -151,6 +152,7 @@ type encodedIndex struct {
Tags []uint64 `noms:"tags" json:"tags"`
Comment string `noms:"comment" json:"comment"`
Unique bool `noms:"unique" json:"unique"`
Spatial bool `noms:"spatial,omitempty" json:"spatial,omitempty"`
IsSystemDefined bool `noms:"hidden,omitempty" json:"hidden,omitempty"` // Was previously named Hidden, do not change noms name
PrefixLengths []uint16 `noms:"prefixLengths,omitempty" json:"prefixLengths,omitempty"`
}
@@ -244,6 +246,7 @@ func toSchemaData(sch schema.Schema) (schemaData, error) {
Tags: index.IndexedColumnTags(),
Comment: index.Comment(),
Unique: index.IsUnique(),
Spatial: index.IsSpatial(),
IsSystemDefined: !index.IsUserDefined(),
PrefixLengths: index.PrefixLengths(),
}
@@ -307,6 +310,7 @@ func (sd schemaData) addChecksIndexesAndPkOrderingToSchema(sch schema.Schema) er
encodedIndex.PrefixLengths,
schema.IndexProperties{
IsUnique: encodedIndex.Unique,
IsSpatial: encodedIndex.Spatial,
IsUserDefined: !encodedIndex.IsSystemDefined,
Comment: encodedIndex.Comment,
},
@@ -346,6 +350,14 @@ func MarshalSchemaAsNomsValue(ctx context.Context, vrw types.ValueReadWriter, sc
return nil, err
}
if vrw.Format().VersionString() != types.Format_DOLT.VersionString() {
for _, idx := range sch.Indexes().AllIndexes() {
if idx.IsSpatial() {
return nil, fmt.Errorf("spatial indexes are only supported in storage format __DOLT__")
}
}
}
if vrw.Format().UsesFlatbuffers() {
return SerializeSchema(ctx, vrw, sch)
}
@@ -420,6 +432,14 @@ func UnmarshalSchemaNomsValue(ctx context.Context, nbf *types.NomsBinFormat, sch
return nil, err
}
if nbf.VersionString() != types.Format_DOLT.VersionString() {
for _, idx := range sch.Indexes().AllIndexes() {
if idx.IsSpatial() {
return nil, fmt.Errorf("spatial indexes are only supported in storage format __DOLT__")
}
}
}
if sd.PkOrdinals == nil {
// schemaData will not have PK ordinals in old versions of Dolt
// this sets the default PK ordinates for subsequent cache lookups
@@ -165,6 +165,7 @@ func serializeClusteredIndex(b *fb.Builder, sch schema.Schema) fb.UOffsetT {
serial.IndexAddValueColumns(b, vo)
serial.IndexAddPrimaryKey(b, true)
serial.IndexAddUniqueKey(b, true)
serial.IndexAddSpatialKey(b, false)
serial.IndexAddSystemDefined(b, false)
return serial.IndexEnd(b)
}
@@ -350,6 +351,7 @@ func serializeSecondaryIndexes(b *fb.Builder, sch schema.Schema, indexes []schem
serial.IndexAddUniqueKey(b, idx.IsUnique())
serial.IndexAddSystemDefined(b, !idx.IsUserDefined())
serial.IndexAddPrefixLengths(b, po)
serial.IndexAddSpatialKey(b, idx.IsSpatial())
offs[i] = serial.IndexEnd(b)
}
@@ -370,6 +372,7 @@ func deserializeSecondaryIndexes(sch schema.Schema, s *serial.TableSchema) error
name := string(idx.Name())
props := schema.IndexProperties{
IsUnique: idx.UniqueKey(),
IsSpatial: idx.SpatialKey(),
IsUserDefined: !idx.SystemDefined(),
Comment: string(idx.Comment()),
}
+9
View File
@@ -43,6 +43,8 @@ type Index interface {
IndexedColumnTags() []uint64
// IsUnique returns whether the given index has the UNIQUE constraint.
IsUnique() bool
// IsSpatial returns whether the given index has the SPATIAL constraint.
IsSpatial() bool
// IsUserDefined returns whether the given index was created by a user or automatically generated.
IsUserDefined() bool
// Name returns the name of the index.
@@ -66,6 +68,7 @@ type indexImpl struct {
allTags []uint64
indexColl *indexCollectionImpl
isUnique bool
isSpatial bool
isUserDefined bool
comment string
prefixLengths []uint16
@@ -83,6 +86,7 @@ func NewIndex(name string, tags, allTags []uint64, indexColl IndexCollection, pr
allTags: allTags,
indexColl: indexCollImpl,
isUnique: props.IsUnique,
isSpatial: props.IsSpatial,
isUserDefined: props.IsUserDefined,
comment: props.Comment,
}
@@ -167,6 +171,11 @@ func (ix *indexImpl) IsUnique() bool {
return ix.isUnique
}
// IsSpatial implements Index.
func (ix *indexImpl) IsSpatial() bool {
return ix.isSpatial
}
// IsUserDefined implements Index.
func (ix *indexImpl) IsUserDefined() bool {
return ix.isUserDefined
+15 -1
View File
@@ -16,10 +16,20 @@ package schema
import (
"fmt"
"os"
"sort"
"strings"
)
// EnableSpatialIndex enables the creation and use of spatial indexes
var EnableSpatialIndex = false
func init() {
if v := os.Getenv("DOLT_ENABLE_SPATIAL_INDEX"); v != "" {
EnableSpatialIndex = true
}
}
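Because the environment variable is read exactly once at package init, setting it later in the same process has no effect; tests would flip the exported variable directly instead. A minimal sketch of such a test helper (the helper itself is hypothetical):

package schema_test

import (
	"testing"

	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
)

// withSpatialIndexes temporarily enables the gate for the duration of f,
// since re-setting DOLT_ENABLE_SPATIAL_INDEX after init() would be a no-op.
func withSpatialIndexes(t *testing.T, f func()) {
	t.Helper()
	old := schema.EnableSpatialIndex
	schema.EnableSpatialIndex = true
	defer func() { schema.EnableSpatialIndex = old }()
	f()
}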
type IndexCollection interface {
// AddIndex adds the given index, overwriting any current indexes with the same name or columns.
// It does not perform any kind of checking, and is intended for schema modifications.
@@ -68,6 +78,7 @@ type IndexCollection interface {
type IndexProperties struct {
IsUnique bool
IsSpatial bool
IsUserDefined bool
Comment string
}
@@ -159,6 +170,7 @@ func (ixc *indexCollectionImpl) AddIndexByColTags(indexName string, tags []uint6
tags: tags,
allTags: combineAllTags(tags, ixc.pks),
isUnique: props.IsUnique,
isSpatial: props.IsSpatial,
isUserDefined: props.IsUserDefined,
comment: props.Comment,
prefixLengths: prefixLengths,
@@ -172,7 +184,7 @@ func (ixc *indexCollectionImpl) AddIndexByColTags(indexName string, tags []uint6
// validateColumnIndexable returns an error if the column given cannot be used in an index
func validateColumnIndexable(c Column) error {
if IsColSpatialType(c) {
if !EnableSpatialIndex && IsColSpatialType(c) {
return fmt.Errorf("cannot create an index over spatial type columns")
}
return nil
@@ -185,6 +197,7 @@ func (ixc *indexCollectionImpl) UnsafeAddIndexByColTags(indexName string, tags [
tags: tags,
allTags: combineAllTags(tags, ixc.pks),
isUnique: props.IsUnique,
isSpatial: props.IsSpatial,
isUserDefined: props.IsUserDefined,
comment: props.Comment,
prefixLengths: prefixLengths,
@@ -325,6 +338,7 @@ func (ixc *indexCollectionImpl) Merge(indexes ...Index) {
tags: tags,
indexColl: ixc,
isUnique: index.IsUnique(),
isSpatial: index.IsSpatial(),
isUserDefined: index.IsUserDefined(),
comment: index.Comment(),
prefixLengths: index.PrefixLengths(),
@@ -23,6 +23,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/vt/proto/query"
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -429,6 +430,11 @@ func (si *schemaImpl) GetKeyDescriptor() val.TupleDesc {
Enc: val.Encoding(EncodingFromSqlType(query.Type_VARCHAR)),
Nullable: columnMissingNotNullConstraint(col),
}
} else if queryType == query.Type_GEOMETRY {
t = val.Type{
Enc: val.Encoding(serial.EncodingCell),
Nullable: columnMissingNotNullConstraint(col),
}
} else {
t = val.Type{
Enc: val.Encoding(EncodingFromSqlType(queryType)),
@@ -181,13 +181,13 @@ func FromSqlType(sqlType sql.Type) (TypeInfo, error) {
case gmstypes.PolygonType{}.String():
return &polygonType{sqlType.(gmstypes.PolygonType)}, nil
case gmstypes.MultiPointType{}.String():
return &multipointType{}, nil
return &multipointType{sqlType.(gmstypes.MultiPointType)}, nil
case gmstypes.MultiLineStringType{}.String():
return &multilinestringType{}, nil
return &multilinestringType{sqlType.(gmstypes.MultiLineStringType)}, nil
case gmstypes.MultiPolygonType{}.String():
return &multipolygonType{}, nil
return &multipolygonType{sqlType.(gmstypes.MultiPolygonType)}, nil
case gmstypes.GeomCollType{}.String():
return &geomcollType{}, nil
return &geomcollType{sqlType.(gmstypes.GeomCollType)}, nil
case gmstypes.GeometryType{}.String():
return &geometryType{sqlGeometryType: sqlType.(gmstypes.GeometryType)}, nil
default:
@@ -270,6 +270,7 @@ func replaceColumnInSchema(sch schema.Schema, oldCol schema.Column, newCol schem
index.PrefixLengths(),
schema.IndexProperties{
IsUnique: index.IsUnique(),
IsSpatial: index.IsSpatial(),
IsUserDefined: index.IsUserDefined(),
Comment: index.Comment(),
})
@@ -228,6 +228,20 @@ func (a *binlogReplicaApplier) startReplicationEventStream(ctx *sql.Context, con
a.currentPosition = position
// Clear out the format description in case we're reconnecting, so that we don't use the old format description
// to interpret any event messages before we receive the new format description from the new stream.
a.format = mysql.BinlogFormat{}
// If the source server has binlog checksums enabled (@@global.binlog_checksum), then the replica MUST
// set @master_binlog_checksum to handshake with the server to acknowledge that it knows that checksums
// are in use. Without this step, the server will just send back error messages saying that the replica
// does not support the binlog checksum algorithm in use on the primary.
// For more details, see: https://dev.mysql.com/worklog/task/?id=2540
_, err = conn.ExecuteFetch("set @master_binlog_checksum=@@global.binlog_checksum;", 0, false)
if err != nil {
return err
}
return conn.SendBinlogDumpCommand(serverId, *position)
}
@@ -271,10 +285,6 @@ func (a *binlogReplicaApplier) replicaBinlogEventHandler(ctx *sql.Context) error
return err
}
continue
} else if strings.Contains(sqlError.Message, "can not handle replication events with the checksum") {
// Ignore any errors about checksums
ctx.GetLogger().Debug("ignoring binlog checksum error message")
continue
}
}
@@ -285,6 +295,19 @@ func (a *binlogReplicaApplier) replicaBinlogEventHandler(ctx *sql.Context) error
continue
}
// We don't support checksum validation, so we must strip off any checksum data if present, otherwise
// it could get interpreted as part of the data fields and corrupt the fields we pull out. There is not
// a future-proof guarantee on the checksum size, so we can't strip a checksum until we've seen the
// Format binlog event that definitively tells us if checksums are enabled and what algorithm they use.
if !a.format.IsZero() {
event, _, err = event.StripChecksum(a.format)
if err != nil {
msg := fmt.Sprintf("unable to strip checksum from binlog event: '%v'", err.Error())
ctx.GetLogger().Error(msg)
DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
}
}
err = a.processBinlogEvent(ctx, engine, event)
if err != nil {
ctx.GetLogger().Errorf("unexpected error of type %T: '%v'", err, err.Error())
@@ -328,6 +351,8 @@ func (a *binlogReplicaApplier) processBinlogEvent(ctx *sql.Context, engine *gms.
"database": query.Database,
"charset": query.Charset,
"query": query.SQL,
"options": fmt.Sprintf("0x%x", query.Options),
"sql_mode": fmt.Sprintf("0x%x", query.SqlMode),
}).Debug("Received binlog event: Query")
// When executing SQL statements sent from the primary, we can't be sure what database was modified unless we
@@ -337,6 +362,39 @@ func (a *binlogReplicaApplier) processBinlogEvent(ctx *sql.Context, engine *gms.
// avoid issues with correctness, at the cost of being slightly less efficient
commitToAllDatabases = true
if query.Options&mysql.QFlagOptionAutoIsNull > 0 {
ctx.GetLogger().Tracef("Setting sql_auto_is_null ON")
ctx.SetSessionVariable(ctx, "sql_auto_is_null", 1)
} else {
ctx.GetLogger().Tracef("Setting sql_auto_is_null OFF")
ctx.SetSessionVariable(ctx, "sql_auto_is_null", 0)
}
if query.Options&mysql.QFlagOptionNotAutocommit > 0 {
ctx.GetLogger().Tracef("Setting autocommit=0")
ctx.SetSessionVariable(ctx, "autocommit", 0)
} else {
ctx.GetLogger().Tracef("Setting autocommit=1")
ctx.SetSessionVariable(ctx, "autocommit", 1)
}
if query.Options&mysql.QFlagOptionNoForeignKeyChecks > 0 {
ctx.GetLogger().Tracef("Setting foreign_key_checks=0")
ctx.SetSessionVariable(ctx, "foreign_key_checks", 0)
} else {
ctx.GetLogger().Tracef("Setting foreign_key_checks=1")
ctx.SetSessionVariable(ctx, "foreign_key_checks", 1)
}
// NOTE: unique_checks is not currently honored by Dolt
if query.Options&mysql.QFlagOptionRelaxedUniqueChecks > 0 {
ctx.GetLogger().Tracef("Setting unique_checks=0")
ctx.SetSessionVariable(ctx, "unique_checks", 0)
} else {
ctx.GetLogger().Tracef("Setting unique_checks=1")
ctx.SetSessionVariable(ctx, "unique_checks", 1)
}
executeQueryWithEngine(ctx, engine, query.SQL)
createCommit = strings.ToLower(query.SQL) != "begin"
@@ -493,16 +551,18 @@ func (a *binlogReplicaApplier) processBinlogEvent(ctx *sql.Context, engine *gms.
// processRowEvent processes a WriteRows, DeleteRows, or UpdateRows binlog event and returns an error if any problems
// were encountered.
func (a *binlogReplicaApplier) processRowEvent(ctx *sql.Context, event mysql.BinlogEvent, engine *gms.Engine) error {
var eventType string
switch {
case event.IsDeleteRows():
ctx.GetLogger().Debug("Received binlog event: DeleteRows")
eventType = "DeleteRows"
case event.IsWriteRows():
ctx.GetLogger().Debug("Received binlog event: WriteRows")
eventType = "WriteRows"
case event.IsUpdateRows():
ctx.GetLogger().Debug("Received binlog event: UpdateRows")
eventType = "UpdateRows"
default:
return fmt.Errorf("unsupported event type: %v", event)
}
ctx.GetLogger().Debugf("Received binlog event: %s", eventType)
tableId := event.TableID(a.format)
tableMap, ok := a.tableMapsById[tableId]
@@ -519,16 +579,22 @@ func (a *binlogReplicaApplier) processRowEvent(ctx *sql.Context, event mysql.Bin
return err
}
ctx.GetLogger().WithFields(logrus.Fields{
"flags": fmt.Sprintf("%x", rows.Flags),
}).Debugf("Processing rows from %s event", eventType)
flags := rows.Flags
if flags&rowFlag_endOfStatement == rowFlag_endOfStatement {
foreignKeyChecksDisabled := false
if flags&rowFlag_endOfStatement > 0 {
// nothing to be done for end of statement; just clear the flag and move on
flags = flags &^ rowFlag_endOfStatement
}
if flags&rowFlag_noForeignKeyChecks == rowFlag_noForeignKeyChecks {
if flags&rowFlag_noForeignKeyChecks > 0 {
foreignKeyChecksDisabled = true
flags = flags &^ rowFlag_noForeignKeyChecks
}
if flags != 0 {
msg := fmt.Sprintf("unsupported binlog protocol message: DeleteRows event with unsupported flags '%x'", flags)
msg := fmt.Sprintf("unsupported binlog protocol message: row event with unsupported flags '%x'", flags)
ctx.GetLogger().Errorf(msg)
DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
}
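The flag handling above follows a common protocol-parsing pattern: test each known bit, clear it with Go's AND NOT operator (&^), and treat any leftover bits as unsupported. A self-contained illustration with invented flag values:

package main

import "fmt"

const (
	flagEndOfStatement     uint16 = 0x1
	flagNoForeignKeyChecks uint16 = 0x4
)

func main() {
	flags := uint16(0x5) // both bits set
	if flags&flagEndOfStatement > 0 {
		flags = flags &^ flagEndOfStatement // clear the bit and move on
	}
	fkChecksDisabled := false
	if flags&flagNoForeignKeyChecks > 0 {
		fkChecksDisabled = true
		flags = flags &^ flagNoForeignKeyChecks
	}
	fmt.Println(fkChecksDisabled, flags) // true 0; nonzero residue would be an error
}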
@@ -543,10 +609,9 @@ func (a *binlogReplicaApplier) processRowEvent(ctx *sql.Context, event mysql.Bin
case event.IsUpdateRows():
ctx.GetLogger().Debugf(" - Updated Rows (table: %s)", tableMap.Name)
case event.IsWriteRows():
ctx.GetLogger().Debugf(" - New Rows (table: %s)", tableMap.Name)
ctx.GetLogger().Debugf(" - Inserted Rows (table: %s)", tableMap.Name)
}
foreignKeyChecksDisabled := tableMap.Flags&rowFlag_noForeignKeyChecks > 0
writeSession, tableWriter, err := getTableWriter(ctx, engine, tableMap.Name, tableMap.Database, foreignKeyChecksDisabled)
if err != nil {
return err
@@ -263,11 +263,15 @@ func TestForeignKeyChecks(t *testing.T) {
startSqlServers(t)
startReplication(t, mySqlPort)
// Insert a record with a foreign key check
primaryDatabase.MustExec("CREATE TABLE colors (name varchar(100) primary key);")
// Test that we can execute statement-based replication that requires foreign_key_checks
// being turned off (referenced table doesn't exist yet).
primaryDatabase.MustExec("SET foreign_key_checks = 0;")
primaryDatabase.MustExec("CREATE TABLE t1 (pk int primary key, color varchar(100), FOREIGN KEY (color) REFERENCES colors(name));")
primaryDatabase.MustExec("START TRANSACTION;")
primaryDatabase.MustExec("CREATE TABLE colors (name varchar(100) primary key);")
primaryDatabase.MustExec("SET foreign_key_checks = 1;")
// Insert a record with foreign key checks enabled
primaryDatabase.MustExec("START TRANSACTION;")
primaryDatabase.MustExec("INSERT INTO colors VALUES ('green'), ('red'), ('blue');")
primaryDatabase.MustExec("INSERT INTO t1 VALUES (1, 'red'), (2, 'green');")
primaryDatabase.MustExec("COMMIT;")
@@ -362,7 +366,7 @@ func TestCharsetsAndCollations(t *testing.T) {
// waitForReplicaToCatchUp waits (up to 20s) for the replica to catch up with the primary database. The
// lag is measured by checking that gtid_executed is the same on the primary and replica.
func waitForReplicaToCatchUp(t *testing.T) {
timeLimit := 20 * time.Second
timeLimit := 60 * time.Second
endTime := time.Now().Add(timeLimit)
for time.Now().Before(endTime) {
replicaGtid := queryGtid(t, replicaDatabase)
@@ -486,8 +490,8 @@ func stopDoltSqlServer(t *testing.T) {
func startReplication(_ *testing.T, port int) {
replicaDatabase.MustExec("SET @@GLOBAL.server_id=123;")
replicaDatabase.MustExec(
fmt.Sprintf("change replication source to SOURCE_HOST='localhost', SOURCE_USER='root', "+
"SOURCE_PASSWORD='', SOURCE_PORT=%v;", port))
fmt.Sprintf("change replication source to SOURCE_HOST='localhost', SOURCE_USER='replicator', "+
"SOURCE_PASSWORD='Zqr8_blrGm1!', SOURCE_PORT=%v;", port))
replicaDatabase.MustExec("start replica;")
}
@@ -588,7 +592,6 @@ func startMySqlServer(dir string) (int, *os.Process, error) {
fmt.Sprintf("--port=%v", mySqlPort),
"--server-id=11223344",
fmt.Sprintf("--socket=mysql-%v.sock", mySqlPort),
"--binlog-checksum=NONE",
"--general_log_file="+dir+"general_log",
"--log-bin="+dir+"log_bin",
"--slow_query_log_file="+dir+"slow_query_log",
@@ -623,9 +626,11 @@ func startMySqlServer(dir string) (int, *os.Process, error) {
primaryDatabase = sqlx.MustOpen("mysql", dsn)
os.Chdir(originalCwd)
fmt.Printf("MySQL server started on port %v \n", mySqlPort)
primaryDatabase.MustExec("CREATE USER 'replicator'@'%' IDENTIFIED BY 'Zqr8_blrGm1!';")
primaryDatabase.MustExec("GRANT REPLICATION SLAVE ON *.* TO 'replicator'@'%';")
return mySqlPort, cmd.Process, nil
}
+1 -1
View File
@@ -425,7 +425,7 @@ func (db Database) getTableInsensitive(ctx *sql.Context, head *doltdb.Commit, ds
}
}
dt, found = dtables.NewUnscopedDiffTable(ctx, db.ddb, head), true
dt, found = dtables.NewUnscopedDiffTable(ctx, db.name, db.ddb, head), true
case doltdb.TableOfTablesInConflictName:
dt, found = dtables.NewTableOfTablesInConflict(ctx, db.name, db.ddb), true
case doltdb.TableOfTablesWithViolationsName:
@@ -937,12 +937,14 @@ func (p DoltDatabaseProvider) ExternalStoredProcedures(_ *sql.Context, name stri
// TableFunction implements the sql.TableFunctionProvider interface
func (p DoltDatabaseProvider) TableFunction(_ *sql.Context, name string) (sql.TableFunction, error) {
// currently, only one table function is supported; if we extend this, we should clean this up
// and store table functions in a map, similar to regular functions.
// TODO: Clean this up and store table functions in a map, similar to regular functions.
switch strings.ToLower(name) {
case "dolt_diff":
dtf := &DiffTableFunction{}
return dtf, nil
case "dolt_diff_stat":
dtf := &DiffStatTableFunction{}
return dtf, nil
case "dolt_diff_summary":
dtf := &DiffSummaryTableFunction{}
return dtf, nil
@@ -1,4 +1,4 @@
// Copyright 2020 Dolthub, Inc.
// Copyright 2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/hash"
)
const HashOfFuncName = "hashof"
@@ -80,12 +81,21 @@ func (t *HashOf) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
} else {
ref, err := ddb.GetRefByNameInsensitive(ctx, name)
if err != nil {
return nil, err
}
cm, err = ddb.ResolveCommitRef(ctx, ref)
if err != nil {
return nil, err
hsh, parsed := hash.MaybeParse(name)
if parsed {
orgErr := err
cm, err = ddb.ReadCommit(ctx, hsh)
if err != nil {
return nil, orgErr
}
} else {
return nil, err
}
} else {
cm, err = ddb.ResolveCommitRef(ctx, ref)
if err != nil {
return nil, err
}
}
}
@@ -0,0 +1,571 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqle
import (
"errors"
"fmt"
"io"
"math"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/types"
"golang.org/x/sync/errgroup"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
)
var _ sql.TableFunction = (*DiffStatTableFunction)(nil)
type DiffStatTableFunction struct {
ctx *sql.Context
fromCommitExpr sql.Expression
toCommitExpr sql.Expression
dotCommitExpr sql.Expression
tableNameExpr sql.Expression
database sql.Database
}
var diffStatTableSchema = sql.Schema{
&sql.Column{Name: "table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "rows_unmodified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_added", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_deleted", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_modified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_added", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_deleted", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_modified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "old_row_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "new_row_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "old_cell_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "new_cell_count", Type: types.Int64, Nullable: true},
}
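Each row this table function produces matches the schema above, and the count columns are nullable (keyless tables report NULL for most of them). A minimal usage sketch against a running dolt sql-server; the server address, database, and branch names are hypothetical:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	rows, err := db.Query(
		"SELECT table_name, rows_added, rows_deleted, rows_modified " +
			"FROM dolt_diff_stat('main', 'feature')")
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var added, deleted, modified sql.NullInt64 // nullable per the schema
		if err := rows.Scan(&name, &added, &deleted, &modified); err != nil {
			panic(err)
		}
		fmt.Println(name, added.Int64, deleted.Int64, modified.Int64)
	}
	if err := rows.Err(); err != nil {
		panic(err)
	}
}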
// NewInstance creates a new instance of TableFunction interface
func (ds *DiffStatTableFunction) NewInstance(ctx *sql.Context, db sql.Database, expressions []sql.Expression) (sql.Node, error) {
newInstance := &DiffStatTableFunction{
ctx: ctx,
database: db,
}
node, err := newInstance.WithExpressions(expressions...)
if err != nil {
return nil, err
}
return node, nil
}
// Database implements the sql.Databaser interface
func (ds *DiffStatTableFunction) Database() sql.Database {
return ds.database
}
// WithDatabase implements the sql.Databaser interface
func (ds *DiffStatTableFunction) WithDatabase(database sql.Database) (sql.Node, error) {
ds.database = database
return ds, nil
}
// Name implements the sql.TableFunction interface
func (ds *DiffStatTableFunction) Name() string {
return "dolt_diff_stat"
}
func (ds *DiffStatTableFunction) commitsResolved() bool {
if ds.dotCommitExpr != nil {
return ds.dotCommitExpr.Resolved()
}
return ds.fromCommitExpr.Resolved() && ds.toCommitExpr.Resolved()
}
// Resolved implements the sql.Resolvable interface
func (ds *DiffStatTableFunction) Resolved() bool {
if ds.tableNameExpr != nil {
return ds.commitsResolved() && ds.tableNameExpr.Resolved()
}
return ds.commitsResolved()
}
// String implements the Stringer interface
func (ds *DiffStatTableFunction) String() string {
if ds.dotCommitExpr != nil {
if ds.tableNameExpr != nil {
return fmt.Sprintf("DOLT_DIFF_STAT(%s, %s)", ds.dotCommitExpr.String(), ds.tableNameExpr.String())
}
return fmt.Sprintf("DOLT_DIFF_STAT(%s)", ds.dotCommitExpr.String())
}
if ds.tableNameExpr != nil {
return fmt.Sprintf("DOLT_DIFF_STAT(%s, %s, %s)", ds.fromCommitExpr.String(), ds.toCommitExpr.String(), ds.tableNameExpr.String())
}
return fmt.Sprintf("DOLT_DIFF_STAT(%s, %s)", ds.fromCommitExpr.String(), ds.toCommitExpr.String())
}
// Schema implements the sql.Node interface.
func (ds *DiffStatTableFunction) Schema() sql.Schema {
return diffStatTableSchema
}
// Children implements the sql.Node interface.
func (ds *DiffStatTableFunction) Children() []sql.Node {
return nil
}
// WithChildren implements the sql.Node interface.
func (ds *DiffStatTableFunction) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 0 {
return nil, fmt.Errorf("unexpected children")
}
return ds, nil
}
// CheckPrivileges implements the interface sql.Node.
func (ds *DiffStatTableFunction) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
if ds.tableNameExpr != nil {
if !types.IsText(ds.tableNameExpr.Type()) {
return false
}
tableNameVal, err := ds.tableNameExpr.Eval(ds.ctx, nil)
if err != nil {
return false
}
tableName, ok := tableNameVal.(string)
if !ok {
return false
}
// TODO: Add tests for privilege checking
return opChecker.UserHasPrivileges(ctx,
sql.NewPrivilegedOperation(ds.database.Name(), tableName, "", sql.PrivilegeType_Select))
}
tblNames, err := ds.database.GetTableNames(ctx)
if err != nil {
return false
}
var operations []sql.PrivilegedOperation
for _, tblName := range tblNames {
operations = append(operations, sql.NewPrivilegedOperation(ds.database.Name(), tblName, "", sql.PrivilegeType_Select))
}
return opChecker.UserHasPrivileges(ctx, operations...)
}
// Expressions implements the sql.Expressioner interface.
func (ds *DiffStatTableFunction) Expressions() []sql.Expression {
exprs := []sql.Expression{}
if ds.dotCommitExpr != nil {
exprs = append(exprs, ds.dotCommitExpr)
} else {
exprs = append(exprs, ds.fromCommitExpr, ds.toCommitExpr)
}
if ds.tableNameExpr != nil {
exprs = append(exprs, ds.tableNameExpr)
}
return exprs
}
// WithExpressions implements the sql.Expressioner interface.
func (ds *DiffStatTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
if len(expression) < 1 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "1 to 3", len(expression))
}
for _, expr := range expression {
if !expr.Resolved() {
return nil, ErrInvalidNonLiteralArgument.New(ds.Name(), expr.String())
}
// prepared statements resolve functions beforehand, so the above check fails
if _, ok := expr.(sql.FunctionExpression); ok {
return nil, ErrInvalidNonLiteralArgument.New(ds.Name(), expr.String())
}
}
if strings.Contains(expression[0].String(), "..") {
if len(expression) < 1 || len(expression) > 2 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "1 or 2", len(expression))
}
ds.dotCommitExpr = expression[0]
if len(expression) == 2 {
ds.tableNameExpr = expression[1]
}
} else {
if len(expression) < 2 || len(expression) > 3 {
return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "2 or 3", len(expression))
}
ds.fromCommitExpr = expression[0]
ds.toCommitExpr = expression[1]
if len(expression) == 3 {
ds.tableNameExpr = expression[2]
}
}
// validate the expressions
if ds.dotCommitExpr != nil {
if !types.IsText(ds.dotCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.dotCommitExpr.String())
}
} else {
if !types.IsText(ds.fromCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.fromCommitExpr.String())
}
if !types.IsText(ds.toCommitExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.toCommitExpr.String())
}
}
if ds.tableNameExpr != nil {
if !types.IsText(ds.tableNameExpr.Type()) {
return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.tableNameExpr.String())
}
}
return ds, nil
}
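The parsing above accepts either a single 'from..to' revision argument or separate from and to arguments, with an optional trailing table name in both forms. A toy illustration of the dispatch on the first argument:

package main

import (
	"fmt"
	"strings"
)

func argShape(args []string) string {
	if strings.Contains(args[0], "..") {
		return "dot syntax: one revision range arg, optional table name"
	}
	return "two-revision syntax: from and to args, optional table name"
}

func main() {
	fmt.Println(argShape([]string{"main..feature", "t1"}))
	fmt.Println(argShape([]string{"main", "feature", "t1"}))
}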
// RowIter implements the sql.Node interface
func (ds *DiffStatTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error) {
fromCommitVal, toCommitVal, dotCommitVal, tableName, err := ds.evaluateArguments()
if err != nil {
return nil, err
}
sqledb, ok := ds.database.(SqlDatabase)
if !ok {
return nil, fmt.Errorf("unexpected database type: %T", ds.database)
}
fromCommitStr, toCommitStr, err := loadCommitStrings(ctx, fromCommitVal, toCommitVal, dotCommitVal, sqledb)
if err != nil {
return nil, err
}
sess := dsess.DSessFromSess(ctx.Session)
fromRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), fromCommitStr)
if err != nil {
return nil, err
}
toRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), toCommitStr)
if err != nil {
return nil, err
}
deltas, err := diff.GetTableDeltas(ctx, fromRoot, toRoot)
if err != nil {
return nil, err
}
// If tableNameExpr is defined, return a diff stat result for that single table
if ds.tableNameExpr != nil {
delta := findMatchingDelta(deltas, tableName)
diffStat, hasDiff, err := getDiffStatNodeFromDelta(ctx, delta, fromRoot, toRoot, tableName)
if err != nil {
return nil, err
}
if !hasDiff {
return NewDiffStatTableFunctionRowIter([]diffStatNode{}), nil
}
return NewDiffStatTableFunctionRowIter([]diffStatNode{diffStat}), nil
}
var diffStats []diffStatNode
for _, delta := range deltas {
tblName := delta.ToName
if tblName == "" {
tblName = delta.FromName
}
diffStat, hasDiff, err := getDiffStatNodeFromDelta(ctx, delta, fromRoot, toRoot, tblName)
if err != nil {
if errors.Is(err, diff.ErrPrimaryKeySetChanged) {
ctx.Warn(dtables.PrimaryKeyChangeWarningCode, fmt.Sprintf("stat for table %s cannot be determined. Primary key set changed.", tblName))
// Report an empty diff for tables that have primary key set changes
diffStats = append(diffStats, diffStatNode{tblName: tblName})
continue
}
return nil, err
}
if hasDiff {
diffStats = append(diffStats, diffStat)
}
}
return NewDiffStatTableFunctionRowIter(diffStats), nil
}
// evaluateArguments returns fromCommitVal, toCommitVal, dotCommitVal, and tableName.
// It evaluates the argument expressions to turn them into values this DiffStatTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
func (ds *DiffStatTableFunction) evaluateArguments() (interface{}, interface{}, interface{}, string, error) {
var tableName string
if ds.tableNameExpr != nil {
tableNameVal, err := ds.tableNameExpr.Eval(ds.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}
tn, ok := tableNameVal.(string)
if !ok {
return nil, nil, nil, "", ErrInvalidTableName.New(ds.tableNameExpr.String())
}
tableName = tn
}
if ds.dotCommitExpr != nil {
dotCommitVal, err := ds.dotCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}
return nil, nil, dotCommitVal, tableName, nil
}
fromCommitVal, err := ds.fromCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}
toCommitVal, err := ds.toCommitExpr.Eval(ds.ctx, nil)
if err != nil {
return nil, nil, nil, "", err
}
return fromCommitVal, toCommitVal, nil, tableName, nil
}
// getDiffStatNodeFromDelta returns a diffStatNode object and whether there is a data diff. It gets the tables
// from the roots and computes the diff stat when a valid table exists in at least one of fromRoot and toRoot.
func getDiffStatNodeFromDelta(ctx *sql.Context, delta diff.TableDelta, fromRoot, toRoot *doltdb.RootValue, tableName string) (diffStatNode, bool, error) {
var oldColLen int
var newColLen int
fromTable, _, fromTableExists, err := fromRoot.GetTableInsensitive(ctx, tableName)
if err != nil {
return diffStatNode{}, false, err
}
if fromTableExists {
fromSch, err := fromTable.GetSchema(ctx)
if err != nil {
return diffStatNode{}, false, err
}
oldColLen = len(fromSch.GetAllCols().GetColumns())
}
toTable, _, toTableExists, err := toRoot.GetTableInsensitive(ctx, tableName)
if err != nil {
return diffStatNode{}, false, err
}
if toTableExists {
toSch, err := toTable.GetSchema(ctx)
if err != nil {
return diffStatNode{}, false, err
}
newColLen = len(toSch.GetAllCols().GetColumns())
}
if !fromTableExists && !toTableExists {
return diffStatNode{}, false, sql.ErrTableNotFound.New(tableName)
}
// no diff from tableDelta
if delta.FromTable == nil && delta.ToTable == nil {
return diffStatNode{}, false, nil
}
diffStat, hasDiff, keyless, err := getDiffStat(ctx, delta)
if err != nil {
return diffStatNode{}, false, err
}
return diffStatNode{tableName, diffStat, oldColLen, newColLen, keyless}, hasDiff, nil
}
// getDiffStat returns a diff.DiffStatProgress object and whether there is a data diff.
func getDiffStat(ctx *sql.Context, td diff.TableDelta) (diff.DiffStatProgress, bool, bool, error) {
// adapted from diff_output.go
ch := make(chan diff.DiffStatProgress)
grp, ctx2 := errgroup.WithContext(ctx)
grp.Go(func() error {
defer close(ch)
err := diff.StatForTableDelta(ctx2, ch, td)
return err
})
acc := diff.DiffStatProgress{}
var count int64
grp.Go(func() error {
for {
select {
case p, ok := <-ch:
if !ok {
return nil
}
acc.Adds += p.Adds
acc.Removes += p.Removes
acc.Changes += p.Changes
acc.CellChanges += p.CellChanges
acc.NewRowSize += p.NewRowSize
acc.OldRowSize += p.OldRowSize
acc.NewCellSize += p.NewCellSize
acc.OldCellSize += p.OldCellSize
count++
case <-ctx2.Done():
return ctx2.Err()
}
}
})
if err := grp.Wait(); err != nil {
return diff.DiffStatProgress{}, false, false, err
}
keyless, err := td.IsKeyless(ctx)
if err != nil {
return diff.DiffStatProgress{}, false, keyless, err
}
if (acc.Adds+acc.Removes+acc.Changes) == 0 && (acc.OldCellSize-acc.NewCellSize) == 0 {
return diff.DiffStatProgress{}, false, keyless, nil
}
return acc, true, keyless, nil
}
//------------------------------------
// diffStatTableFunctionRowIter
//------------------------------------
var _ sql.RowIter = &diffStatTableFunctionRowIter{}
type diffStatTableFunctionRowIter struct {
diffStats []diffStatNode
diffIdx int
}
func (d *diffStatTableFunctionRowIter) incrementIndexes() {
d.diffIdx++
if d.diffIdx >= len(d.diffStats) {
d.diffIdx = 0
d.diffStats = nil
}
}
type diffStatNode struct {
tblName string
diffStat diff.DiffStatProgress
oldColLen int
newColLen int
keyless bool
}
func NewDiffStatTableFunctionRowIter(ds []diffStatNode) sql.RowIter {
return &diffStatTableFunctionRowIter{
diffStats: ds,
}
}
func (d *diffStatTableFunctionRowIter) Next(ctx *sql.Context) (sql.Row, error) {
defer d.incrementIndexes()
if d.diffIdx >= len(d.diffStats) {
return nil, io.EOF
}
if d.diffStats == nil {
return nil, io.EOF
}
ds := d.diffStats[d.diffIdx]
return getRowFromDiffStat(ds.tblName, ds.diffStat, ds.newColLen, ds.oldColLen, ds.keyless), nil
}
func (d *diffStatTableFunctionRowIter) Close(context *sql.Context) error {
return nil
}
// getRowFromDiffStat takes diff.DiffStatProgress and calculates rows_modified, cells_added, and cells_deleted.
// If the change in total cell count from old to new does not match the cells_added and/or cells_deleted implied
// by row changes alone, there must be schema changes, which add to the cells_added and cells_deleted values
// beyond the row count * column length numbers.
func getRowFromDiffStat(tblName string, dsp diff.DiffStatProgress, newColLen, oldColLen int, keyless bool) sql.Row {
// if the table is keyless, match the current CLI command result
if keyless {
return sql.Row{
tblName, // table_name
nil, // rows_unmodified
int64(dsp.Adds), // rows_added
int64(dsp.Removes), // rows_deleted
nil, // rows_modified
nil, // cells_added
nil, // cells_deleted
nil, // cells_modified
nil, // old_row_count
nil, // new_row_count
nil, // old_cell_count
nil, // new_cell_count
}
}
numCellInserts, numCellDeletes := GetCellsAddedAndDeleted(dsp, newColLen)
rowsUnmodified := dsp.OldRowSize - dsp.Changes - dsp.Removes
return sql.Row{
tblName, // table_name
int64(rowsUnmodified), // rows_unmodified
int64(dsp.Adds), // rows_added
int64(dsp.Removes), // rows_deleted
int64(dsp.Changes), // rows_modified
int64(numCellInserts), // cells_added
int64(numCellDeletes), // cells_deleted
int64(dsp.CellChanges), // cells_modified
int64(dsp.OldRowSize), // old_row_count
int64(dsp.NewRowSize), // new_row_count
int64(dsp.OldCellSize), // old_cell_count
int64(dsp.NewCellSize), // new_cell_count
}
}
// GetCellsAddedAndDeleted calculates cells added and deleted given diff.DiffStatProgress and the column count of
// the toCommit table. We use rows added and deleted to calculate cells added and deleted, but that alone does not
// include cells added and deleted by schema changes, so we fill those in using the total number of cells in each
// commit's table.
func GetCellsAddedAndDeleted(acc diff.DiffStatProgress, newColLen int) (uint64, uint64) {
var numCellInserts, numCellDeletes float64
rowToCellInserts := float64(acc.Adds) * float64(newColLen)
rowToCellDeletes := float64(acc.Removes) * float64(newColLen)
cellDiff := float64(acc.NewCellSize) - float64(acc.OldCellSize)
if cellDiff > 0 {
numCellInserts = cellDiff + rowToCellDeletes
numCellDeletes = rowToCellDeletes
} else if cellDiff < 0 {
numCellInserts = rowToCellInserts
numCellDeletes = math.Abs(cellDiff) + rowToCellInserts
} else {
if rowToCellInserts != rowToCellDeletes {
numCellDeletes = math.Max(rowToCellDeletes, rowToCellInserts)
numCellInserts = math.Max(rowToCellDeletes, rowToCellInserts)
} else {
numCellDeletes = rowToCellDeletes
numCellInserts = rowToCellInserts
}
}
return uint64(numCellInserts), uint64(numCellDeletes)
}
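To make the arithmetic concrete, here is a self-contained worked example; statProgress is a stand-in for the fields of diff.DiffStatProgress that the real function reads, and the numbers are invented:

package main

import (
	"fmt"
	"math"
)

type statProgress struct {
	Adds, Removes, NewCellSize, OldCellSize uint64
}

func cellsAddedAndDeleted(acc statProgress, newColLen int) (uint64, uint64) {
	rowToCellInserts := float64(acc.Adds) * float64(newColLen)
	rowToCellDeletes := float64(acc.Removes) * float64(newColLen)
	cellDiff := float64(acc.NewCellSize) - float64(acc.OldCellSize)
	switch {
	case cellDiff > 0: // net growth beyond row churn implies added columns
		return uint64(cellDiff + rowToCellDeletes), uint64(rowToCellDeletes)
	case cellDiff < 0: // net shrinkage implies dropped columns
		return uint64(rowToCellInserts), uint64(math.Abs(cellDiff) + rowToCellInserts)
	default: // total cell count unchanged: the row-derived counts decide
		m := math.Max(rowToCellDeletes, rowToCellInserts)
		return uint64(m), uint64(m)
	}
}

func main() {
	// 2 rows added and 1 deleted on a 3-column table whose total cell count
	// grew from 30 to 36: cells_added = 6 + 3 = 9, cells_deleted = 1 * 3 = 3.
	a, d := cellsAddedAndDeleted(statProgress{Adds: 2, Removes: 1, OldCellSize: 30, NewCellSize: 36}, 3)
	fmt.Println(a, d) // prints: 9 3
}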
@@ -15,19 +15,16 @@
package sqle
import (
"errors"
"fmt"
"io"
"math"
"sort"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/types"
"golang.org/x/sync/errgroup"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
)
@@ -44,18 +41,11 @@ type DiffSummaryTableFunction struct {
}
var diffSummaryTableSchema = sql.Schema{
&sql.Column{Name: "table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "rows_unmodified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_added", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_deleted", Type: types.Int64, Nullable: true},
&sql.Column{Name: "rows_modified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_added", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_deleted", Type: types.Int64, Nullable: true},
&sql.Column{Name: "cells_modified", Type: types.Int64, Nullable: true},
&sql.Column{Name: "old_row_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "new_row_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "old_cell_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "new_cell_count", Type: types.Int64, Nullable: true},
&sql.Column{Name: "from_table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "to_table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "diff_type", Type: types.Text, Nullable: false},
&sql.Column{Name: "data_change", Type: types.Boolean, Nullable: false},
&sql.Column{Name: "schema_change", Type: types.Boolean, Nullable: false},
}
// NewInstance creates a new instance of TableFunction interface
@@ -254,64 +244,73 @@ func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.
return nil, fmt.Errorf("unexpected database type: %T", ds.database)
}
fromCommitStr, toCommitStr, err := loadCommitStrings(ctx, fromCommitVal, toCommitVal, dotCommitVal, sqledb)
fromDetails, toDetails, err := loadDetailsForRefs(ctx, fromCommitVal, toCommitVal, dotCommitVal, sqledb)
if err != nil {
return nil, err
}
sess := dsess.DSessFromSess(ctx.Session)
fromRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), fromCommitStr)
deltas, err := diff.GetTableDeltas(ctx, fromDetails.root, toDetails.root)
if err != nil {
return nil, err
}
toRoot, _, err := sess.ResolveRootForRef(ctx, sqledb.Name(), toCommitStr)
if err != nil {
return nil, err
}
deltas, err := diff.GetTableDeltas(ctx, fromRoot, toRoot)
if err != nil {
return nil, err
}
sort.Slice(deltas, func(i, j int) bool {
return strings.Compare(deltas[i].ToName, deltas[j].ToName) < 0
})
// If tableNameExpr is defined, return a diff summary result for that single table
if ds.tableNameExpr != nil {
delta := findMatchingDelta(deltas, tableName)
diffSum, hasDiff, err := getDiffSummaryNodeFromDelta(ctx, delta, fromRoot, toRoot, tableName)
summ, err := getSummaryForDelta(ctx, delta, sqledb, fromDetails, toDetails, true)
if err != nil {
return nil, err
}
if !hasDiff {
return NewDiffSummaryTableFunctionRowIter([]diffSummaryNode{}), nil
summs := []*diff.TableDeltaSummary{}
if summ != nil {
summs = []*diff.TableDeltaSummary{summ}
}
return NewDiffSummaryTableFunctionRowIter([]diffSummaryNode{diffSum}), nil
return NewDiffSummaryTableFunctionRowIter(summs), nil
}
var diffSummaries []diffSummaryNode
var diffSummaries []*diff.TableDeltaSummary
for _, delta := range deltas {
tblName := delta.ToName
if tblName == "" {
tblName = delta.FromName
}
diffSum, hasDiff, err := getDiffSummaryNodeFromDelta(ctx, delta, fromRoot, toRoot, tblName)
summ, err := getSummaryForDelta(ctx, delta, sqledb, fromDetails, toDetails, false)
if err != nil {
if errors.Is(err, diff.ErrPrimaryKeySetChanged) {
ctx.Warn(dtables.PrimaryKeyChangeWarningCode, fmt.Sprintf("summary for table %s cannot be determined. Primary key set changed.", tblName))
// Report an empty diff for tables that have primary key set changes
diffSummaries = append(diffSummaries, diffSummaryNode{tblName: tblName})
continue
}
return nil, err
}
if hasDiff {
diffSummaries = append(diffSummaries, diffSum)
if summ != nil {
diffSummaries = append(diffSummaries, summ)
}
}
return NewDiffSummaryTableFunctionRowIter(diffSummaries), nil
}
func getSummaryForDelta(ctx *sql.Context, delta diff.TableDelta, sqledb SqlDatabase, fromDetails, toDetails *refDetails, shouldErrorOnPKChange bool) (*diff.TableDeltaSummary, error) {
if delta.FromTable == nil && delta.ToTable == nil {
return nil, nil
}
if !schema.ArePrimaryKeySetsDiffable(delta.Format(), delta.FromSch, delta.ToSch) {
if shouldErrorOnPKChange {
return nil, fmt.Errorf("failed to compute diff summary for table %s: %w", delta.CurName(), diff.ErrPrimaryKeySetChanged)
}
ctx.Warn(dtables.PrimaryKeyChangeWarningCode, fmt.Sprintf(dtables.PrimaryKeyChangeWarning, fromDetails.hashStr, toDetails.hashStr))
return nil, nil
}
summ, err := delta.GetSummary(ctx)
if err != nil {
return nil, err
}
return summ, nil
}
// evaluateArguments returns fromCommitVal, toCommitVal, dotCommitVal, and tableName.
// It evaluates the argument expressions to turn them into values this DiffSummaryTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
@@ -351,107 +350,6 @@ func (ds *DiffSummaryTableFunction) evaluateArguments() (interface{}, interface{
return fromCommitVal, toCommitVal, nil, tableName, nil
}
// getDiffSummaryNodeFromDelta returns a diffSummaryNode object and whether there is a data diff. It gets the tables
// from the roots and computes the diff summary when a valid table exists in at least one of fromRoot and toRoot.
func getDiffSummaryNodeFromDelta(ctx *sql.Context, delta diff.TableDelta, fromRoot, toRoot *doltdb.RootValue, tableName string) (diffSummaryNode, bool, error) {
var oldColLen int
var newColLen int
fromTable, _, fromTableExists, err := fromRoot.GetTableInsensitive(ctx, tableName)
if err != nil {
return diffSummaryNode{}, false, err
}
if fromTableExists {
fromSch, err := fromTable.GetSchema(ctx)
if err != nil {
return diffSummaryNode{}, false, err
}
oldColLen = len(fromSch.GetAllCols().GetColumns())
}
toTable, _, toTableExists, err := toRoot.GetTableInsensitive(ctx, tableName)
if err != nil {
return diffSummaryNode{}, false, err
}
if toTableExists {
toSch, err := toTable.GetSchema(ctx)
if err != nil {
return diffSummaryNode{}, false, err
}
newColLen = len(toSch.GetAllCols().GetColumns())
}
if !fromTableExists && !toTableExists {
return diffSummaryNode{}, false, sql.ErrTableNotFound.New(tableName)
}
// no diff from tableDelta
if delta.FromTable == nil && delta.ToTable == nil {
return diffSummaryNode{}, false, nil
}
diffSum, hasDiff, keyless, err := getDiffSummary(ctx, delta)
if err != nil {
return diffSummaryNode{}, false, err
}
return diffSummaryNode{tableName, diffSum, oldColLen, newColLen, keyless}, hasDiff, nil
}
// getDiffSummary returns a diff.DiffSummaryProgress object and whether there is a data diff.
func getDiffSummary(ctx *sql.Context, td diff.TableDelta) (diff.DiffSummaryProgress, bool, bool, error) {
// adapted from diff_output.go
ch := make(chan diff.DiffSummaryProgress)
grp, ctx2 := errgroup.WithContext(ctx)
grp.Go(func() error {
defer close(ch)
err := diff.SummaryForTableDelta(ctx2, ch, td)
return err
})
acc := diff.DiffSummaryProgress{}
var count int64
grp.Go(func() error {
for {
select {
case p, ok := <-ch:
if !ok {
return nil
}
acc.Adds += p.Adds
acc.Removes += p.Removes
acc.Changes += p.Changes
acc.CellChanges += p.CellChanges
acc.NewRowSize += p.NewRowSize
acc.OldRowSize += p.OldRowSize
acc.NewCellSize += p.NewCellSize
acc.OldCellSize += p.OldCellSize
count++
case <-ctx2.Done():
return ctx2.Err()
}
}
})
if err := grp.Wait(); err != nil {
return diff.DiffSummaryProgress{}, false, false, err
}
keyless, err := td.IsKeyless(ctx)
if err != nil {
return diff.DiffSummaryProgress{}, false, keyless, err
}
if (acc.Adds+acc.Removes+acc.Changes) == 0 && (acc.OldCellSize-acc.NewCellSize) == 0 {
return diff.DiffSummaryProgress{}, false, keyless, nil
}
return acc, true, keyless, nil
}
//------------------------------------
// diffSummaryTableFunctionRowIter
//------------------------------------
@@ -459,113 +357,48 @@ func getDiffSummary(ctx *sql.Context, td diff.TableDelta) (diff.DiffSummaryProgr
var _ sql.RowIter = &diffSummaryTableFunctionRowIter{}
type diffSummaryTableFunctionRowIter struct {
diffSums []diffSummaryNode
diffIdx int
summaries []*diff.TableDeltaSummary
diffIdx int
}
func (d *diffSummaryTableFunctionRowIter) incrementIndexes() {
d.diffIdx++
if d.diffIdx >= len(d.diffSums) {
if d.diffIdx >= len(d.summaries) {
d.diffIdx = 0
d.diffSums = nil
d.summaries = nil
}
}
type diffSummaryNode struct {
tblName string
diffSummary diff.DiffSummaryProgress
oldColLen int
newColLen int
keyless bool
}
func NewDiffSummaryTableFunctionRowIter(ds []diffSummaryNode) sql.RowIter {
func NewDiffSummaryTableFunctionRowIter(ds []*diff.TableDeltaSummary) sql.RowIter {
return &diffSummaryTableFunctionRowIter{
diffSums: ds,
summaries: ds,
}
}
func (d *diffSummaryTableFunctionRowIter) Next(ctx *sql.Context) (sql.Row, error) {
defer d.incrementIndexes()
if d.diffIdx >= len(d.diffSums) {
if d.diffIdx >= len(d.summaries) {
return nil, io.EOF
}
if d.diffSums == nil {
if d.summaries == nil {
return nil, io.EOF
}
ds := d.diffSums[d.diffIdx]
return getRowFromDiffSummary(ds.tblName, ds.diffSummary, ds.newColLen, ds.oldColLen, ds.keyless), nil
ds := d.summaries[d.diffIdx]
return getRowFromSummary(ds), nil
}
func (d *diffSummaryTableFunctionRowIter) Close(context *sql.Context) error {
return nil
}
// getRowFromDiffSummary takes a diff.DiffSummaryProgress and calculates rows_modified, cells_added, and cells_deleted.
// If the change in total cell count from the old to the new table does not equal the cells added and/or deleted by
// row changes alone, there must be schema changes, which contribute to cells_added and cells_deleted beyond the
// row count * column length numbers.
func getRowFromDiffSummary(tblName string, dsp diff.DiffSummaryProgress, newColLen, oldColLen int, keyless bool) sql.Row {
// if table is keyless table, match current CLI command result
if keyless {
return sql.Row{
tblName, // table_name
nil, // rows_unmodified
int64(dsp.Adds), // rows_added
int64(dsp.Removes), // rows_deleted
nil, // rows_modified
nil, // cells_added
nil, // cells_deleted
nil, // cells_modified
nil, // old_row_count
nil, // new_row_count
nil, // old_cell_count
nil, // new_cell_count
}
}
numCellInserts, numCellDeletes := GetCellsAddedAndDeleted(dsp, newColLen)
rowsUnmodified := dsp.OldRowSize - dsp.Changes - dsp.Removes
func getRowFromSummary(ds *diff.TableDeltaSummary) sql.Row {
return sql.Row{
tblName, // table_name
int64(rowsUnmodified), // rows_unmodified
int64(dsp.Adds), // rows_added
int64(dsp.Removes), // rows_deleted
int64(dsp.Changes), // rows_modified
int64(numCellInserts), // cells_added
int64(numCellDeletes), // cells_deleted
int64(dsp.CellChanges), // cells_modified
int64(dsp.OldRowSize), // old_row_count
int64(dsp.NewRowSize), // new_row_count
int64(dsp.OldCellSize), // old_cell_count
int64(dsp.NewCellSize), // new_cell_count
ds.FromTableName, // from_table_name
ds.ToTableName, // to_table_name
ds.DiffType, // diff_type
ds.DataChange, // data_change
ds.SchemaChange, // schema_change
}
}
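// A hedged illustration (assumed values, not taken from this commit's tests): with
// the summary-based row above, dolt_diff_summary output for a freshly added table
// would look something like the following, replacing the old stat-style numeric columns.
func exampleSummaryRow() sql.Row {
	return sql.Row{
		"",      // from_table_name: assumed empty for an added table
		"t",     // to_table_name
		"added", // diff_type
		true,    // data_change
		true,    // schema_change
	}
}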
// GetCellsAddedAndDeleted calculates the cells added and deleted given a diff.DiffSummaryProgress and the toCommit
// table's column length. Rows added and deleted account for most cell changes, but not for cells added or deleted
// by schema changes; those are filled in here using the total number of cells in each commit's table.
func GetCellsAddedAndDeleted(acc diff.DiffSummaryProgress, newColLen int) (uint64, uint64) {
var numCellInserts, numCellDeletes float64
rowToCellInserts := float64(acc.Adds) * float64(newColLen)
rowToCellDeletes := float64(acc.Removes) * float64(newColLen)
cellDiff := float64(acc.NewCellSize) - float64(acc.OldCellSize)
if cellDiff > 0 {
numCellInserts = cellDiff + rowToCellDeletes
numCellDeletes = rowToCellDeletes
} else if cellDiff < 0 {
numCellInserts = rowToCellInserts
numCellDeletes = math.Abs(cellDiff) + rowToCellInserts
} else {
if rowToCellInserts != rowToCellDeletes {
numCellDeletes = math.Max(rowToCellDeletes, rowToCellInserts)
numCellInserts = math.Max(rowToCellDeletes, rowToCellInserts)
} else {
numCellDeletes = rowToCellDeletes
numCellInserts = rowToCellInserts
}
}
return uint64(numCellInserts), uint64(numCellDeletes)
}
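// A minimal worked example of the accounting above (hypothetical numbers): with
// Adds=2, Removes=1, OldCellSize=9, NewCellSize=12, and newColLen=3, cellDiff is
// 12-9 = 3 > 0, so cells added = 3 + 1*3 = 6 and cells deleted = 1*3 = 3.
func exampleCellsAddedAndDeleted() {
	acc := diff.DiffSummaryProgress{Adds: 2, Removes: 1, OldCellSize: 9, NewCellSize: 12}
	added, deleted := GetCellsAddedAndDeleted(acc, 3)
	fmt.Println(added, deleted) // prints: 6 3
}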
+21 -5
View File
@@ -92,7 +92,6 @@ func DefaultSession(pro DoltDatabaseProvider) *DoltSession {
// NewDoltSession creates a DoltSession object from a standard sql.Session and 0 or more Database objects.
func NewDoltSession(
ctx *sql.Context,
sqlSess *sql.BaseSession,
pro DoltDatabaseProvider,
conf config.ReadWriteConfig,
@@ -444,15 +443,26 @@ func (d *DoltSession) CommitTransaction(ctx *sql.Context, tx sql.Transaction) er
}
}
// isDirty returns whether the working set for the named database is dirty
// TODO: remove the dbname parameter, return a global dirty bit
func (d *DoltSession) isDirty(ctx *sql.Context, dbName string) (bool, error) {
dbState, _, err := d.LookupDbState(ctx, dbName)
if err != nil {
return false, err
}
return dbState.dirty, nil
}
// CommitWorkingSet commits the working set for the transaction given, without creating a new dolt commit.
// Clients should typically use CommitTransaction, which performs additional checks, instead of this method.
func (d *DoltSession) CommitWorkingSet(ctx *sql.Context, dbName string, tx sql.Transaction) error {
dbState, _, err := d.LookupDbState(ctx, dbName)
dirty, err := d.isDirty(ctx, dbName)
if err != nil {
return err
}
if !dbState.dirty {
if !dirty {
return nil
}
@@ -603,15 +613,20 @@ func (d *DoltSession) Rollback(ctx *sql.Context, tx sql.Transaction) error {
return nil
}
dbState, ok, err := d.LookupDbState(ctx, dbName)
dirty, err := d.isDirty(ctx, dbName)
if err != nil {
return err
}
if !dbState.dirty {
if !dirty {
return nil
}
dbState, ok, err := d.LookupDbState(ctx, dbName)
if err != nil {
return err
}
dtx, ok := tx.(*DoltTransaction)
if !ok {
return fmt.Errorf("expected a DoltTransaction")
@@ -900,6 +915,7 @@ func (d *DoltSession) SwitchWorkingSet(
return err
}
// TODO: should this be an error if any database in the transaction is dirty, or just this one?
if sessionState.dirty {
return ErrWorkingSetChanges.New()
}
@@ -130,7 +130,7 @@ func handleStagedUnstagedTables(staged, unstaged []diff.TableDelta, itr *StatusI
itr.statuses[idx] = tblDiffTypeToLabel[diff.RemovedTable]
} else if td.IsRename() {
itr.tables[idx] = fmt.Sprintf("%s -> %s", td.FromName, td.ToName)
itr.statuses[idx] = tblDiffTypeToLabel[diff.RemovedTable]
itr.statuses[idx] = tblDiffTypeToLabel[diff.RenamedTable]
} else {
itr.tables[idx] = td.CurName()
itr.statuses[idx] = tblDiffTypeToLabel[diff.ModifiedTable]
@@ -45,23 +45,16 @@ var _ sql.FilteredTable = (*UnscopedDiffTable)(nil)
// UnscopedDiffTable is a sql.Table implementation of a system table that shows which tables have
// changed in each commit, across all branches.
type UnscopedDiffTable struct {
dbName string
ddb *doltdb.DoltDB
head *doltdb.Commit
partitionFilters []sql.Expression
commitCheck doltdb.CommitFilter
}
// tableChange is an internal data structure used to hold the results of processing
// a diff.TableDelta structure into the output data for this system table.
type tableChange struct {
tableName string
dataChange bool
schemaChange bool
}
// NewUnscopedDiffTable creates an UnscopedDiffTable
func NewUnscopedDiffTable(_ *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) sql.Table {
return &UnscopedDiffTable{ddb: ddb, head: head}
func NewUnscopedDiffTable(_ *sql.Context, dbName string, ddb *doltdb.DoltDB, head *doltdb.Commit) sql.Table {
return &UnscopedDiffTable{dbName: dbName, ddb: ddb, head: head}
}
// Filters returns the list of filters that are applied to this table.
@@ -71,6 +64,7 @@ func (dt *UnscopedDiffTable) Filters() []sql.Expression {
// HandledFilters returns the list of filters that will be handled by the table itself
func (dt *UnscopedDiffTable) HandledFilters(filters []sql.Expression) []sql.Expression {
filters = append(filters, dt.partitionFilters...)
dt.partitionFilters = FilterFilters(filters, ColumnPredicate(filterColumnNameSet))
return dt.partitionFilters
}
@@ -192,9 +186,9 @@ func (dt *UnscopedDiffTable) LookupPartitions(ctx *sql.Context, lookup sql.Index
func (dt *UnscopedDiffTable) newWorkingSetRowItr(ctx *sql.Context) (sql.RowIter, error) {
sess := dsess.DSessFromSess(ctx.Session)
roots, ok := sess.GetRoots(ctx, ctx.GetCurrentDatabase())
roots, ok := sess.GetRoots(ctx, dt.dbName)
if !ok {
return nil, fmt.Errorf("unable to lookup roots for database %s", ctx.GetCurrentDatabase())
return nil, fmt.Errorf("unable to lookup roots for database %s", dt.dbName)
}
staged, unstaged, err := diff.GetStagedUnstagedTableDeltas(ctx, roots)
@@ -239,20 +233,20 @@ func (d *doltDiffWorkingSetRowItr) Next(ctx *sql.Context) (sql.Row, error) {
return nil, io.EOF
}
change, err := processTableDelta(ctx, tableDelta)
change, err := tableDelta.GetSummary(ctx)
if err != nil {
return nil, err
}
sqlRow := sql.NewRow(
changeSet,
change.tableName,
change.TableName,
nil, // committer
nil, // email
nil, // date
nil, // message
change.dataChange,
change.schemaChange,
change.DataChange,
change.SchemaChange,
)
return sqlRow, nil
@@ -286,7 +280,7 @@ type doltDiffCommitHistoryRowItr struct {
commits []*doltdb.Commit
meta *datas.CommitMeta
hash hash.Hash
tableChanges []tableChange
tableChanges []diff.TableDeltaSummary
tableChangesIdx int
}
@@ -356,13 +350,13 @@ func (itr *doltDiffCommitHistoryRowItr) Next(ctx *sql.Context) (sql.Row, error)
return sql.NewRow(
h.String(),
tableChange.tableName,
tableChange.TableName,
meta.Name,
meta.Email,
meta.Time(),
meta.Description,
tableChange.dataChange,
tableChange.schemaChange,
tableChange.DataChange,
tableChange.SchemaChange,
), nil
}
@@ -397,7 +391,7 @@ func (itr *doltDiffCommitHistoryRowItr) loadTableChanges(ctx context.Context, co
// calculateTableChanges calculates the tables that changed in the specified commit, by comparing that
// commit with its immediate ancestor commit.
func (itr *doltDiffCommitHistoryRowItr) calculateTableChanges(ctx context.Context, commit *doltdb.Commit) ([]tableChange, error) {
func (itr *doltDiffCommitHistoryRowItr) calculateTableChanges(ctx context.Context, commit *doltdb.Commit) ([]diff.TableDeltaSummary, error) {
if len(commit.DatasParents()) == 0 {
return nil, nil
}
@@ -422,9 +416,9 @@ func (itr *doltDiffCommitHistoryRowItr) calculateTableChanges(ctx context.Contex
return nil, err
}
tableChanges := make([]tableChange, len(deltas))
tableChanges := make([]diff.TableDeltaSummary, len(deltas))
for i := 0; i < len(deltas); i++ {
change, err := processTableDelta(itr.ctx, deltas[i])
change, err := deltas[i].GetSummary(itr.ctx)
if err != nil {
return nil, err
}
@@ -440,68 +434,6 @@ func (itr *doltDiffCommitHistoryRowItr) calculateTableChanges(ctx context.Contex
return tableChanges, nil
}
// processTableDelta processes the specified TableDelta to determine what kind of change it was (i.e. table drop,
// table rename, table create, or data update) and returns a tableChange struct representing the change.
func processTableDelta(ctx *sql.Context, delta diff.TableDelta) (*tableChange, error) {
// Dropping a table is always a schema change, and also a data change if the table contained data
if delta.IsDrop() {
isEmpty, err := isTableDataEmpty(ctx, delta.FromTable)
if err != nil {
return nil, err
}
return &tableChange{
tableName: delta.FromName,
dataChange: !isEmpty,
schemaChange: true,
}, nil
}
// Renaming a table is always a schema change, and also a data change if the table data differs
if delta.IsRename() {
dataChanged, err := delta.HasHashChanged()
if err != nil {
return nil, err
}
return &tableChange{
tableName: delta.ToName,
dataChange: dataChanged,
schemaChange: true,
}, nil
}
// Creating a table is always a schema change, and also a data change if data was inserted
if delta.IsAdd() {
isEmpty, err := isTableDataEmpty(ctx, delta.ToTable)
if err != nil {
return nil, err
}
return &tableChange{
tableName: delta.ToName,
dataChange: !isEmpty,
schemaChange: true,
}, nil
}
dataChanged, err := delta.HasHashChanged()
if err != nil {
return nil, err
}
schemaChanged, err := delta.HasSchemaChanged(ctx)
if err != nil {
return nil, err
}
return &tableChange{
tableName: delta.ToName,
dataChange: dataChanged,
schemaChange: schemaChanged,
}, nil
}
// Close closes the iterator.
func (itr *doltDiffCommitHistoryRowItr) Close(*sql.Context) error {
return nil
@@ -26,7 +26,7 @@ import (
"github.com/dolthub/go-mysql-server/enginetest/scriptgen/setup"
"github.com/dolthub/go-mysql-server/server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/go-mysql-server/sql/analyzer"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/go-mysql-server/sql/plan"
gmstypes "github.com/dolthub/go-mysql-server/sql/types"
@@ -48,7 +48,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo" CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 84
const SkipPreparedsCount = 83
const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
@@ -193,49 +193,24 @@ func TestSingleQueryPrepared(t *testing.T) {
func TestSingleScriptPrepared(t *testing.T) {
t.Skip()
s := []setup.SetupScript{
{
"create table test (pk int primary key, c1 int)",
"call dolt_add('.')",
"insert into test values (0,0), (1,1);",
"set @Commit1 = dolt_commit('-am', 'creating table');",
"call dolt_branch('-c', 'main', 'newb')",
"alter table test add column c2 int;",
"set @Commit2 = dolt_commit('-am', 'alter table');",
var script = queries.ScriptTest{
Name: "table with commit column should maintain its data in diff",
SetUpScript: []string{
"CREATE TABLE t (pk int PRIMARY KEY, commit varchar(20));",
"CALL DOLT_ADD('.');",
"CALL dolt_commit('-am', 'creating table t');",
"INSERT INTO t VALUES (1, '123456');",
"CALL dolt_commit('-am', 'insert data');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT to_pk, char_length(to_commit), from_pk, char_length(from_commit), diff_type from dolt_diff_t;",
Expected: []sql.Row{{1, 32, nil, 32, "added"}},
},
},
}
tt := queries.QueryTest{
Query: "select * from test as of 'HEAD~2' where pk=?",
Bindings: map[string]sql.Expression{
"v1": expression.NewLiteral(0, gmstypes.Int8),
},
Expected: []sql.Row{{0, 0}},
}
harness := newDoltHarness(t)
harness.Setup(setup.MydbData, s)
e, err := harness.NewEngine(t)
defer e.Close()
require.NoError(t, err)
ctx := harness.NewContext()
//e.Analyzer.Debug = true
//e.Analyzer.Verbose = true
// full impl
pre1, sch1, rows1 := enginetest.MustQueryWithPreBindings(ctx, e, tt.Query, tt.Bindings)
fmt.Println(pre1, sch1, rows1)
// inline bindings
sch2, rows2 := enginetest.MustQueryWithBindings(ctx, e, tt.Query, tt.Bindings)
fmt.Println(sch2, rows2)
// no bindings
//sch3, rows3 := enginetest.MustQuery(ctx, e, rawQuery)
//fmt.Println(sch3, rows3)
enginetest.TestQueryWithContext(t, ctx, e, harness, tt.Query, tt.Expected, tt.ExpectedColumns, tt.Bindings)
enginetest.TestScriptPrepared(t, harness, script)
}
func TestVersionedQueries(t *testing.T) {
@@ -407,6 +382,34 @@ func TestSpatialScripts(t *testing.T) {
enginetest.TestSpatialScripts(t, h)
}
func TestSpatialScriptsPrepared(t *testing.T) {
enginetest.TestSpatialScriptsPrepared(t, newDoltHarness(t))
}
func TestSpatialIndexScripts(t *testing.T) {
skipOldFormat(t)
schema.EnableSpatialIndex = true
enginetest.TestSpatialIndexScripts(t, newDoltHarness(t))
}
func TestSpatialIndexScriptsPrepared(t *testing.T) {
skipOldFormat(t)
schema.EnableSpatialIndex = true
enginetest.TestSpatialIndexScriptsPrepared(t, newDoltHarness(t))
}
func TestSpatialIndexPlans(t *testing.T) {
skipOldFormat(t)
schema.EnableSpatialIndex = true
enginetest.TestSpatialIndexPlans(t, newDoltHarness(t))
}
func TestSpatialIndexPlansPrepared(t *testing.T) {
skipOldFormat(t)
schema.EnableSpatialIndex = true
enginetest.TestSpatialIndexPlansPrepared(t, newDoltHarness(t))
}
func TestTruncate(t *testing.T) {
h := newDoltHarness(t)
defer h.Close()
@@ -1513,6 +1516,28 @@ func TestDiffTableFunctionPrepared(t *testing.T) {
}
}
func TestDiffStatTableFunction(t *testing.T) {
harness := newDoltHarness(t)
harness.Setup(setup.MydbData)
for _, test := range DiffStatTableFunctionScriptTests {
harness.engine = nil
t.Run(test.Name, func(t *testing.T) {
enginetest.TestScript(t, harness, test)
})
}
}
func TestDiffStatTableFunctionPrepared(t *testing.T) {
harness := newDoltHarness(t)
harness.Setup(setup.MydbData)
for _, test := range DiffStatTableFunctionScriptTests {
harness.engine = nil
t.Run(test.Name, func(t *testing.T) {
enginetest.TestScriptPrepared(t, harness, test)
})
}
}
func TestDiffSummaryTableFunction(t *testing.T) {
harness := newDoltHarness(t)
defer harness.Close()
@@ -1645,6 +1670,13 @@ func mustNewEngine(t *testing.T, h enginetest.Harness) *gms.Engine {
return e
}
var biasedCosters = []analyzer.Coster{
analyzer.NewInnerBiasedCoster(),
analyzer.NewLookupBiasedCoster(),
analyzer.NewHashBiasedCoster(),
analyzer.NewMergeBiasedCoster(),
}
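// Running each query once per coster below exercises every join strategy
// (inner, lookup, hash, merge) against the system-table indexes.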
func TestSystemTableIndexes(t *testing.T) {
if !types.IsFormat_DOLT(types.Format_Default) {
t.Skip("only new format support system table indexing")
@@ -1656,23 +1688,27 @@ func TestSystemTableIndexes(t *testing.T) {
harness.SkipSetupCommit()
e := mustNewEngine(t, harness)
defer e.Close()
e.Analyzer.Coster = analyzer.NewMergeBiasedCoster()
ctx := enginetest.NewContext(harness)
for _, q := range stt.setup {
enginetest.RunQuery(t, e, harness, q)
}
for _, tt := range stt.queries {
t.Run(fmt.Sprintf("%s: %s", stt.name, tt.query), func(t *testing.T) {
if tt.skip {
t.Skip()
}
for i, c := range []string{"inner", "lookup", "hash", "merge"} {
e.Analyzer.Coster = biasedCosters[i]
for _, tt := range stt.queries {
t.Run(fmt.Sprintf("%s(%s): %s", stt.name, c, tt.query), func(t *testing.T) {
if tt.skip {
t.Skip()
}
ctx = ctx.WithQuery(tt.query)
if tt.exp != nil {
enginetest.TestQueryWithContext(t, ctx, e, harness, tt.query, tt.exp, nil, nil)
}
})
ctx = ctx.WithQuery(tt.query)
if tt.exp != nil {
enginetest.TestQueryWithContext(t, ctx, e, harness, tt.query, tt.exp, nil, nil)
}
})
}
}
}
}
@@ -105,6 +105,7 @@ func (d *DoltHarness) resetScripts() []setup.SetupScript {
}
var resetCmds []setup.SetupScript
resetCmds = append(resetCmds, setup.SetupScript{"SET foreign_key_checks=0;"})
for i := range dbs {
db := dbs[i]
resetCmds = append(resetCmds, setup.SetupScript{fmt.Sprintf("use %s", db)})
@@ -139,6 +140,7 @@ func (d *DoltHarness) resetScripts() []setup.SetupScript {
resetCmds = append(resetCmds, setup.SetupScript{"call dreset('--hard', 'head')"})
}
resetCmds = append(resetCmds, setup.SetupScript{"SET foreign_key_checks=1;"})
resetCmds = append(resetCmds, setup.SetupScript{"use mydb"})
return resetCmds
}
@@ -168,13 +170,7 @@ func (d *DoltHarness) NewEngine(t *testing.T) (*gms.Engine, error) {
d.provider = doltProvider
var err error
d.session, err = dsess.NewDoltSession(
sql.NewEmptyContext(),
enginetest.NewBaseSession(),
doltProvider,
d.multiRepoEnv.Config(),
d.branchControl,
)
d.session, err = dsess.NewDoltSession(enginetest.NewBaseSession(), doltProvider, d.multiRepoEnv.Config(), d.branchControl)
require.NoError(t, err)
e, err := enginetest.NewEngine(t, d, d.provider, d.setupData)
@@ -270,13 +266,7 @@ func (d *DoltHarness) newSessionWithClient(client sql.Client) *dsess.DoltSession
localConfig := d.multiRepoEnv.Config()
pro := d.session.Provider()
dSession, err := dsess.NewDoltSession(
enginetest.NewContext(d),
sql.NewBaseSessionWithClientServer("address", client, 1),
pro.(dsess.DoltDatabaseProvider),
localConfig,
d.branchControl,
)
dSession, err := dsess.NewDoltSession(sql.NewBaseSessionWithClientServer("address", client, 1), pro.(dsess.DoltDatabaseProvider), localConfig, d.branchControl)
require.NoError(d.t, err)
return dSession
}
@@ -306,13 +296,7 @@ func (d *DoltHarness) NewDatabases(names ...string) []sql.Database {
d.provider = doltProvider
var err error
d.session, err = dsess.NewDoltSession(
sql.NewEmptyContext(),
enginetest.NewBaseSession(),
doltProvider,
d.multiRepoEnv.Config(),
d.branchControl,
)
d.session, err = dsess.NewDoltSession(enginetest.NewBaseSession(), doltProvider, d.multiRepoEnv.Config(), d.branchControl)
require.NoError(d.t, err)
// TODO: the engine tests should do this for us
@@ -777,6 +777,52 @@ var DoltScripts = []queries.ScriptTest{
},
},
},
{
Name: "test hashof",
SetUpScript: []string{
"CREATE TABLE hashof_test (pk int primary key, c1 int)",
"INSERT INTO hashof_test values (1,1), (2,2), (3,3)",
"CALL DOLT_ADD('hashof_test')",
"CALL DOLT_COMMIT('-a', '-m', 'first commit')",
"SET @Commit1 = (SELECT commit_hash FROM DOLT_LOG() LIMIT 1)",
"INSERT INTO hashof_test values (4,4), (5,5), (6,6)",
"CALL DOLT_COMMIT('-a', '-m', 'second commit')",
"SET @Commit2 = (SELECT commit_hash from DOLT_LOG() LIMIT 1)",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT (hashof(@Commit1) = hashof(@Commit2))",
Expected: []sql.Row{{false}},
},
{
Query: "SELECT (hashof(@Commit1) = hashof('HEAD~1'))",
Expected: []sql.Row{
{true},
},
},
{
Query: "SELECT (hashof(@Commit2) = hashof('HEAD'))",
Expected: []sql.Row{
{true},
},
},
{
Query: "SELECT (hashof(@Commit2) = hashof('main'))",
Expected: []sql.Row{
{true},
},
},
{
Query: "SELECT hashof('non_branch')",
ExpectedErrStr: "invalid ref spec",
},
{
// Test that a short commit is invalid. This may change in the future.
Query: "SELECT hashof(left(@Commit2,30))",
ExpectedErrStr: "invalid ref spec",
},
},
},
}
func makeLargeInsert(sz int) string {
@@ -817,6 +863,20 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_stat should fail with a database access error
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_stat with dots should fail with a database access error
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~..main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// Without access to the database, dolt_diff_summary should fail with a database access error
User: "tester",
@@ -873,6 +933,34 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('main~..main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_stat should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~', 'main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_stat with dots should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~...main', 'test2');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, dolt_diff_stat should fail for all tables if the user has no access to any of the tables
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~', 'main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, dolt_diff_stat with dots should fail for all tables if the user has no access to any of the tables
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~...main');",
ExpectedErr: sql.ErrPrivilegeCheckFailed,
},
{
// With access to the db, but not the table, dolt_diff_summary should fail
User: "tester",
@@ -943,6 +1031,20 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT COUNT(*) FROM dolt_diff('main~..main', 'test');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_stat should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff_stat('main~', 'main');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_stat with dots should work
User: "tester",
Host: "localhost",
Query: "SELECT COUNT(*) FROM dolt_diff_stat('main~...main');",
Expected: []sql.Row{{1}},
},
{
// After granting access to the entire db, dolt_diff_summary should work
User: "tester",
@@ -985,6 +1087,13 @@ var DoltUserPrivTests = []queries.UserPrivilegeTest{
Query: "SELECT * FROM dolt_diff('main~...main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff_stat should fail
User: "tester",
Host: "localhost",
Query: "SELECT * FROM dolt_diff_stat('main~', 'main', 'test');",
ExpectedErr: sql.ErrDatabaseAccessDeniedForUser,
},
{
// After revoking access, dolt_diff_summary should fail
User: "tester",
File diff suppressed because it is too large
@@ -21,12 +21,14 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
sqltypes "github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
@@ -141,7 +143,9 @@ func validateIndexConsistency(
def schema.Index,
primary, secondary prolly.Map,
) error {
// TODO: fix this later
// TODO: the descriptors in the primary key are different
// from the ones in the secondary key; this test assumes
// they're the same
if len(def.PrefixLengths()) > 0 {
return nil
}
@@ -177,7 +181,16 @@ func validateKeylessIndex(ctx context.Context, sch schema.Schema, def schema.Ind
for i := range mapping {
j := mapping.MapOrdinal(i)
// first field in |value| is cardinality
builder.PutRaw(i, value.GetField(j+1))
field := value.GetField(j + 1)
if def.IsSpatial() {
geom, err := sqltypes.GeometryType{}.Convert(field[:len(field)-1])
if err != nil {
panic(err)
}
cell := index.ZCell(geom.(sqltypes.GeometryValue))
field = cell[:]
}
builder.PutRaw(i, field)
}
builder.PutRaw(idxDesc.Count()-1, hashId.GetField(0))
k := builder.Build(primary.Pool())
@@ -220,7 +233,16 @@ func validatePkIndex(ctx context.Context, sch schema.Schema, def schema.Index, p
if j < pkSize {
builder.PutRaw(i, key.GetField(j))
} else {
builder.PutRaw(i, value.GetField(j-pkSize))
field := value.GetField(j - pkSize)
if def.IsSpatial() {
geom, err := sqltypes.GeometryType{}.Convert(field[:len(field)-1])
if err != nil {
panic(err)
}
cell := index.ZCell(geom.(sqltypes.GeometryValue))
field = cell[:]
}
builder.PutRaw(i, field)
}
}
k := builder.Build(primary.Pool())
+74 -6
View File
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
sqltypes "github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
@@ -355,6 +356,7 @@ func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch
indexSch: idx.Schema(),
tableSch: sch,
unique: idx.IsUnique(),
spatial: idx.IsSpatial(),
isPk: false,
comment: idx.Comment(),
vrw: vrw,
@@ -483,6 +485,7 @@ type doltIndex struct {
indexSch schema.Schema
tableSch schema.Schema
unique bool
spatial bool
isPk bool
comment string
order sql.IndexOrder
@@ -569,12 +572,18 @@ func (di *doltIndex) getDurableState(ctx *sql.Context, ti DoltTableable) (*durab
return ret, nil
}
func (di *doltIndex) prollyRanges(ctx *sql.Context, ns tree.NodeStore, iranges ...sql.Range) ([]prolly.Range, error) {
func (di *doltIndex) prollyRanges(ctx *sql.Context, ns tree.NodeStore, ranges ...sql.Range) ([]prolly.Range, error) {
//todo(max): it is important that *doltIndexLookup maintains a reference
// to empty sqlRanges, otherwise the analyzer will dismiss the index and
// choose a less optimal lookup index. This is a GMS concern, so GMS should
// really not rely on the integrator to maintain this tenuous relationship.
ranges, err := pruneEmptyRanges(iranges)
var err error
if !di.spatial {
ranges, err = pruneEmptyRanges(ranges)
if err != nil {
return nil, err
}
}
pranges, err := di.prollyRangesFromSqlRanges(ctx, ns, ranges, di.keyBld)
if err != nil {
return nil, err
@@ -706,6 +715,10 @@ func (di *doltIndex) coversColumns(s *durableIndexState, cols []uint64) bool {
return false
}
if di.IsSpatial() {
return false
}
var idxCols *schema.ColCollection
if types.IsFormat_DOLT(di.Format()) {
// prolly indexes can cover an index lookup using
@@ -782,6 +795,11 @@ func (di *doltIndex) IsUnique() bool {
return di.unique
}
// IsSpatial implements sql.Index
func (di *doltIndex) IsSpatial() bool {
return di.spatial
}
// IsPrimaryKey implements DoltIndex.
func (di *doltIndex) IsPrimaryKey() bool {
return di.isPk
@@ -910,14 +928,64 @@ func (di *doltIndex) trimRangeCutValue(to int, keyPart interface{}) interface{}
return keyPart
}
func (di *doltIndex) prollySpatialRanges(ranges []sql.Range) ([]prolly.Range, error) {
// a spatial lookup should produce exactly one range
rng := ranges[0][0]
lower, upper := sql.GetRangeCutKey(rng.LowerBound), sql.GetRangeCutKey(rng.UpperBound)
minPoint, ok := lower.(sqltypes.Point)
if !ok {
return nil, fmt.Errorf("spatial index bounding box using non-point type")
}
maxPoint, ok := upper.(sqltypes.Point)
if !ok {
return nil, fmt.Errorf("spatial index bounding box using non-point type")
}
pranges := make([]prolly.Range, 65)
zMin := ZValue(minPoint)
zMax := ZValue(maxPoint)
// generate ranges for levels 0 through 64
for level := byte(0); level < byte(65); level++ {
minVal := ZMask(level, zMin)
maxVal := ZMask(level, zMax)
field := prolly.RangeField{
Exact: false,
Lo: prolly.Bound{
Binding: true,
Inclusive: true,
Value: minVal[:],
},
Hi: prolly.Bound{
Binding: true,
Inclusive: true,
Value: maxVal[:],
},
}
pranges[level] = prolly.Range{
Fields: []prolly.RangeField{field},
Desc: di.keyBld.Desc,
}
}
return pranges, nil
}
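// An interpretation of the 65 ranges above (an assumption; not stated in this
// diff): each indexed geometry is stored under a single (level, masked z-min)
// cell, where the level reflects its bounding-box size (see the ZMask/ZCell
// changes later in this diff). Since a matching geometry may sit at any level,
// a bounding-box lookup probes one masked [zMin, zMax] range per level, 0
// through 64, and rows outside the box are filtered out afterward.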
func (di *doltIndex) prollyRangesFromSqlRanges(ctx context.Context, ns tree.NodeStore, ranges []sql.Range, tb *val.TupleBuilder) ([]prolly.Range, error) {
ranges, err := pruneEmptyRanges(ranges)
if err != nil {
return nil, err
var err error
if !di.spatial {
ranges, err = pruneEmptyRanges(ranges)
if err != nil {
return nil, err
}
}
if di.spatial {
return di.prollySpatialRanges(ranges)
}
pranges := make([]prolly.Range, len(ranges))
for k, rng := range ranges {
fields := make([]prolly.RangeField, len(rng))
for j, expr := range rng {
@@ -16,9 +16,13 @@ package index_test
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"math/rand"
"sort"
"strings"
"testing"
@@ -1532,3 +1536,150 @@ func TestSplitNullsFromRange(t *testing.T) {
assert.Len(t, rs, 8)
})
}
func TestLexFloat(t *testing.T) {
t.Run("test edge case lex float values", func(t *testing.T) {
assert.Equal(t, uint64(0x0010000000000000), index.LexFloat(-math.MaxFloat64))
assert.Equal(t, uint64(0x7ffffffffffffffe), index.LexFloat(-math.SmallestNonzeroFloat64))
assert.Equal(t, uint64(0x8000000000000000), index.LexFloat(0.0))
assert.Equal(t, uint64(0x8000000000000001), index.LexFloat(math.SmallestNonzeroFloat64))
assert.Equal(t, uint64(0xffefffffffffffff), index.LexFloat(math.MaxFloat64))
assert.Equal(t, uint64(0xfff8000000000001), index.LexFloat(math.NaN()))
assert.Equal(t, uint64(0x0007fffffffffffe), index.LexFloat(-math.NaN()))
assert.Equal(t, uint64(0xfff0000000000000), index.LexFloat(math.Inf(1)))
assert.Equal(t, uint64(0x000fffffffffffff), index.LexFloat(math.Inf(-1)))
})
t.Run("test reverse lex float values", func(t *testing.T) {
assert.Equal(t, -math.MaxFloat64, index.UnLexFloat(0x0010000000000000))
assert.Equal(t, -math.SmallestNonzeroFloat64, index.UnLexFloat(0x7ffffffffffffffe))
assert.Equal(t, 0.0, index.UnLexFloat(0x8000000000000000))
assert.Equal(t, math.SmallestNonzeroFloat64, index.UnLexFloat(0x8000000000000001))
assert.Equal(t, math.MaxFloat64, index.UnLexFloat(0xffefffffffffffff))
assert.True(t, math.IsNaN(index.UnLexFloat(0xfff8000000000001)))
assert.True(t, math.IsNaN(index.UnLexFloat(0xfff7fffffffffffe)))
assert.True(t, math.IsInf(index.UnLexFloat(0xfff0000000000000), 1))
assert.True(t, math.IsInf(index.UnLexFloat(0x000fffffffffffff), -1))
})
t.Run("test sort lex float", func(t *testing.T) {
sortedFloats := []float64{
-math.MaxFloat64,
-1.0,
-0.5,
-0.123456789,
-math.SmallestNonzeroFloat64,
-0.0,
0.0,
math.SmallestNonzeroFloat64,
0.5,
0.987654321,
1.0,
math.MaxFloat64,
}
randFloats := append([]float64{}, sortedFloats...)
rand.Shuffle(len(randFloats), func(i, j int) {
randFloats[i], randFloats[j] = randFloats[j], randFloats[i]
})
sort.Slice(randFloats, func(i, j int) bool {
l1 := index.LexFloat(randFloats[i])
l2 := index.LexFloat(randFloats[j])
return l1 < l2
})
assert.Equal(t, sortedFloats, randFloats)
})
}
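// A minimal sketch of a totally ordered float encoding consistent with the
// assertions above (the technique is assumed; it is not necessarily the
// library's exact implementation): flip only the sign bit of non-negative
// floats and all bits of negative floats, so that unsigned integer comparison
// of the results matches float ordering.
func lexFloatSketch(f float64) uint64 {
	u := math.Float64bits(f)
	if u>>63 == 0 {
		return u ^ (1 << 63) // non-negative: set the sign bit
	}
	return ^u // negative: invert all bits, reversing their order
}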
func TestZValues(t *testing.T) {
tests := []struct {
p types.Point
e string
}{
{
p: types.Point{X: -5000, Y: -5000},
e: "0fff30f03f3fffffffffffffffffffff",
},
{
p: types.Point{X: -1, Y: -1},
e: "300000ffffffffffffffffffffffffff",
},
{
p: types.Point{X: -1, Y: 0},
e: "90000055555555555555555555555555",
},
{
p: types.Point{X: -1, Y: 1},
e: "9aaaaa55555555555555555555555555",
},
{
p: types.Point{X: 0, Y: -1},
e: "600000aaaaaaaaaaaaaaaaaaaaaaaaaa",
},
{
p: types.Point{X: 1, Y: -1},
e: "655555aaaaaaaaaaaaaaaaaaaaaaaaaa",
},
{
p: types.Point{X: 0, Y: 0},
e: "c0000000000000000000000000000000",
},
{
p: types.Point{X: 1, Y: 0},
e: "c5555500000000000000000000000000",
},
{
p: types.Point{X: 0, Y: 1},
e: "caaaaa00000000000000000000000000",
},
{
p: types.Point{X: 1, Y: 1},
e: "cfffff00000000000000000000000000",
},
{
p: types.Point{X: 2, Y: 2},
e: "f0000000000000000000000000000000",
},
{
p: types.Point{X: 50000, Y: 50000},
e: "f000fcc03ccc00000000000000000000",
},
}
t.Run("test z-values", func(t *testing.T) {
for _, test := range tests {
z := index.ZValue(test.p)
assert.Equal(t, test.e, fmt.Sprintf("%016x%016x", z[0], z[1]))
}
})
t.Run("test un-z-values", func(t *testing.T) {
for _, test := range tests {
v, _ := hex.DecodeString(test.e)
z := [2]uint64{}
z[0] = binary.BigEndian.Uint64(v[:8])
z[1] = binary.BigEndian.Uint64(v[8:])
assert.Equal(t, test.p, index.UnZValue(z))
}
})
t.Run("test sorting points by z-value", func(t *testing.T) {
sortedPoints := []types.Point{
{X: -5000, Y: -5000},
{X: -1, Y: -1},
{X: 1, Y: -1},
{X: -1, Y: 0},
{X: -1, Y: 1},
{X: 0, Y: 0},
{X: 1, Y: 0},
{X: 1, Y: 1},
{X: 2, Y: 2},
{X: 100, Y: 100},
}
randPoints := append([]types.Point{}, sortedPoints...)
rand.Shuffle(len(randPoints), func(i, j int) {
randPoints[i], randPoints[j] = randPoints[j], randPoints[i]
})
assert.Equal(t, sortedPoints, index.ZSort(randPoints))
})
}
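// A sketch consistent with the vectors above (assumed bit layout, not
// necessarily the library's exact code): lex-encode X and Y as in
// lexFloatSketch, then interleave their bits from the most significant bit
// down, with Y's bit ahead of X's in each pair, packing the 128 result bits
// into two uint64 words.
func zValueSketch(p types.Point) (z [2]uint64) {
	x, y := lexFloatSketch(p.X), lexFloatSketch(p.Y)
	for i := 0; i < 64; i++ {
		pair := ((y>>(63-i))&1)<<1 | (x>>(63-i))&1
		z[i/32] = z[i/32]<<2 | pair // the first 32 pairs fill z[0], the rest z[1]
	}
	return z
}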
@@ -111,7 +111,7 @@ func setupIndexes(t *testing.T, tableName, insertQuery string) (*sqle.Engine, *e
// Get an updated root to use for the rest of the test
ctx := sql.NewEmptyContext()
controller := branch_control.CreateDefaultController()
sess, err := dsess.NewDoltSession(ctx, ctx.Session.(*sql.BaseSession), pro, config.NewEmptyMapConfig(), controller)
sess, err := dsess.NewDoltSession(ctx.Session.(*sql.BaseSession), pro, config.NewEmptyMapConfig(), controller)
require.NoError(t, err)
roots, ok := sess.GetRoots(ctx, db.Name())
require.True(t, ok)
@@ -118,6 +118,8 @@ func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns tr
}
case val.CommitAddrEnc:
v, ok = td.GetCommitAddr(i, tup)
case val.CellEnc:
v, ok = td.GetCell(i, tup)
default:
panic("unknown val.encoding")
}
@@ -188,7 +190,7 @@ func PutField(ctx context.Context, ns tree.NodeStore, tb *val.TupleBuilder, i in
if len(geo) > math.MaxUint16 {
return ErrValueExceededMaxFieldSize
}
tb.PutGeometry(i, serializeGeometry(v))
tb.PutGeometry(i, geo)
case val.JSONAddrEnc:
buf, err := convJson(v)
if err != nil {
@@ -17,9 +17,8 @@ package index
import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/types"
)
+23 -9
View File
@@ -152,6 +152,21 @@ func ZSort(points []types.Point) []types.Point {
return points
}
// ZMask masks the z-value in bit pairs, shifting by an amount derived from the level
func ZMask(level byte, zVal [2]uint64) val.Cell {
cell := val.Cell{}
cell[0] = level
if level < 32 {
shamt := level << 1
binary.BigEndian.PutUint64(cell[1:], zVal[0])
binary.BigEndian.PutUint64(cell[9:], (zVal[1]>>shamt)<<shamt)
} else {
shamt := (level - 32) << 1
binary.BigEndian.PutUint64(cell[1:], (zVal[0]>>shamt)<<shamt)
}
return cell
}
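// A small worked check of the masking above (hypothetical values): level 1
// clears the lowest bit pair of the low word; level 32 drops the low word
// entirely while keeping the high word intact.
func zMaskExample() {
	z := [2]uint64{0xaaaaaaaaaaaaaaaa, 0xffffffffffffffff}
	c1 := ZMask(1, z)   // c1[0] = 1; bytes 1-8 hold z[0]; bytes 9-16 end in 0xfc
	c32 := ZMask(32, z) // c32[0] = 32; bytes 1-8 hold z[0]; bytes 9-16 are zero
	_, _ = c1, c32
}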
// ZCell converts the GeometryValue into a Cell
// Note: there is an inefficiency here where small polygons may be placed into a level that's significantly larger than necessary
func ZCell(v types.GeometryValue) val.Cell {
@@ -159,23 +174,22 @@ func ZCell(v types.GeometryValue) val.Cell {
zMin := ZValue(types.Point{X: bbox[0], Y: bbox[1]})
zMax := ZValue(types.Point{X: bbox[2], Y: bbox[3]})
cell := val.Cell{}
binary.BigEndian.PutUint64(cell.ZValue[:], zMin[0])
binary.BigEndian.PutUint64(cell.ZValue[8:], zMin[1])
if res := zMin[0] ^ zMax[0]; res != 0 {
cell.Level = byte(64 - bits.LeadingZeros64(res)/2)
// Level rounds up by adding 1 and dividing by two (same as a right shift by 1)
var level byte
if zMin[0] != zMax[0] {
level = byte((bits.Len64(zMin[0]^zMax[0])+1)>>1) + 32
} else {
cell.Level = byte(32 - bits.LeadingZeros64(zMin[1]^zMax[1])/2)
level = byte((bits.Len64(zMin[1]^zMax[1]) + 1) >> 1)
}
return cell
return ZMask(level, zMin)
}
// ZAddr converts the GeometryValue into a key: (level, min_z_val)
func ZAddr(v types.GeometryValue) [17]byte {
func ZAddr(v types.GeometryValue) val.Cell {
bbox := spatial.FindBBox(v)
zMin := ZValue(types.Point{X: bbox[0], Y: bbox[1]})
zMax := ZValue(types.Point{X: bbox[2], Y: bbox[3]})
addr := [17]byte{}
addr := val.Cell{}
binary.BigEndian.PutUint64(addr[1:], zMin[0])
binary.BigEndian.PutUint64(addr[9:], zMin[1])
if res := zMin[0] ^ zMax[0]; res != 0 {
@@ -262,3 +262,25 @@ func TestZSort(t *testing.T) {
assert.Equal(t, sortedGeoms, ZAddrSort(randomGeoms))
})
}
func TestZCell(t *testing.T) {
t.Run("test low level linestring", func(t *testing.T) {
line := types.LineString{Points: []types.Point{
{X: 0, Y: 0},
{X: math.SmallestNonzeroFloat64, Y: math.SmallestNonzeroFloat64},
}}
poly := types.Polygon{Lines: []types.LineString{line}}
z := ZCell(poly)
assert.Equal(t, "01c0000000000000000000000000000000", hex.EncodeToString(z[:]))
})
t.Run("test high level linestring", func(t *testing.T) {
line := types.LineString{Points: []types.Point{
{X: -1, Y: -1},
{X: 1, Y: 1},
}}
poly := types.Polygon{Lines: []types.LineString{line}}
z := ZCell(poly)
assert.Equal(t, "4000000000000000000000000000000000", hex.EncodeToString(z[:]))
})
}
@@ -243,6 +243,7 @@ type fmtIndex struct {
cols []schema.Column
unique bool
spatial bool
generated bool
comment string
}
@@ -281,6 +282,11 @@ func (idx fmtIndex) IsUnique() bool {
return idx.unique
}
// IsSpatial implements sql.Index
func (idx fmtIndex) IsSpatial() bool {
return idx.spatial
}
// Comment implements sql.Index
func (idx fmtIndex) Comment() string {
return idx.comment
+5 -2
View File
@@ -1358,6 +1358,7 @@ func (t *AlterableDoltTable) RewriteInserter(
prefixLengths,
schema.IndexProperties{
IsUnique: index.IsUnique(),
IsSpatial: index.IsSpatial(),
IsUserDefined: index.IsUserDefined(),
Comment: index.Comment(),
})
@@ -1799,7 +1800,7 @@ func (t *AlterableDoltTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) err
if err := branch_control.CheckAccess(ctx, branch_control.Permissions_Write); err != nil {
return err
}
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
if !schema.EnableSpatialIndex && idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
@@ -1820,6 +1821,7 @@ func (t *AlterableDoltTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) err
columns,
allocatePrefixLengths(idx.Columns),
idx.Constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Spatial,
true,
idx.Comment,
t.opts,
@@ -2174,7 +2176,7 @@ func (t *AlterableDoltTable) UpdateForeignKey(ctx *sql.Context, fkName string, s
// CreateIndexForForeignKey implements sql.ForeignKeyTable
func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.IndexDef) error {
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
if !schema.EnableSpatialIndex && idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
columns := make([]string, len(idx.Columns))
@@ -2194,6 +2196,7 @@ func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.
columns,
allocatePrefixLengths(idx.Columns),
idx.Constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Spatial,
false,
"",
t.opts,
+2 -1
View File
@@ -257,7 +257,7 @@ func (t *TempTable) IndexedAccess(_ sql.IndexLookup) sql.IndexedTable {
}
func (t *TempTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) error {
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
if !schema.EnableSpatialIndex && idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
cols := make([]string, len(idx.Columns))
@@ -272,6 +272,7 @@ func (t *TempTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) error {
cols,
allocatePrefixLengths(idx.Columns),
idx.Constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Spatial,
true,
idx.Comment,
t.opts,
+1 -7
View File
@@ -119,13 +119,7 @@ func NewTestSQLCtx(ctx context.Context) *sql.Context {
}
func NewTestSQLCtxWithProvider(ctx context.Context, pro dsess.DoltDatabaseProvider) *sql.Context {
s, err := dsess.NewDoltSession(
sql.NewEmptyContext(),
sql.NewBaseSession(),
pro,
config2.NewMapConfig(make(map[string]string)),
branch_control.CreateDefaultController(),
)
s, err := dsess.NewDoltSession(sql.NewBaseSession(), pro, config2.NewMapConfig(make(map[string]string)), branch_control.CreateDefaultController())
if err != nil {
panic(err)
}

Some files were not shown because too many files have changed in this diff