mirror of https://github.com/dolthub/dolt.git (synced 2026-02-11 18:49:14 -06:00)

merge with main
@@ -3,7 +3,7 @@
set -e

if [ "$#" -lt 9 ]; then
  echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber>"
  echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timePrefix> <actorPrefix> <format> <issueNumber> <initBigRepo> <nomsBinFormat>"
  exit 1
fi

@@ -16,8 +16,18 @@ timePrefix="$6"
actorPrefix="$7"
format="$8"
issueNumber="$9"
initBigRepo="${10}"
nomsBinFormat="${11}"
tpccRegex="tpcc%"

if [ -n "$initBigRepo" ]; then
  initBigRepo="\"--init-big-repo=$initBigRepo\","
fi

if [ -n "$nomsBinFormat" ]; then
  nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi

readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan')"
medianLatencyChangeReadsQuery="select f.test_name as read_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"

@@ -70,11 +80,12 @@ echo '
    "--results-dir='$timePrefix'",
    "--results-prefix='$actorPrefix'",
    "--withTpcc=true",
    '"$initBigRepo"'
    '"$nomsBinFormat"'
    "--sysbenchQueries='"$medianLatencyChangeReadsQuery"'",
    "--sysbenchQueries='"$medianLatencyChangeWritesQuery"'",
    "--tpccQueries='"$tpccLatencyQuery"'",
    "--tpccQueries='"$tpccTpsQuery"'",
    "--init-big-repo"
    "--tpccQueries='"$tpccTpsQuery"'"
  ]
}
],
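For readers skimming the diff: the two new conditionals turn the optional positional arguments into trailing JSON array elements only when they are actually set. A minimal standalone sketch of that interpolation (hypothetical values, separate from the script above):

```bash
#!/bin/sh
# When the optional arg is non-empty, it is rewritten into a quoted JSON
# array element with a trailing comma; when empty, it expands to nothing,
# so the generated job JSON stays valid either way.
initBigRepo="true"
if [ -n "$initBigRepo" ]; then
  initBigRepo="\"--init-big-repo=$initBigRepo\","
fi
echo '[ '"$initBigRepo"' "--withTpcc=true" ]'
# with initBigRepo set:   [ "--init-big-repo=true", "--withTpcc=true" ]
# with initBigRepo empty: [  "--withTpcc=true" ]
```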
@@ -3,7 +3,7 @@
set -e

if [ "$#" -lt 8 ]; then
  echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format>"
  echo "Usage: ./get-job-json.sh <jobname> <fromServer> <fromVersion> <toServer> <toVersion> <timeprefix> <actorprefix> <format> <initBigRepo> <nomsBinFormat>"
  exit 1
fi

@@ -15,9 +15,19 @@ toVersion="$5"
timeprefix="$6"
actorprefix="$7"
format="$8"
initBigRepo="$9"
nomsBinFormat="${10}"
precision="1"
tpccRegex="tpcc%"

if [ -n "$initBigRepo" ]; then
  initBigRepo="\"--init-big-repo=$initBigRepo\","
fi

if [ -n "$nomsBinFormat" ]; then
  nomsBinFormat="\"--noms-bin-format=$nomsBinFormat\","
fi

readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan')"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"

@@ -75,6 +85,8 @@ echo '
    "--results-dir='$timeprefix'",
    "--results-prefix='$actorprefix'",
    "--withTpcc=true",
    '"$initBigRepo"'
    '"$nomsBinFormat"'
    "--sysbenchQueries='"$medianLatencyMultiplierReadsQuery"'",
    "--sysbenchQueries='"$meanMultiplierReadsQuery"'",
    "--sysbenchQueries='"$medianLatencyMultiplierWritesQuery"'",
@@ -36,7 +36,11 @@ short=${TO_VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"

jobname="$actorShort"
# random sleep
sleep 0.$[ ( $RANDOM % 10 ) + 1 ]s

timesuffix=`date +%s%N`
jobname="$actorShort-$timesuffix"

timeprefix=$(date +%Y/%m/%d)

@@ -51,7 +55,19 @@ fi
# or default to -1
issuenumber=${ISSUE_NUMBER:-"-1"}

source "$TEMPLATE_SCRIPT" "$jobname" "$FROM_SERVER" "$FROM_VERSION" "$TO_SERVER" "$TO_VERSION" "$timeprefix" "$actorprefix" "$format" "$issuenumber" > job.json
source \
  "$TEMPLATE_SCRIPT" \
  "$jobname" \
  "$FROM_SERVER" \
  "$FROM_VERSION" \
  "$TO_SERVER" \
  "$TO_VERSION" \
  "$timeprefix" \
  "$actorprefix" \
  "$format" \
  "$issuenumber" \
  "$INIT_BIG_REPO" \
  "$NOMS_BIN_FORMAT" > job.json

out=$(KUBECONFIG="$KUBECONFIG" kubectl apply -f job.json || true)
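The run-benchmarks.sh change above makes Kubernetes job names collision-resistant: the name previously derived only from actor and commit, so two concurrent triggers could clash. A hedged sketch of the new naming behavior (illustrative values; `$(( ... ))` is the POSIX form of the script's older `$[ ... ]` arithmetic):

```bash
#!/bin/bash
ACTOR="SomeUser"
TO_VERSION="f302c0189c8a0000000000000000000000000000"  # hypothetical sha

short=${TO_VERSION:0:8}
lowered=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]')
actorShort="$lowered-$short"

# Stagger runners that fire in the same instant, then take a nanosecond
# timestamp so each submitted job gets a distinct name.
sleep 0.$(( (RANDOM % 10) + 1 ))s
timesuffix=$(date +%s%N)

echo "$actorShort-$timesuffix"   # e.g. someuser-f302c018-1654322345123456789
```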
100 .github/workflows/ci-performance-benchmarks-new-format.yaml vendored Normal file
@@ -0,0 +1,100 @@
name: Benchmark Pull Requests (New Format)

on:
  pull_request:
    types: [ opened ]
  issue_comment:
    types: [ created ]

jobs:
  validate-commentor:
    runs-on: ubuntu-18.04
    outputs:
      valid: ${{ steps.set_valid.outputs.valid }}
    steps:
      - uses: actions/checkout@v2
      - name: Validate Commentor
        id: set_valid
        run: ./.github/scripts/performance-benchmarking/validate-commentor.sh "$ACTOR"
        env:
          ACTOR: ${{ github.actor }}

  check-comments:
    runs-on: ubuntu-18.04
    needs: validate-commentor
    if: ${{ needs.validate-commentor.outputs.valid == 'true' }}
    outputs:
      benchmark: ${{ steps.set_benchmark.outputs.benchmark }}
      comment-body: ${{ steps.set_body.outputs.body }}
    steps:
      - name: Check for Deploy Trigger
        uses: dolthub/pull-request-comment-trigger@master
        id: check
        with:
          trigger: '#newbenchmark'
          reaction: rocket
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Set Benchmark
        if: ${{ steps.check.outputs.triggered == 'true' }}
        id: set_benchmark
        run: |
          echo "::set-output name=benchmark::true"

  performance:
    runs-on: ubuntu-18.04
    needs: [validate-commentor, check-comments]
    if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
    name: Benchmark Performance
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: azure/setup-kubectl@v2.0
        with:
          version: 'v1.23.6'
      - name: Install aws-iam-authenticator
        run: |
          curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
          chmod +x ./aws-iam-authenticator && \
          sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
          aws-iam-authenticator version
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-west-2
      - uses: xt0rted/pull-request-comment-branch@v1
        id: comment-branch
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Create and Auth kubeconfig
        run: |
          echo "$CONFIG" > kubeconfig
          KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
          KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
          KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
        env:
          CONFIG: ${{ secrets.CORP_KUBECONFIG }}
      - name: Get pull number
        uses: actions/github-script@v3
        id: get_pull_number
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: core.setOutput("pull_number", JSON.stringify(context.issue.number));
      - name: Run benchmarks
        id: run-benchmarks
        run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
        env:
          FROM_SERVER: 'dolt'
          FROM_VERSION: ${{ github.sha }}
          TO_SERVER: 'dolt'
          TO_VERSION: ${{ steps.comment-branch.outputs.head_sha }}
          MODE: 'pullRequest'
          ISSUE_NUMBER: ${{ steps.get_pull_number.outputs.pull_number }}
          ACTOR: ${{ github.actor }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: "false"
          NOMS_BIN_FORMAT: "__DOLT_1__"
          TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-dolt-dolt-job-json.sh"
@@ -76,4 +76,5 @@ jobs:
          ACTOR_EMAIL: ${{ needs.set-version-actor.outputs.actor_email }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          NOMS_BIN_FORMAT: "__LD_1__"
          TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-mysql-dolt-job-json.sh"

@@ -95,4 +95,6 @@ jobs:
          ACTOR: ${{ github.actor }}
          REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
          KUBECONFIG: "./kubeconfig"
          INIT_BIG_REPO: "true"
          NOMS_BIN_FORMAT: "__LD_1__"
          TEMPLATE_SCRIPT: "./.github/scripts/performance-benchmarking/get-dolt-dolt-job-json.sh"
198 go/Godeps/LICENSES generated
@@ -3934,204 +3934,6 @@ testdata/COPYING file for details.
= LICENSE 6c5ae159496bacd951e6cd937d5e6427b172a2a6284bfdf1954ae338 =
================================================================================

================================================================================
= github.com/golang/glog licensed under: =

[removed hunk body: the full verbatim text of the Apache License, Version 2.0, previously vendored for github.com/golang/glog]

= LICENSE 402f6aca074a87cb9a2cd5b95d9af0d840cdd50ec73029e22195c7b8 =
================================================================================

================================================================================
= github.com/golang/groupcache licensed under: =
@@ -220,7 +220,7 @@ func moveBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR
	force := apr.Contains(forceFlag)
	src := apr.Arg(0)
	dest := apr.Arg(1)
	err := actions.RenameBranch(ctx, dEnv, src, apr.Arg(1), force)
	err := actions.RenameBranch(ctx, dEnv.DbData(), dEnv.Config, src, apr.Arg(1), force)

	var verr errhand.VerboseError
	if err != nil {

@@ -285,7 +285,7 @@ func handleDeleteBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser
	for i := 0; i < apr.NArg(); i++ {
		brName := apr.Arg(i)

		err := actions.DeleteBranch(ctx, dEnv, brName, actions.DeleteOptions{
		err := actions.DeleteBranch(ctx, dEnv.DbData(), dEnv.Config, brName, actions.DeleteOptions{
			Force:  force,
			Remote: apr.Contains(remoteFlag),
		})
@@ -32,6 +32,7 @@ import (
	"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
	"github.com/dolthub/dolt/go/libraries/doltcore/env"
	_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
)

// Serve starts a MySQL-compatible server. Returns any errors that were encountered.

@@ -62,6 +63,7 @@ func Serve(
	}
	serverController.StopServer()
	serverController.serverStopped(closeError)
	sqlserver.SetRunningServer(nil)
}()

if startError = ValidateConfig(serverConfig); startError != nil {

@@ -195,6 +197,8 @@ func Serve(
	if startError != nil {
		cli.PrintErr(startError)
		return
	} else {
		sqlserver.SetRunningServer(mySQLServer)
	}

	var metSrv *http.Server
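The `SetRunningServer(...)` calls added to `Serve` pair with the `GetRunningServer`/`RunningInServerMode` lookups used later in the `dolt_branch` function (further down in this diff). The declarations themselves are not part of the diff; a minimal sketch of the pattern they imply, where everything beyond the three function names is an assumption:

```go
package sqlserver

import "sync"

// Server is a hypothetical stand-in for the real server type; this diff only
// shows the call sites, not the declarations.
type Server struct{}

var (
	mu      sync.Mutex
	running *Server
)

// SetRunningServer records the server for the lifetime of Serve; Serve passes
// nil on shutdown so later lookups never see a dead server.
func SetRunningServer(s *Server) {
	mu.Lock()
	defer mu.Unlock()
	running = s
}

// GetRunningServer returns the currently registered server, or nil.
func GetRunningServer() *Server {
	mu.Lock()
	defer mu.Unlock()
	return running
}

// RunningInServerMode reports whether a server has been registered.
func RunningInServerMode() bool {
	return GetRunningServer() != nil
}
```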
@@ -154,17 +154,17 @@ func TestServerGoodParams(t *testing.T) {

	tests := []ServerConfig{
		DefaultServerConfig(),
		DefaultServerConfig().withHost("127.0.0.1").withPort(15400),
		DefaultServerConfig().withHost("localhost").withPort(15401),
		//DefaultServerConfig().withHost("::1").withPort(15402), // Fails on Jenkins, assuming no IPv6 support
		DefaultServerConfig().withUser("testusername").withPort(15403),
		DefaultServerConfig().withPassword("hunter2").withPort(15404),
		DefaultServerConfig().withTimeout(0).withPort(15405),
		DefaultServerConfig().withTimeout(5).withPort(15406),
		DefaultServerConfig().withLogLevel(LogLevel_Debug).withPort(15407),
		DefaultServerConfig().withLogLevel(LogLevel_Info).withPort(15408),
		DefaultServerConfig().withReadOnly(true).withPort(15409),
		DefaultServerConfig().withUser("testusernamE").withPassword("hunter2").withTimeout(4).withPort(15410),
		DefaultServerConfig().withHost("127.0.0.1").WithPort(15400),
		DefaultServerConfig().withHost("localhost").WithPort(15401),
		//DefaultServerConfig().withHost("::1").WithPort(15402), // Fails on Jenkins, assuming no IPv6 support
		DefaultServerConfig().withUser("testusername").WithPort(15403),
		DefaultServerConfig().withPassword("hunter2").WithPort(15404),
		DefaultServerConfig().withTimeout(0).WithPort(15405),
		DefaultServerConfig().withTimeout(5).WithPort(15406),
		DefaultServerConfig().withLogLevel(LogLevel_Debug).WithPort(15407),
		DefaultServerConfig().withLogLevel(LogLevel_Info).WithPort(15408),
		DefaultServerConfig().withReadOnly(true).WithPort(15409),
		DefaultServerConfig().withUser("testusernamE").withPassword("hunter2").withTimeout(4).WithPort(15410),
	}

	for _, test := range tests {

@@ -188,7 +188,7 @@ func TestServerGoodParams(t *testing.T) {

func TestServerSelect(t *testing.T) {
	env := dtestutils.CreateEnvWithSeedData(t)
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).withPort(15300)
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15300)

	sc := NewServerController()
	defer sc.StopServer()

@@ -263,7 +263,7 @@ func TestServerFailsIfPortInUse(t *testing.T) {

func TestServerSetDefaultBranch(t *testing.T) {
	dEnv := dtestutils.CreateEnvWithSeedData(t)
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).withPort(15302)
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15302)

	sc := NewServerController()
	defer sc.StopServer()

@@ -413,7 +413,7 @@ func TestReadReplica(t *testing.T) {

	// start server as read replica
	sc := NewServerController()
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).withPort(15303)
	serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15303)

	func() {
		os.Chdir(multiSetup.DbPaths[readReplicaDbName])
@@ -267,8 +267,8 @@ func (cfg *commandLineServerConfig) withHost(host string) *commandLineServerConf
	return cfg
}

// withPort updates the port and returns the called `*commandLineServerConfig`, which is useful for chaining calls.
func (cfg *commandLineServerConfig) withPort(port int) *commandLineServerConfig {
// WithPort updates the port and returns the called `*commandLineServerConfig`, which is useful for chaining calls.
func (cfg *commandLineServerConfig) WithPort(port int) *commandLineServerConfig {
	cfg.port = port
	return cfg
}

@@ -223,7 +223,7 @@ func getCommandLineServerConfig(dEnv *env.DoltEnv, apr *argparser.ArgParseResult
	serverConfig.withHost(host)
}
if port, ok := apr.GetInt(portFlag); ok {
	serverConfig.withPort(port)
	serverConfig.WithPort(port)
}
if user, ok := apr.GetValue(userFlag); ok {
	serverConfig.withUser(user)
@@ -53,7 +53,7 @@ import (
)

const (
	Version = "0.40.4"
	Version = "0.40.5"
)

var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -19,7 +19,7 @@ require (
	github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
	github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
	github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
	github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b
	github.com/dolthub/vitess v0.0.0-20220603212614-514e62ec66cd
	github.com/dustin/go-humanize v1.0.0
	github.com/fatih/color v1.9.0
	github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568

@@ -68,7 +68,7 @@ require (
)

require (
	github.com/dolthub/go-mysql-server v0.11.1-0.20220603231154-a7d094b136de
	github.com/dolthub/go-mysql-server v0.11.1-0.20220604073908-f302c0189c8a
	github.com/google/flatbuffers v2.0.6+incompatible
	github.com/gosuri/uilive v0.0.4
	github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6

@@ -99,7 +99,6 @@ require (
	github.com/go-pdf/fpdf v0.6.0 // indirect
	github.com/go-stack/stack v1.8.0 // indirect
	github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
	github.com/golang/glog v0.0.0-20210429001901-424d2337a529 // indirect
	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
	github.com/hashicorp/golang-lru v0.5.4 // indirect
10 go/go.sum
@@ -178,8 +178,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.11.1-0.20220603231154-a7d094b136de h1:XWyYKpXJGZ82Cm0Z0FzHYmxLPQ9uNRhtNwwyR60bANM=
github.com/dolthub/go-mysql-server v0.11.1-0.20220603231154-a7d094b136de/go.mod h1:VY2z/8rjWxzGzHFIRpOBFC7qBTj1PXQvNaXd5KNP+8A=
github.com/dolthub/go-mysql-server v0.11.1-0.20220604073908-f302c0189c8a h1:cwUbfuVOs5QgyiOf+V8Ms/qtqmvnABw2ZyWwp0/yirI=
github.com/dolthub/go-mysql-server v0.11.1-0.20220604073908-f302c0189c8a/go.mod h1:gvDEMITJQDVYDLR4XtcqEZx6rawTvMh2veM1bPsJC3I=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=

@@ -188,8 +188,8 @@ github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxP
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b h1:3IG5hRFsoJeKNgdnwE+n1iZQOIuwKDFgrvDOCiK9S3E=
github.com/dolthub/vitess v0.0.0-20220601164959-a2100d98bd3b/go.mod h1:jxgvpEvrTNw2i4BKlwT75E775eUXBeMv5MPeQkIb9zI=
github.com/dolthub/vitess v0.0.0-20220603212614-514e62ec66cd h1:2JUMs9E68P6LYJrm1yz4KcGnxiavqj3EeE+fRpVJaEI=
github.com/dolthub/vitess v0.0.0-20220603212614-514e62ec66cd/go.mod h1:5xfuFfpljoMYespuUmyl5zrHoK0Rl7Bm6yAsnJJJzuY=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=

@@ -304,8 +304,6 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v0.0.0-20210429001901-424d2337a529 h1:2voWjNECnrZRbfwXxHB1/j8wa6xdKn85B5NzgVL/pTU=
github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -21,10 +21,14 @@ import (
	"time"

	"github.com/dolthub/dolt/go/cmd/dolt/errhand"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
	"github.com/dolthub/dolt/go/libraries/doltcore/row"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/store/diff"
	"github.com/dolthub/dolt/go/store/prolly"
	"github.com/dolthub/dolt/go/store/prolly/tree"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/val"
)

type DiffSummaryProgress struct {

@@ -33,20 +37,51 @@ type DiffSummaryProgress struct {

type reporter func(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error

// Summary reports a summary of diff changes between two values
// todo: make package private once dolthub is migrated
// Summary reports a summary of diff changes between two values
func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Map) (err error) {
func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.Index, fromSch, toSch schema.Schema) (err error) {
	ch <- DiffSummaryProgress{OldSize: from.Count(), NewSize: to.Count()}

	if from.Format() == types.Format_DOLT_1 {
		return prollySummary(ctx, ch, from, to, fromSch, toSch)
	}

	return nomsSummary(ctx, ch, from, to)
}

func prollySummary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.Index, fromSch, toSch schema.Schema) error {
	_, vMapping, err := MapSchemaBasedOnName(fromSch, toSch)
	if err != nil {
		return err
	}

	f := durable.ProllyMapFromIndex(from)
	t := durable.ProllyMapFromIndex(to)
	_, fVD := f.Descriptors()
	_, tVD := t.Descriptors()

	err = prolly.DiffMaps(ctx, f, t, func(ctx context.Context, diff tree.Diff) error {
		err := reportPkChanges(ctx, vMapping, fVD, tVD, diff, ch)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}
	return nil
}

func nomsSummary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.Index) (err error) {
	ad := NewAsyncDiffer(1024)
	ad.Start(ctx, from, to)
	ad.Start(ctx, durable.NomsMapFromIndex(from), durable.NomsMapFromIndex(to))
	defer func() {
		if cerr := ad.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()

	ch <- DiffSummaryProgress{OldSize: from.Len(), NewSize: to.Len()}

	hasMore := true
	var diffs []*diff.Difference
	for hasMore {

@@ -57,7 +92,7 @@ func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to types.Ma

	for i := range diffs {
		curr := diffs[i]
		err := reportPkChanges(ctx, curr, ch)
		err := reportNomsPkChanges(ctx, curr, ch)
		if err != nil {
			return err
		}

@@ -91,7 +126,7 @@ func SummaryForTableDelta(ctx context.Context, ch chan DiffSummaryProgress, td T
	if keyless {
		rpr = reportKeylessChanges
	} else {
		rpr = reportPkChanges
		rpr = reportNomsPkChanges
		ch <- DiffSummaryProgress{
			OldSize: fromRows.Len(),
			NewSize: toRows.Len(),

@@ -133,7 +168,59 @@ func summaryWithReporter(ctx context.Context, ch chan DiffSummaryProgress, from,
	return nil
}

func reportPkChanges(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error {
func reportPkChanges(ctx context.Context, vMapping val.OrdinalMapping, fromD, toD val.TupleDesc, change tree.Diff, ch chan<- DiffSummaryProgress) error {
	var summary DiffSummaryProgress
	switch change.Type {
	case tree.AddedDiff:
		summary = DiffSummaryProgress{Adds: 1}
	case tree.RemovedDiff:
		summary = DiffSummaryProgress{Removes: 1}
	case tree.ModifiedDiff:
		cellChanges := prollyCountCellDiff(vMapping, fromD, toD, val.Tuple(change.From), val.Tuple(change.To))
		summary = DiffSummaryProgress{Changes: 1, CellChanges: cellChanges}
	default:
		return errors.New("unknown change type")
	}
	select {
	case ch <- summary:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// prollyCountCellDiff counts the number of changed columns between two tuples
// |from| and |to|. |mapping| should map columns from |from| to |to|.
func prollyCountCellDiff(mapping val.OrdinalMapping, fromD, toD val.TupleDesc, from val.Tuple, to val.Tuple) uint64 {
	newCols := uint64(toD.Count())
	changed := uint64(0)
	for i, j := range mapping {
		newCols--
		if j == -1 {
			// column was dropped
			changed++
			continue
		}

		if fromD.Types[i].Enc != toD.Types[j].Enc {
			// column type is different
			changed++
			continue
		}

		if fromD.CompareField(toD.GetField(j, to), i, from) != 0 {
			// column was modified
			changed++
			continue
		}
	}

	// some columns were added
	changed += newCols
	return changed
}

func reportNomsPkChanges(ctx context.Context, change *diff.Difference, ch chan<- DiffSummaryProgress) error {
	var summary DiffSummaryProgress
	switch change.ChangeType {
	case types.DiffChangeAdded:

@@ -195,3 +282,46 @@ func reportKeylessChanges(ctx context.Context, change *diff.Difference, ch chan<
		return ctx.Err()
	}
}

// MapSchemaBasedOnName can be used to map column values from one schema to
// another schema. A column in |inSch| is mapped to |outSch| if they share the
// same name and primary key membership status. It returns ordinal mappings that
// can be used to map key, value val.Tuple's of schema |inSch| to a sql.Row of
// |outSch|. The first ordinal map is for keys, and the second is for values. If
// a column of |inSch| is missing in |outSch| then that column's index in the
// ordinal map holds -1.
// TODO (dhruv): Unit tests
func MapSchemaBasedOnName(inSch, outSch schema.Schema) (val.OrdinalMapping, val.OrdinalMapping, error) {
	keyMapping := make(val.OrdinalMapping, inSch.GetPKCols().Size())
	valMapping := make(val.OrdinalMapping, inSch.GetNonPKCols().Size())

	err := inSch.GetPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		i := inSch.GetPKCols().TagToIdx[tag]
		if col, ok := outSch.GetPKCols().GetByName(col.Name); ok {
			j := outSch.GetAllCols().TagToIdx[col.Tag]
			keyMapping[i] = j
		} else {
			return true, fmt.Errorf("could not map primary key column %s", col.Name)
		}
		return false, nil
	})
	if err != nil {
		return nil, nil, err
	}

	err = inSch.GetNonPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		i := inSch.GetNonPKCols().TagToIdx[col.Tag]
		if col, ok := outSch.GetNonPKCols().GetByName(col.Name); ok {
			j := outSch.GetAllCols().TagToIdx[col.Tag]
			valMapping[i] = j
		} else {
			valMapping[i] = -1
		}
		return false, nil
	})
	if err != nil {
		return nil, nil, err
	}

	return keyMapping, valMapping, nil
}
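To make the counting rule in `prollyCountCellDiff` concrete: every dropped, modified, or added column contributes one changed cell. A simplified, self-contained model of the same control flow (plain int slices stand in for `val.Tuple`/`val.TupleDesc`; `mapping[i] == -1` marks a dropped column, following the `MapSchemaBasedOnName` convention):

```go
package main

import "fmt"

// countCellDiff mirrors prollyCountCellDiff above: newCols starts at the
// destination column count and is decremented once per mapped source column;
// whatever remains unmatched at the end is counted as added columns.
func countCellDiff(mapping []int, from, to []int) int {
	newCols := len(to)
	changed := 0
	for i, j := range mapping {
		newCols--
		if j == -1 {
			changed++ // column was dropped
			continue
		}
		if from[i] != to[j] {
			changed++ // column value was modified
		}
	}
	changed += newCols // columns never matched were added
	return changed
}

func main() {
	// from row (1, 2) vs to row (1, 5, 7) with identity mapping for the two
	// shared columns: one modified cell (2 -> 5) plus one added column (7).
	fmt.Println(countCellDiff([]int{0, 1}, []int{1, 2}, []int{1, 5, 7})) // prints 2
}
```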
24 go/libraries/doltcore/env/actions/branch.go vendored
@@ -30,17 +30,17 @@ var ErrAlreadyExists = errors.New("already exists")
var ErrCOBranchDelete = errors.New("attempted to delete checked out branch")
var ErrUnmergedBranchDelete = errors.New("attempted to delete a branch that is not fully merged into its parent; use `-f` to force")

func RenameBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error {
func RenameBranch(ctx context.Context, dbData env.DbData, config *env.DoltCliConfig, oldBranch, newBranch string, force bool) error {
	oldRef := ref.NewBranchRef(oldBranch)
	newRef := ref.NewBranchRef(newBranch)

	err := CopyBranch(ctx, dEnv, oldBranch, newBranch, force)
	err := CopyBranchOnDB(ctx, dbData.Ddb, oldBranch, newBranch, force)
	if err != nil {
		return err
	}

	if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), oldRef) {
		err = dEnv.RepoStateWriter().SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: newRef})
	if ref.Equals(dbData.Rsr.CWBHeadRef(), oldRef) {
		err = dbData.Rsw.SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: newRef})
		if err != nil {
			return err
		}

@@ -59,13 +59,13 @@ func RenameBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch s
		// We always `force` here, because the CopyBranch up
		// above created a new branch and it will have a
		// working set.
		err = dEnv.DoltDB.CopyWorkingSet(ctx, fromWSRef, toWSRef, true /* force */)
		err = dbData.Ddb.CopyWorkingSet(ctx, fromWSRef, toWSRef, true /* force */)
		if err != nil {
			return err
		}
	}

	return DeleteBranch(ctx, dEnv, oldBranch, DeleteOptions{Force: true})
	return DeleteBranch(ctx, dbData, config, oldBranch, DeleteOptions{Force: true})
}

func CopyBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error {

@@ -111,7 +111,7 @@ type DeleteOptions struct {
	Remote bool
}

func DeleteBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, opts DeleteOptions) error {
func DeleteBranch(ctx context.Context, dbData env.DbData, config *env.DoltCliConfig, brName string, opts DeleteOptions) error {
	var dref ref.DoltRef
	if opts.Remote {
		var err error

@@ -121,16 +121,16 @@ func DeleteBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, opts De
		}
	} else {
		dref = ref.NewBranchRef(brName)
		if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), dref) {
		if ref.Equals(dbData.Rsr.CWBHeadRef(), dref) {
			return ErrCOBranchDelete
		}
	}

	return DeleteBranchOnDB(ctx, dEnv, dref, opts)
	return DeleteBranchOnDB(ctx, dbData, config, dref, opts)
}

func DeleteBranchOnDB(ctx context.Context, dEnv *env.DoltEnv, dref ref.DoltRef, opts DeleteOptions) error {
	ddb := dEnv.DoltDB
func DeleteBranchOnDB(ctx context.Context, dbData env.DbData, config *env.DoltCliConfig, dref ref.DoltRef, opts DeleteOptions) error {
	ddb := dbData.Ddb
	hasRef, err := ddb.HasRef(ctx, dref)

	if err != nil {

@@ -140,7 +140,7 @@ func DeleteBranchOnDB(ctx context.Context, dEnv *env.DoltEnv, dref ref.DoltRef,
	}

	if !opts.Force && !opts.Remote {
		ms, err := doltdb.NewCommitSpec(env.GetDefaultInitBranch(dEnv.Config))
		ms, err := doltdb.NewCommitSpec(env.GetDefaultInitBranch(config))
		if err != nil {
			return err
		}

@@ -310,19 +310,23 @@ func getTableInfoFromRoot(ctx context.Context, tblName string, root *doltdb.Root

func calcTableMergeStats(ctx context.Context, tbl *doltdb.Table, mergeTbl *doltdb.Table) (MergeStats, error) {
	ms := MergeStats{Operation: TableModified}
	if tbl.Format() == types.Format_DOLT_1 {
		// TODO (dhruv): calculate stats for V1
		return ms, nil
	}

	rows, err := tbl.GetNomsRowData(ctx)
	rows, err := tbl.GetRowData(ctx)
	if err != nil {
		return MergeStats{}, err
	}

	mergeRows, err := mergeTbl.GetNomsRowData(ctx)
	mergeRows, err := mergeTbl.GetRowData(ctx)
	if err != nil {
		return MergeStats{}, err
	}

	sch, err := tbl.GetSchema(ctx)
	if err != nil {
		return MergeStats{}, err
	}

	mergeSch, err := mergeTbl.GetSchema(ctx)
	if err != nil {
		return MergeStats{}, err
	}

@@ -331,7 +335,8 @@ func calcTableMergeStats(ctx context.Context, tbl *doltdb.Table, mergeTbl *doltd
	ch := make(chan diff.DiffSummaryProgress)
	go func() {
		defer close(ch)
		err := diff.Summary(ctx, ch, rows, mergeRows)
		// todo (dhruv): In the new storage format we can do better than executing a diff...
		err := diff.Summary(ctx, ch, rows, mergeRows, sch, mergeSch)

		ae.SetIfError(err)
	}()
@@ -50,6 +50,8 @@ type mergeResult struct {
func mergeTableData(ctx context.Context, vrw types.ValueReadWriter, postMergeSchema, rootSchema, mergeSchema, ancSchema schema.Schema, tbl, mergeTbl, tableToUpdate *doltdb.Table, ancRows durable.Index, ancIndexSet durable.IndexSet) (mergeResult, error) {
	group, gCtx := errgroup.WithContext(ctx)

	stats := &MergeStats{Operation: TableModified}

	indexEdits := make(chan indexEdit, 128)
	conflicts := make(chan confVals, 128)
	var updatedTable *doltdb.Table

@@ -90,7 +92,7 @@ func mergeTableData(ctx context.Context, vrw types.ValueReadWriter, postMergeSch
	}
	confEditor := durable.ProllyMapFromConflictIndex(confIdx).Editor()
	group.Go(func() error {
		return processConflicts(ctx, conflicts, confEditor)
		return processConflicts(ctx, stats, conflicts, confEditor)
	})

	err = group.Wait()

@@ -122,7 +124,7 @@ func mergeTableData(ctx context.Context, vrw types.ValueReadWriter, postMergeSch
	return mergeResult{
		tbl:   updatedTable,
		cons:  confIdx,
		stats: &MergeStats{Operation: TableModified},
		stats: stats,
	}, nil
}

@@ -308,7 +310,7 @@ func (m *valueMerger) processColumn(i int, left, right, base val.Tuple) ([]byte,
	}
}

func processConflicts(ctx context.Context, conflictChan chan confVals, editor prolly.ConflictEditor) error {
func processConflicts(ctx context.Context, stats *MergeStats, conflictChan chan confVals, editor prolly.ConflictEditor) error {
OUTER:
	for {
		select {

@@ -316,6 +318,7 @@ OUTER:
			if !ok {
				break OUTER
			}
			stats.Conflicts++
			err := editor.Add(ctx, conflict.key, conflict.ourVal, conflict.theirVal, conflict.baseVal)
			if err != nil {
				return err
@@ -261,6 +261,36 @@ func TagMappingWithNameFallback(srcSch, destSch schema.Schema) (*FieldMapping, e
	return NewFieldMapping(srcSch, destSch, srcToDest)
}

// TagMappingByName takes a source schema and a destination schema and maps
// columns by matching names.
func TagMappingByName(srcSch, destSch schema.Schema) (*FieldMapping, error) {
	successes := 0
	srcCols := srcSch.GetAllCols()
	destCols := destSch.GetAllCols()

	srcToDest := make(map[uint64]uint64, destCols.Size())
	err := destCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
		srcCol, ok := srcCols.GetByName(col.Name)
		if !ok {
			return false, nil
		}

		srcToDest[srcCol.Tag] = col.Tag
		successes++

		return false, nil
	})
	if err != nil {
		return nil, err
	}

	if successes == 0 {
		return nil, ErrEmptyMapping
	}

	return NewFieldMapping(srcSch, destSch, srcToDest)
}

// TypedToUntypedMapping takes a schema and creates a mapping to an untyped schema with all the same columns.
func TypedToUntypedMapping(sch schema.Schema) (*FieldMapping, error) {
	untypedSch, err := untyped.UntypeSchema(sch)

@@ -30,7 +30,7 @@ var IdentityConverter = &RowConverter{nil, true, nil}

// WarnFunction is a callback function that callers can optionally provide during row conversion
// to take an extra action when a value cannot be automatically converted to the output data type.
type WarnFunction func(int, string, ...string)
type WarnFunction func(int, string, ...interface{})

var DatatypeCoercionFailureWarning = "unable to coerce value from field '%s' into latest column schema"
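For readers unfamiliar with the schema types, the name-matching rule in `TagMappingByName` can be seen in a simplified, self-contained model where schemas are reduced to tag-to-name maps (all names here are illustrative, not the real API):

```go
package main

import (
	"errors"
	"fmt"
)

// tagMappingByName models TagMappingByName above: produce a source-tag to
// destination-tag entry wherever a destination column name also exists in
// the source, and fail if nothing at all matched.
func tagMappingByName(src, dest map[uint64]string) (map[uint64]uint64, error) {
	srcByName := make(map[string]uint64, len(src))
	for tag, name := range src {
		srcByName[name] = tag
	}

	srcToDest := make(map[uint64]uint64, len(dest))
	for destTag, name := range dest {
		if srcTag, ok := srcByName[name]; ok {
			srcToDest[srcTag] = destTag // matched by name
		}
	}
	if len(srcToDest) == 0 {
		return nil, errors.New("empty mapping") // models ErrEmptyMapping
	}
	return srcToDest, nil
}

func main() {
	src := map[uint64]string{1: "id", 2: "name", 3: "legacy"}
	dest := map[uint64]string{10: "id", 11: "name", 12: "email"}
	m, _ := tagMappingByName(src, dest)
	fmt.Println(m) // map[1:10 2:11] — "legacy" and "email" stay unmatched
}
```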
@@ -229,8 +229,8 @@ func IsUsingSpatialColAsKey(sch Schema) bool {
	return false
}

// CopyChecks copies check constraints from the |from| schema to the |to| schema and returns it
func CopyChecks(from, to Schema) Schema {
// CopyChecksConstraints copies check constraints from the |from| schema to the |to| schema and returns it
func CopyChecksConstraints(from, to Schema) Schema {
	fromSch, toSch := from.(*schemaImpl), to.(*schemaImpl)
	toSch.checkCollection = fromSch.checkCollection
	return toSch
@@ -728,21 +728,3 @@ func keyedRowDataToKeylessRowData(ctx context.Context, nbf *types.NomsBinFormat,

	return mapEditor.Map(ctx)
}

func validateSpatialTypeSRID(c schema.Column, v types.Value) error {
	sc, ok := c.TypeInfo.ToSqlType().(sql.SpatialColumnType)
	if !ok {
		return nil
	}
	sqlVal, err := c.TypeInfo.ConvertNomsValueToValue(v)
	if err != nil {
		return err
	}
	err = sc.MatchSRID(sqlVal)
	if err != nil {
		if sql.ErrNotMatchingSRID.Is(err) {
			return sql.ErrNotMatchingSRIDWithColName.New(c.Name, err)
		}
	}
	return nil
}
@@ -789,7 +789,7 @@ func (db Database) CreateTemporaryTable(ctx *sql.Context, tableName string, pkSc
	return nil
}

// renameTable implements sql.TableRenamer
// RenameTable implements sql.TableRenamer
func (db Database) RenameTable(ctx *sql.Context, oldName, newName string) error {
	root, err := db.GetRoot(ctx)
@@ -26,8 +26,11 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/argparser"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/filesys"
|
||||
)
|
||||
|
||||
const DoltBranchFuncName = "dolt_branch"
|
||||
@@ -91,50 +94,130 @@ func DoDoltBranch(ctx *sql.Context, args []string) (int, error) {
|
||||
|
||||
switch {
|
||||
case apr.Contains(cli.CopyFlag):
|
||||
err = makeACopyOfBranch(ctx, dbData, apr)
|
||||
if err != nil {
|
||||
return 1, err
|
||||
}
|
||||
err = copyBranch(ctx, dbData, apr)
|
||||
case apr.Contains(cli.MoveFlag):
|
||||
return 1, errors.New("Renaming a branch is not supported.")
|
||||
case apr.Contains(cli.DeleteFlag):
|
||||
return 1, errors.New("Deleting branches is not supported.")
|
||||
case apr.Contains(cli.DeleteForceFlag):
|
||||
return 1, errors.New("Deleting branches is not supported.")
|
||||
err = renameBranch(ctx, dbData, apr)
|
||||
case apr.Contains(cli.DeleteFlag), apr.Contains(cli.DeleteForceFlag):
|
||||
err = deleteBranches(ctx, apr, dbData)
|
||||
default:
|
||||
// regular branch - create new branch
|
||||
if apr.NArg() != 1 {
|
||||
return 1, InvalidArgErr
|
||||
}
|
||||
|
||||
branchName := apr.Arg(0)
|
||||
if len(branchName) == 0 {
|
||||
return 1, EmptyBranchNameErr
|
||||
}
|
||||
|
||||
err = createNewBranch(ctx, dbData, branchName)
|
||||
if err != nil {
|
||||
return 1, err
|
||||
}
|
||||
err = createNewBranch(ctx, dbData, apr)
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func createNewBranch(ctx *sql.Context, dbData env.DbData, branchName string) error {
|
||||
// Check if the branch already exists.
|
||||
isBranch, err := actions.IsBranch(ctx, dbData.Ddb, branchName)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if isBranch {
|
||||
return errors.New(fmt.Sprintf("fatal: A branch named '%s' already exists.", branchName))
|
||||
return 1, err
|
||||
} else {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
startPt := fmt.Sprintf("head")
|
||||
return actions.CreateBranchWithStartPt(ctx, dbData, branchName, startPt, false)
|
||||
}
|
||||
|
||||
func makeACopyOfBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
|
||||
func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
|
||||
if apr.NArg() != 2 {
|
||||
return InvalidArgErr
|
||||
}
|
||||
oldBranchName, newBranchName := apr.Arg(0), apr.Arg(1)
|
||||
if oldBranchName == "" || newBranchName == "" {
|
||||
return EmptyBranchNameErr
|
||||
}
|
||||
force := apr.Contains(cli.ForceFlag)
|
||||
|
||||
if !force {
|
||||
err := validateBranchNotActiveInAnySession(ctx, oldBranchName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return actions.RenameBranch(ctx, dbData, loadConfig(ctx), oldBranchName, newBranchName, force)
|
||||
}
|
||||
|
||||
func deleteBranches(ctx *sql.Context, apr *argparser.ArgParseResults, dbData env.DbData) error {
|
||||
if apr.NArg() == 0 {
|
||||
return InvalidArgErr
|
||||
}
|
||||
|
||||
for _, branchName := range apr.Args {
|
||||
if len(branchName) == 0 {
|
||||
return EmptyBranchNameErr
|
||||
}
|
||||
force := apr.Contains(cli.DeleteForceFlag) || apr.Contains(cli.ForceFlag)
|
||||
|
||||
if !force {
|
||||
err := validateBranchNotActiveInAnySession(ctx, branchName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := actions.DeleteBranch(ctx, dbData, loadConfig(ctx), branchName, actions.DeleteOptions{
|
||||
Force: force,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}

// validateBranchNotActiveInAnySession returns an error if the specified branch is currently
// selected as the active branch for any active server sessions.
func validateBranchNotActiveInAnySession(ctx *sql.Context, branchName string) error {
	dbName := ctx.GetCurrentDatabase()
	if dbName == "" {
		return nil
	}

	if !sqlserver.RunningInServerMode() {
		return nil
	}

	runningServer := sqlserver.GetRunningServer()
	if runningServer == nil {
		return nil
	}
	sessionManager := runningServer.SessionManager()
	branchRef := ref.NewBranchRef(branchName)

	return sessionManager.Iter(func(session sql.Session) (bool, error) {
		dsess, ok := session.(*dsess.DoltSession)
		if !ok {
			return false, fmt.Errorf("unexpected session type: %T", session)
		}

		activeBranchRef, err := dsess.CWBHeadRef(ctx, dbName)
		if err != nil {
			return false, err
		}

		if ref.Equals(branchRef, activeBranchRef) {
			return false, fmt.Errorf("unsafe to delete or rename branches in use in other sessions; " +
				"use --force to force the change")
		}

		return false, nil
	})
}
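
To show how this guard composes with the force flag, here is a minimal sketch of the guard-then-act pattern that renameBranch and deleteBranches follow; forceableBranchOp is a hypothetical helper, not part of this patch:

// Sketch only: a hypothetical wrapper illustrating the guard-then-act pattern
// used by renameBranch and deleteBranches above. destructiveOp performs the
// actual rename or delete once the safety check has passed (or been forced).
func forceableBranchOp(ctx *sql.Context, branchName string, force bool, destructiveOp func() error) error {
	if !force {
		// Refuse to touch a branch that another session has checked out.
		if err := validateBranchNotActiveInAnySession(ctx, branchName); err != nil {
			return err
		}
	}
	return destructiveOp()
}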

func loadConfig(ctx *sql.Context) *env.DoltCliConfig {
	// When executing branch actions from SQL, we don't have access to a DoltEnv like we do from
	// within the CLI. We can fake it here enough to get a DoltCliConfig, but we can't rely on the
	// DoltEnv because tests and production will run with different settings (e.g. in-mem versus file).
	dEnv := env.Load(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, doltdb.LocalDirDoltDB, "")
	return dEnv.Config
}

func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
	if apr.NArg() != 1 {
		return InvalidArgErr
	}

	branchName := apr.Arg(0)
	if len(branchName) == 0 {
		return EmptyBranchNameErr
	}

	return actions.CreateBranchWithStartPt(ctx, dbData, branchName, "HEAD", apr.Contains(cli.ForceFlag))
}

func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
	if apr.NArg() != 2 {
		return InvalidArgErr
	}

@@ -15,15 +15,22 @@ package dtables
// limitations under the License.

import (
	"context"
	"errors"

	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/conflict"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
	"github.com/dolthub/dolt/go/libraries/doltcore/merge"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
	"github.com/dolthub/dolt/go/store/pool"
	"github.com/dolthub/dolt/go/store/prolly"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/val"
)

var _ sql.Table = ConflictsTable{}
@@ -35,6 +42,8 @@ type ConflictsTable struct {
	root    *doltdb.RootValue
	tbl     *doltdb.Table
	rd      *merge.ConflictReader
	confIdx durable.ConflictIndex
	confSch conflict.ConflictSchema
	rs      RootSetter
}

@@ -51,12 +60,34 @@ func NewConflictsTable(ctx *sql.Context, tblName string, root *doltdb.RootValue,
		return nil, sql.ErrTableNotFound.New(tblName)
	}

	schs, confIdx, err := tbl.GetConflicts(ctx)
	if err != nil {
		return nil, err
	}
	if schs.Base == nil || schs.Schema == nil || schs.MergeSchema == nil {
		schs.Base, err = tbl.GetSchema(ctx)
		if err != nil {
			return nil, err
		}
		schs.Schema, schs.MergeSchema = schs.Base, schs.Base
	}

	var rd *merge.ConflictReader
	var confSch schema.Schema
	if tbl.Format() == types.Format_DOLT_1 {
		confSch, err = CalculateConflictSchema(schs.Base, schs.Schema, schs.MergeSchema)
		if err != nil {
			return nil, err
		}
	} else {
		rd, err = merge.NewConflictReader(ctx, tbl)
		if err != nil {
			return nil, err
		}
		confSch = rd.GetSchema()
	}

	sqlSch, err := sqlutil.FromDoltSchema(doltdb.DoltConfTablePrefix+tblName, confSch)
	if err != nil {
		return nil, err
	}
@@ -67,6 +98,8 @@ func NewConflictsTable(ctx *sql.Context, tblName string, root *doltdb.RootValue,
		root:    root,
		tbl:     tbl,
		rd:      rd,
		confIdx: confIdx,
		confSch: schs,
		rs:      rs,
	}, nil
}
@@ -93,13 +126,199 @@ func (ct ConflictsTable) Partitions(ctx *sql.Context) (sql.PartitionIter, error)

// PartitionRows returns a RowIter for the given partition
func (ct ConflictsTable) PartitionRows(ctx *sql.Context, part sql.Partition) (sql.RowIter, error) {
	if ct.tbl.Format() == types.Format_DOLT_1 {
		return newProllyConflictRowIter(ctx, durable.ProllyMapFromConflictIndex(ct.confIdx), ct.confSch.Base, ct.confSch.Schema, ct.confSch.MergeSchema)
	}

	return conflictRowIter{ct.rd}, nil
}

// Deleter returns a RowDeleter for this table. The RowDeleter will get one call to Delete for each row to be deleted,
// and will end with a call to Close() to finalize the delete operation.
func (ct ConflictsTable) Deleter(ctx *sql.Context) sql.RowDeleter {
	if ct.tbl.Format() == types.Format_DOLT_1 {
		return newProllyConflictDeleter(ct)
	} else {
		return &conflictDeleter{ct: ct, rs: ct.rs}
	}
}

type prollyConflictRowIter struct {
	confItr prolly.ConflictIter
	kd      val.TupleDesc

	baseVD, oursVD, theirsVD val.TupleDesc

	// offsets for each version
	b, o, t int
	n       int
}

func newProllyConflictRowIter(ctx context.Context, conflictMap prolly.ConflictMap, baseSch, ourSch, theirSch schema.Schema) (prollyConflictRowIter, error) {
	iter, err := conflictMap.IterAll(ctx)
	if err != nil {
		return prollyConflictRowIter{}, err
	}

	kd := prolly.KeyDescriptorFromSchema(baseSch)
	baseVD := prolly.ValueDescriptorFromSchema(baseSch)
	oursVD := prolly.ValueDescriptorFromSchema(ourSch)
	theirsVD := prolly.ValueDescriptorFromSchema(theirSch)

	b := 0
	o := kd.Count() + baseVD.Count()
	t := o + kd.Count() + oursVD.Count()
	n := t + kd.Count() + theirsVD.Count()

	return prollyConflictRowIter{
		confItr:  iter,
		kd:       kd,
		baseVD:   baseVD,
		oursVD:   oursVD,
		theirsVD: theirsVD,
		b:        b,
		o:        o,
		t:        t,
		n:        n,
	}, nil
}
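
To make the offset arithmetic above concrete, a worked example with hypothetical column counts:

// Worked example (hypothetical counts): for a 1-column key and 2 value
// columns in each of base, ours, and theirs, kd.Count() = 1 and each
// value descriptor count is 2, so:
//
//   b = 0              // base fields start at row index 0
//   o = 1 + 2 = 3      // ours fields start after base's key + values
//   t = 3 + 1 + 2 = 6  // theirs fields start after ours' key + values
//   n = 6 + 1 + 2 = 9  // total width of the conflict row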

var _ sql.RowIter = prollyConflictRowIter{}

func (itr prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
	k, v, err := itr.confItr.Next(ctx)
	if err != nil {
		return nil, err
	}

	r := make(sql.Row, itr.n)

	for i := 0; i < itr.kd.Count(); i++ {
		f, err := index.GetField(itr.kd, i, k)
		if err != nil {
			return nil, err
		}
		r[itr.b+i], r[itr.o+i], r[itr.t+i] = f, f, f
	}

	tup := v.BaseValue()
	for i := 0; i < itr.baseVD.Count(); i++ {
		f, err := index.GetField(itr.baseVD, i, tup)
		if err != nil {
			return nil, err
		}
		r[itr.b+itr.kd.Count()+i] = f
	}
	tup = v.OurValue()
	for i := 0; i < itr.oursVD.Count(); i++ {
		f, err := index.GetField(itr.oursVD, i, tup)
		if err != nil {
			return nil, err
		}
		r[itr.o+itr.kd.Count()+i] = f
	}
	tup = v.TheirValue()
	for i := 0; i < itr.theirsVD.Count(); i++ {
		f, err := index.GetField(itr.theirsVD, i, tup)
		if err != nil {
			return nil, err
		}
		r[itr.t+itr.kd.Count()+i] = f
	}

	return r, nil
}

func (itr prollyConflictRowIter) Close(ctx *sql.Context) error {
	return nil
}

type prollyConflictDeleter struct {
	kd             val.TupleDesc
	kB             *val.TupleBuilder
	pool           pool.BuffPool
	ed             prolly.ConflictEditor
	ct             ConflictsTable
	rs             RootSetter
	conflictSchema conflict.ConflictSchema
}

func newProllyConflictDeleter(ct ConflictsTable) *prollyConflictDeleter {
	conflictMap := durable.ProllyMapFromConflictIndex(ct.confIdx)
	kd, _, _, _ := conflictMap.Descriptors()
	ed := conflictMap.Editor()
	kB := val.NewTupleBuilder(kd)
	p := conflictMap.Pool()
	return &prollyConflictDeleter{
		kd:             kd,
		kB:             kB,
		pool:           p,
		ed:             ed,
		ct:             ct,
		conflictSchema: ct.confSch,
	}
}

func (cd *prollyConflictDeleter) Delete(ctx *sql.Context, r sql.Row) error {
	// first columns are the keys
	for i := 0; i < cd.kd.Count(); i++ {
		err := index.PutField(cd.kB, i, r[i])
		if err != nil {
			return err
		}
	}

	key := cd.kB.Build(cd.pool)
	err := cd.ed.Delete(ctx, key)
	if err != nil {
		return err
	}

	return nil
}

// StatementBegin implements the interface sql.TableEditor. Currently a no-op.
func (cd *prollyConflictDeleter) StatementBegin(ctx *sql.Context) {}

// DiscardChanges implements the interface sql.TableEditor. Currently a no-op.
func (cd *prollyConflictDeleter) DiscardChanges(ctx *sql.Context, errorEncountered error) error {
	return nil
}

// StatementComplete implements the interface sql.TableEditor. Currently a no-op.
func (cd *prollyConflictDeleter) StatementComplete(ctx *sql.Context) error {
	return nil
}

// Close finalizes the delete operation, persisting the result.
func (cd *prollyConflictDeleter) Close(ctx *sql.Context) error {
	conflicts, err := cd.ed.Flush(ctx)
	if err != nil {
		return err
	}

	// TODO: We can delete from more than one table in a single statement. Root
	// updates should be restricted to write session and not individual table
	// editors.

	// TODO (dhruv): move this code into some kind of ResolveConflicts function
	var updatedTbl *doltdb.Table
	if conflicts.Count() == 0 {
		updatedTbl, err = cd.ct.tbl.ClearConflicts(ctx)
		if err != nil {
			return err
		}
	} else {
		updatedTbl, err = cd.ct.tbl.SetConflicts(ctx, cd.conflictSchema, durable.ConflictIndexFromProllyMap(conflicts))
		if err != nil {
			return err
		}
	}

	updatedRoot, err := cd.ct.root.PutTable(ctx, cd.ct.tblName, updatedTbl)
	if err != nil {
		return err
	}

	return cd.ct.rs.SetRoot(ctx, updatedRoot)
}
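
As a concrete illustration of Close's two paths: if a DELETE FROM dolt_conflicts_t statement removes every conflict row, the flushed map is empty and ClearConflicts drops the conflict artifact from the table entirely; if only some rows were deleted, SetConflicts rewrites the conflict index with the surviving entries.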

type conflictRowIter struct {
@@ -123,14 +342,14 @@ func (itr conflictRowIter) Close(*sql.Context) error {
	return itr.rd.Close()
}

type conflictDeleter struct {
	ct  ConflictsTable
	rs  RootSetter
	pks []types.Value
}

var _ sql.RowDeleter = &conflictDeleter{}

// Delete deletes the given row. Returns ErrDeleteRowNotFound if the row was not found. Delete will be called once for
// each row to process for the delete operation, which may involve many rows. After all rows have been processed,
// Close is called.
@@ -186,3 +405,48 @@ func (cd *conflictDeleter) Close(ctx *sql.Context) error {

	return cd.rs.SetRoot(ctx, updatedRoot)
}

func CalculateConflictSchema(base, ours, theirs schema.Schema) (schema.Schema, error) {
	cols := make([]schema.Column, ours.GetAllCols().Size()+theirs.GetAllCols().Size()+base.GetAllCols().Size())

	i := 0
	putWithPrefix := func(prefix string, sch schema.Schema) error {
		err := sch.GetPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
			c, err := schema.NewColumnWithTypeInfo(prefix+col.Name, uint64(i), col.TypeInfo, false, col.Default, false, col.Comment)
			if err != nil {
				return true, err
			}
			cols[i] = c
			i++
			return false, nil
		})
		if err != nil {
			return err
		}
		err = sch.GetNonPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
			c, err := schema.NewColumnWithTypeInfo(prefix+col.Name, uint64(i), col.TypeInfo, false, col.Default, false, col.Comment)
			if err != nil {
				return true, err
			}
			cols[i] = c
			i++
			return false, nil
		})
		return err
	}

	err := putWithPrefix("base_", base)
	if err != nil {
		return nil, err
	}
	err = putWithPrefix("ours_", ours)
	if err != nil {
		return nil, err
	}
	err = putWithPrefix("theirs_", theirs)
	if err != nil {
		return nil, err
	}

	return schema.UnkeyedSchemaFromCols(schema.NewColCollection(cols...)), nil
}
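
A worked example of the resulting layout (the source schema is hypothetical): for a table t(pk, c1), CalculateConflictSchema yields the unkeyed column list

//   base_pk, base_c1, ours_pk, ours_c1, theirs_pk, theirs_c1

which is exactly the base/ours/theirs field ordering that prollyConflictRowIter writes into each conflict row.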

@@ -105,11 +105,7 @@ func newNomsDiffIter(ctx *sql.Context, ddb *doltdb.DoltDB, joiner *rowconv.Joiner
		rd.StartWithRange(ctx, durable.NomsMapFromIndex(fromData), durable.NomsMapFromIndex(toData), ranges[0].Start, rangeFunc)
	}

	src := diff.NewRowDiffSource(rd, joiner, ctx.Warn)
	src.AddInputRowConversion(fromConv, toConv)

	return &diffRowItr{
@@ -260,12 +256,12 @@ func newProllyDiffIter(ctx *sql.Context, dp DiffPartition, ddb *doltdb.DoltDB, t
	}
	to := durable.ProllyMapFromIndex(t)

	fromConverter, err := NewProllyRowConverter(fSch, targetFromSchema, ctx.Warn)
	if err != nil {
		return prollyDiffIter{}, err
	}

	toConverter, err := NewProllyRowConverter(tSch, targetToSchema, ctx.Warn)
	if err != nil {
		return prollyDiffIter{}, err
	}
@@ -341,8 +337,8 @@ func (itr prollyDiffIter) queueRows(ctx context.Context) {

// todo(andy): copy string fields
func (itr prollyDiffIter) makeDiffRow(d tree.Diff) (r sql.Row, err error) {
	n := itr.targetToSch.GetAllCols().Size()
	m := itr.targetFromSch.GetAllCols().Size()
	// 2 commit names, 2 commit dates, 1 diff_type
	r = make(sql.Row, n+m+5)

@@ -514,7 +514,7 @@ func (dp DiffPartition) rowConvForSchema(ctx context.Context, vrw types.ValueRea
	return rowconv.IdentityConverter, nil
}

	fm, err := rowconv.TagMappingByName(srcSch, targetSch)
	if err != nil {
		return nil, err
	}

@@ -424,6 +424,7 @@ func rowConvForSchema(ctx context.Context, vrw types.ValueReadWriter, targetSch
	return rowconv.IdentityConverter, nil
}

	// TODO: Update history table to also match by name only
	fm, err := rowconv.TagMappingWithNameFallback(sch, targetSch)
	if err != nil {
		return nil, err

@@ -15,10 +15,10 @@
package dtables

import (
	"fmt"

	"github.com/dolthub/go-mysql-server/sql"

	"github.com/dolthub/dolt/go/libraries/doltcore/diff"
	"github.com/dolthub/dolt/go/libraries/doltcore/rowconv"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
	"github.com/dolthub/dolt/go/store/prolly"
@@ -36,10 +36,11 @@ type ProllyRowConverter struct {
	valDesc          val.TupleDesc
	pkTargetTypes    []sql.Type
	nonPkTargetTypes []sql.Type
	warnFn           rowconv.WarnFunction
}

func NewProllyRowConverter(inSch, outSch schema.Schema, warnFn rowconv.WarnFunction) (ProllyRowConverter, error) {
	keyProj, valProj, err := diff.MapSchemaBasedOnName(inSch, outSch)
	if err != nil {
		return ProllyRowConverter{}, err
	}
@@ -80,89 +81,49 @@ func NewProllyRowConverter(inSch, outSch schema.Schema) (ProllyRowConverter, err
		valDesc:          vd,
		pkTargetTypes:    pkTargetTypes,
		nonPkTargetTypes: nonPkTargetTypes,
		warnFn:           warnFn,
	}, nil
}

// PutConverted converts the |key| and |value| val.Tuple from |inSchema| to |outSchema|
// and places the converted row in |dstRow|.
func (c ProllyRowConverter) PutConverted(key, value val.Tuple, dstRow []interface{}) error {
	err := c.putFields(key, c.keyProj, c.keyDesc, c.pkTargetTypes, dstRow)
	if err != nil {
		return err
	}

	err = c.putFields(value, c.valProj, c.valDesc, c.nonPkTargetTypes, dstRow)
	if err != nil {
		return err
	}

	return nil
}

func (c ProllyRowConverter) putFields(tup val.Tuple, proj val.OrdinalMapping, desc val.TupleDesc, targetTypes []sql.Type, dstRow []interface{}) error {
	for i, j := range proj {
		if j == -1 {
			continue
		}
		f, err := index.GetField(desc, i, tup)
		if err != nil {
			return err
		}
		if t := targetTypes[i]; t != nil {
			dstRow[j], err = t.Convert(f)
			if sql.ErrInvalidValue.Is(err) && c.warnFn != nil {
				col := c.inSchema.GetAllCols().GetByIndex(i)
				c.warnFn(rowconv.DatatypeCoercionFailureWarningCode, rowconv.DatatypeCoercionFailureWarning, col.Name)
				dstRow[j] = nil
				err = nil
			}
			if err != nil {
				return err
			}
		} else {
			dstRow[j] = f
		}
	}
	return nil
}
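
To illustrate the ordinal mappings that putFields consumes, a small worked example (the schemas are hypothetical):

// Worked example: mapping inSch(pk, a, b) onto outSch(pk, a) produces
// keyProj = [0] and valProj = [1, -1]. putFields then copies column a into
// dstRow[1] and skips column b entirely, because its ordinal maps to -1.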
@@ -76,7 +76,7 @@ func TestSingleQuery(t *testing.T) {

// Convenience test for debugging a single query. Unskip and set to the desired query.
func TestSingleScript(t *testing.T) {
	t.Skip()

	var scripts = []queries.ScriptTest{
		{
@@ -185,14 +185,6 @@ func TestAmbiguousColumnResolution(t *testing.T) {
}

func TestInsertInto(t *testing.T) {
	enginetest.TestInsertInto(t, newDoltHarness(t))
}

@@ -649,6 +641,12 @@ func TestDoltReset(t *testing.T) {
	}
}

func TestDoltBranch(t *testing.T) {
	for _, script := range DoltBranchScripts {
		enginetest.TestScript(t, newDoltHarness(t), script)
	}
}

// TestSingleTransactionScript is a convenience method for debugging a single transaction test. Unskip and set to the
// desired test.
func TestSingleTransactionScript(t *testing.T) {
@@ -773,7 +771,6 @@ func TestUnscopedDiffSystemTable(t *testing.T) {
}

func TestDiffTableFunction(t *testing.T) {
	harness := newDoltHarness(t)
	harness.Setup(setup.MydbData)
	for _, test := range DiffTableFunctionScriptTests {
@@ -785,7 +782,6 @@
}

func TestCommitDiffSystemTable(t *testing.T) {
	harness := newDoltHarness(t)
	harness.Setup(setup.MydbData)
	for _, test := range CommitDiffSystemTableScriptTests {
@@ -797,7 +793,6 @@
}

func TestDiffSystemTable(t *testing.T) {
	harness := newDoltHarness(t)
	harness.Setup(setup.MydbData)
	for _, test := range DiffSystemTableScriptTests {
@@ -937,14 +932,6 @@ func TestScriptsPrepared(t *testing.T) {

func TestInsertScriptsPrepared(t *testing.T) {
	skipPreparedTests(t)
	enginetest.TestInsertScriptsPrepared(t, newDoltHarness(t))
}

@@ -995,8 +982,6 @@ func TestPrepared(t *testing.T) {
}

func TestPreparedInsert(t *testing.T) {
	skipPreparedTests(t)
	enginetest.TestPreparedInsert(t, newDoltHarness(t))
}

@@ -1297,6 +1297,144 @@ var MergeScripts = []queries.ScriptTest{
	},
}

var DoltBranchScripts = []queries.ScriptTest{
	{
		Name: "Create branches from HEAD with dolt_branch procedure",
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "CALL DOLT_BRANCH('myNewBranch1')",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "SELECT COUNT(*) FROM DOLT_BRANCHES WHERE NAME='myNewBranch1';",
				Expected: []sql.Row{{1}},
			},
			{
				// Trying to recreate that branch fails without the force flag
				Query:          "CALL DOLT_BRANCH('myNewBranch1')",
				ExpectedErrStr: "fatal: A branch named 'myNewBranch1' already exists.",
			},
			{
				Query:    "CALL DOLT_BRANCH('-f', 'myNewBranch1')",
				Expected: []sql.Row{{0}},
			},
		},
	},
	{
		Name: "Rename branches with dolt_branch procedure",
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "CALL DOLT_BRANCH('myNewBranch1')",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "CALL DOLT_BRANCH('myNewBranch2')",
				Expected: []sql.Row{{0}},
			},
			{
				// Renaming to an existing name fails without the force flag
				Query:          "CALL DOLT_BRANCH('-m', 'myNewBranch1', 'myNewBranch2')",
				ExpectedErrStr: "already exists",
			},
			{
				Query:    "CALL DOLT_BRANCH('-mf', 'myNewBranch1', 'myNewBranch2')",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "CALL DOLT_BRANCH('-m', 'myNewBranch2', 'myNewBranch3')",
				Expected: []sql.Row{{0}},
			},
		},
	},
	{
		Name: "Copy branches from other branches using dolt_branch procedure",
		SetUpScript: []string{
			"CALL DOLT_BRANCH('myNewBranch1')",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:          "CALL DOLT_BRANCH('-c')",
				ExpectedErrStr: "error: invalid usage",
			},
			{
				Query:          "CALL DOLT_BRANCH('-c', 'myNewBranch1')",
				ExpectedErrStr: "error: invalid usage",
			},
			{
				Query:          "CALL DOLT_BRANCH('-c', 'myNewBranch2')",
				ExpectedErrStr: "error: invalid usage",
			},
			{
				Query:          "CALL DOLT_BRANCH('-c', '', '')",
				ExpectedErrStr: "error: cannot branch empty string",
			},
			{
				Query:    "CALL DOLT_BRANCH('-c', 'myNewBranch1', 'myNewBranch2')",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "SELECT COUNT(*) FROM DOLT_BRANCHES WHERE NAME='myNewBranch2';",
				Expected: []sql.Row{{1}},
			},
			{
				Query:          "CALL DOLT_BRANCH('-c', 'myNewBranch1', 'myNewBranch2')",
				ExpectedErrStr: "fatal: A branch named 'myNewBranch2' already exists.",
			},
			{
				Query:    "CALL DOLT_BRANCH('-cf', 'myNewBranch1', 'myNewBranch2')",
				Expected: []sql.Row{{0}},
			},
		},
	},
	{
		Name: "Delete branches with dolt_branch procedure",
		SetUpScript: []string{
			"CALL DOLT_BRANCH('myNewBranch1')",
			"CALL DOLT_BRANCH('myNewBranch2')",
			"CALL DOLT_BRANCH('myNewBranch3')",
			"CALL DOLT_BRANCH('myNewBranchWithCommit')",
			"CALL DOLT_CHECKOUT('myNewBranchWithCommit')",
			"CALL DOLT_COMMIT('--allow-empty', '-am', 'empty commit')",
			"CALL DOLT_CHECKOUT('main')",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:          "CALL DOLT_BRANCH('-d')",
				ExpectedErrStr: "error: invalid usage",
			},
			{
				Query:          "CALL DOLT_BRANCH('-d', '')",
				ExpectedErrStr: "error: cannot branch empty string",
			},
			{
				Query:          "CALL DOLT_BRANCH('-d', 'branchDoesNotExist')",
				ExpectedErrStr: "branch not found",
			},
			{
				Query:    "CALL DOLT_BRANCH('-d', 'myNewBranch1')",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "SELECT COUNT(*) FROM DOLT_BRANCHES WHERE NAME='myNewBranch1'",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "CALL DOLT_BRANCH('-d', 'myNewBranch2', 'myNewBranch3')",
				Expected: []sql.Row{{0}},
			},
			{
				// Trying to delete a branch with unpushed changes fails without force option
				Query:          "CALL DOLT_BRANCH('-d', 'myNewBranchWithCommit')",
				ExpectedErrStr: "attempted to delete a branch that is not fully merged into its parent; use `-f` to force",
			},
			{
				Query:    "CALL DOLT_BRANCH('-df', 'myNewBranchWithCommit')",
				Expected: []sql.Row{{0}},
			},
		},
	},
}

var DoltReset = []queries.ScriptTest{
	{
		Name: "CALL DOLT_RESET('--hard') should reset the merge state after uncommitted merge",
@@ -1546,21 +1684,65 @@ var DiffSystemTableScriptTests = []queries.ScriptTest{
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_DIFF_t WHERE TO_COMMIT=@Commit1 ORDER BY to_pk;",
				Expected: []sql.Row{
					{1, 2, nil, nil, "added"},
					{4, 5, nil, nil, "added"},
				},
			},
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_DIFF_t WHERE TO_COMMIT=@Commit2 ORDER BY to_pk;",
				Expected: []sql.Row{
					{1, nil, 1, 2, "modified"},
					{4, nil, 4, 5, "modified"},
				},
			},
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_DIFF_t WHERE TO_COMMIT=@Commit3 ORDER BY to_pk;",
				Expected: []sql.Row{
					{100, 101, nil, nil, "added"},
					// TODO: It's more correct to also return the following rows.
					//{1, 3, 1, nil, "modified"},
					//{4, 6, 4, nil, "modified"}

					// To explain why, let's inspect table t at each of the commits:
					//
					//     @Commit1         @Commit2      @Commit3
					// +----+----+----+  +----+----+  +-----+-----+
					// | pk | c1 | c2 |  | pk | c2 |  | pk  | c1  |
					// +----+----+----+  +----+----+  +-----+-----+
					// | 1  | 2  | 3  |  | 1  | 3  |  | 1   | 3   |
					// | 4  | 5  | 6  |  | 4  | 6  |  | 4   | 6   |
					// +----+----+----+  +----+----+  | 100 | 101 |
					//                                +-----+-----+
					//
					// If you were to interpret each table using the schema at
					// @Commit3, (pk, c1), you would see the following:
					//
					//  @Commit1       @Commit2       @Commit3
					// +----+----+  +----+------+  +-----+-----+
					// | pk | c1 |  | pk | c1   |  | pk  | c1  |
					// +----+----+  +----+------+  +-----+-----+
					// | 1  | 2  |  | 1  | NULL |  | 1   | 3   |
					// | 4  | 5  |  | 4  | NULL |  | 4   | 6   |
					// +----+----+  +----+------+  | 100 | 101 |
					//                             +-----+-----+
					//
					// The corresponding diffs for the interpreted tables:
					//
					// Diff between init and @Commit1:
					// + (1, 2)
					// + (4, 5)
					//
					// Diff between @Commit1 and @Commit2:
					// ~ (1, NULL)
					// ~ (4, NULL)
					//
					// Diff between @Commit2 and @Commit3:
					// ~ (1, 3) <- currently not outputted
					// ~ (4, 6) <- currently not outputted
					// + (100, 101)
					//
					// The missing rows are not produced by diff since the
					// underlying value of the prolly trees are not modified during a column rename.
				},
			},
		},
@@ -2592,20 +2774,21 @@ var CommitDiffSystemTableScriptTests = []queries.ScriptTest{
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_COMMIT_DIFF_t WHERE TO_COMMIT=@Commit1 and FROM_COMMIT=@Commit0 ORDER BY to_pk;",
				Expected: []sql.Row{
					{1, 2, nil, nil, "added"},
					{4, 5, nil, nil, "added"},
				},
			},
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_COMMIT_DIFF_t WHERE TO_COMMIT=@Commit2 and FROM_COMMIT=@Commit1 ORDER BY to_pk;",
				Expected: []sql.Row{
					{1, nil, 1, 2, "modified"},
					{4, nil, 4, 5, "modified"},
				},
			},
			{
				Query: "SELECT to_pk, to_c1, from_pk, from_c1, diff_type FROM DOLT_COMMIT_DIFF_t WHERE TO_COMMIT=@Commit3 and FROM_COMMIT=@Commit2 ORDER BY to_pk;",
				Expected: []sql.Row{
					// TODO: Missing rows here see TestDiffSystemTable tests
					{100, 101, nil, nil, "added"},
				},
			},

243 go/libraries/doltcore/sqle/enginetest/dolt_server_test.go (new file)
@@ -0,0 +1,243 @@

// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package enginetest

import (
	"context"
	gosql "database/sql"
	"math/rand"
	"strings"
	"testing"

	"github.com/dolthub/go-mysql-server/enginetest/queries"
	"github.com/dolthub/go-mysql-server/sql"
	"github.com/gocraft/dbr/v2"
	"github.com/stretchr/testify/require"

	"github.com/dolthub/dolt/go/cmd/dolt/commands/sqlserver"
	"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
)

// DoltBranchMultiSessionScriptTests contain tests that need to be run in a multi-session server environment
// in order to fully test branch deletion and renaming logic.
var DoltBranchMultiSessionScriptTests = []queries.ScriptTest{
	{
		Name: "Test multi-session behavior for deleting branches",
		SetUpScript: []string{
			"call dolt_branch('branch1');",
			"call dolt_branch('branch2');",
			"call dolt_branch('branch3');",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "/* client a */ CALL DOLT_CHECKOUT('branch1');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "/* client a */ select active_branch();",
				Expected: []sql.Row{{"branch1"}},
			},
			{
				Query:          "/* client b */ CALL DOLT_BRANCH('-d', 'branch1');",
				ExpectedErrStr: "Error 1105: unsafe to delete or rename branches in use in other sessions; use --force to force the change",
			},
			{
				Query:    "/* client a */ CALL DOLT_CHECKOUT('branch2');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "/* client b */ CALL DOLT_BRANCH('-d', 'branch1');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:          "/* client b */ CALL DOLT_BRANCH('-d', 'branch2');",
				ExpectedErrStr: "Error 1105: unsafe to delete or rename branches in use in other sessions; use --force to force the change",
			},
			{
				Query:    "/* client b */ CALL DOLT_BRANCH('-df', 'branch2');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "/* client b */ CALL DOLT_BRANCH('-d', 'branch3');",
				Expected: []sql.Row{{0}},
			},
		},
	},
	{
		Name: "Test multi-session behavior for renaming branches",
		SetUpScript: []string{
			"call dolt_branch('branch1');",
			"call dolt_branch('branch2');",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "/* client a */ CALL DOLT_CHECKOUT('branch1');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "/* client a */ select active_branch();",
				Expected: []sql.Row{{"branch1"}},
			},
			{
				Query:          "/* client b */ CALL DOLT_BRANCH('-m', 'branch1', 'movedBranch1');",
				ExpectedErrStr: "Error 1105: unsafe to delete or rename branches in use in other sessions; use --force to force the change",
			},
			{
				Query:    "/* client b */ CALL DOLT_BRANCH('-mf', 'branch1', 'movedBranch1');",
				Expected: []sql.Row{{0}},
			},
			{
				Query:    "/* client b */ CALL DOLT_BRANCH('-m', 'branch2', 'movedBranch2');",
				Expected: []sql.Row{{0}},
			},
		},
	},
}

// TestDoltMultiSessionBehavior runs tests that exercise multi-session logic on a running SQL server. Statements
// are sent through the server, from out of process, instead of directly to the in-process engine API.
func TestDoltMultiSessionBehavior(t *testing.T) {
	// When this test runs with the new storage engine format, we get a panic about an unknown message id.
	// Ex: https://github.com/dolthub/dolt/runs/6679643619?check_suite_focus=true
	skipNewFormat(t)

	testMultiSessionScriptTests(t, DoltBranchMultiSessionScriptTests)
}

func testMultiSessionScriptTests(t *testing.T, tests []queries.ScriptTest) {
	sc, serverConfig := startServer(t)
	defer sc.StopServer()

	for _, test := range tests {
		conn1, sess1 := newConnection(t, serverConfig)
		conn2, sess2 := newConnection(t, serverConfig)

		t.Run(test.Name, func(t *testing.T) {
			for _, setupStatement := range test.SetUpScript {
				_, err := sess1.Exec(setupStatement)
				require.NoError(t, err)
			}

			for _, assertion := range test.Assertions {
				t.Run(assertion.Query, func(t *testing.T) {
					var activeSession *dbr.Session
					if strings.Contains(strings.ToLower(assertion.Query), "/* client a */") {
						activeSession = sess1
					} else if strings.Contains(strings.ToLower(assertion.Query), "/* client b */") {
						activeSession = sess2
					} else {
						require.Fail(t, "unsupported client specification: "+assertion.Query)
					}

					rows, err := activeSession.Query(assertion.Query)

					if len(assertion.ExpectedErrStr) > 0 {
						require.EqualError(t, err, assertion.ExpectedErrStr)
					} else if assertion.ExpectedErr != nil {
						require.True(t, assertion.ExpectedErr.Is(err))
					} else if assertion.Expected != nil {
						require.NoError(t, err)
						assertResultsEqual(t, assertion.Expected, rows)
					} else {
						require.Fail(t, "unsupported ScriptTestAssertion property: %v", assertion)
					}
					if rows != nil {
						require.NoError(t, rows.Close())
					}
				})
			}
		})

		require.NoError(t, conn1.Close())
		require.NoError(t, conn2.Close())
	}
}

func makeDestinationSlice(t *testing.T, columnTypes []*gosql.ColumnType) []interface{} {
	dest := make([]any, len(columnTypes))
	for i, columnType := range columnTypes {
		switch strings.ToLower(columnType.DatabaseTypeName()) {
		case "int", "tinyint", "bigint":
			var integer int
			dest[i] = &integer
		case "text":
			var s string
			dest[i] = &s
		default:
			require.Fail(t, "unsupported type: "+columnType.DatabaseTypeName())
		}
	}

	return dest
}

func assertResultsEqual(t *testing.T, expected []sql.Row, rows *gosql.Rows) {
	columnTypes, err := rows.ColumnTypes()
	require.NoError(t, err)
	dest := makeDestinationSlice(t, columnTypes)

	for _, expectedRow := range expected {
		ok := rows.Next()
		if !ok {
			require.Fail(t, "Fewer results than expected")
		}
		err := rows.Scan(dest...)
		require.NoError(t, err)
		require.Equal(t, len(expectedRow), len(dest),
			"Different number of columns returned than expected")

		for j, expectedValue := range expectedRow {
			switch strings.ToUpper(columnTypes[j].DatabaseTypeName()) {
			case "TEXT":
				actualValue, ok := dest[j].(*string)
				require.True(t, ok)
				require.Equal(t, expectedValue, *actualValue)
			case "INT", "TINYINT", "BIGINT":
				actualValue, ok := dest[j].(*int)
				require.True(t, ok)
				require.Equal(t, expectedValue, *actualValue)
			default:
				require.Fail(t, "Unsupported datatype: %s", columnTypes[j].DatabaseTypeName())
			}
		}
	}

	if rows.Next() {
		require.Fail(t, "More results than expected")
	}
}

func startServer(t *testing.T) (*sqlserver.ServerController, sqlserver.ServerConfig) {
	dEnv := dtestutils.CreateEnvWithSeedData(t)
	port := 15403 + rand.Intn(25)
	serverConfig := sqlserver.DefaultServerConfig().WithPort(port)

	sc := sqlserver.NewServerController()
	go func() {
		_, _ = sqlserver.Serve(context.Background(), "", serverConfig, sc, dEnv)
	}()
	err := sc.WaitForStart()
	require.NoError(t, err)

	return sc, serverConfig
}

func newConnection(t *testing.T, serverConfig sqlserver.ServerConfig) (*dbr.Connection, *dbr.Session) {
	const dbName = "dolt"
	conn, err := dbr.Open("mysql", sqlserver.ConnectionString(serverConfig)+dbName, nil)
	require.NoError(t, err)
	sess := conn.NewSession(nil)
	return conn, sess
}

@@ -35,6 +35,7 @@ type DoltIndex interface {
	Schema() schema.Schema
	IndexSchema() schema.Schema
	Format() *types.NomsBinFormat
	IsPrimaryKey() bool
	GetDurableIndexes(*sql.Context, *doltdb.Table) (durable.Index, durable.Index, error)
}

@@ -147,6 +148,7 @@ func getPrimaryKeyIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sc
	indexSch: sch,
	tableSch: sch,
	unique:   true,
	isPk:     true,
	comment:  "",
	vrw:      t.ValueReadWriter(),
	keyBld:   keyBld,
@@ -173,6 +175,7 @@ func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch
	indexSch: idx.Schema(),
	tableSch: sch,
	unique:   idx.IsUnique(),
	isPk:     false,
	comment:  idx.Comment(),
	vrw:      t.ValueReadWriter(),
	keyBld:   keyBld,
@@ -189,6 +192,7 @@ type doltIndex struct {
	indexSch schema.Schema
	tableSch schema.Schema
	unique   bool
	isPk     bool
	comment  string

	vrw types.ValueReadWriter
@@ -397,6 +401,11 @@ func (di doltIndex) IsUnique() bool {
	return di.unique
}

// IsPrimaryKey implements DoltIndex.
func (di doltIndex) IsPrimaryKey() bool {
	return di.isPk
}

// Comment implements sql.Index
func (di doltIndex) Comment() string {
	return di.comment

@@ -1072,7 +1072,7 @@ func (t *AlterableDoltTable) RewriteInserter(
	return nil, err
}

	newSch = schema.CopyChecksConstraints(oldSch, newSch)

	if isColumnDrop(oldSchema, newSchema) {
		newSch = schema.CopyIndexes(oldSch, newSch)

@@ -25,20 +25,60 @@ import (
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/writer"
	"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
	"github.com/dolthub/dolt/go/libraries/doltcore/table/editor/creation"
	"github.com/dolthub/dolt/go/store/types"
)

type TempTable struct {
	tableName string
	dbName    string
	pkSch     sql.PrimaryKeySchema

	table *doltdb.Table
	sch   schema.Schema

	lookup sql.IndexLookup

	ed   writer.TableWriter
	opts editor.Options
}

var _ sql.TemporaryTable = &TempTable{}
var _ sql.Table = &TempTable{}
var _ sql.PrimaryKeyTable = &TempTable{}
var _ sql.IndexedTable = &TempTable{}
var _ sql.IndexAlterableTable = &TempTable{}
var _ sql.ForeignKeyTable = &TempTable{}
var _ sql.CheckTable = &TempTable{}
var _ sql.CheckAlterableTable = &TempTable{}
var _ sql.StatisticsTable = &TempTable{}

func NewTempTable(
	ctx *sql.Context,
	ddb *doltdb.DoltDB,
	pkSch sql.PrimaryKeySchema,
	name, db string,
	opts editor.Options,
) (*TempTable, error) {
	sess := dsess.DSessFromSess(ctx.Session)

	dbState, ok, err := sess.LookupDbState(ctx, db)
	if err != nil {
		return nil, err
	}

	if !ok {
		return nil, fmt.Errorf("database %s not found in session", db)
	}

	ws := dbState.WorkingSet

	sch, err := temporaryDoltSchema(ctx, pkSch)
	if err != nil {
		return nil, err
@@ -56,46 +96,77 @@ func NewTempTable(
		return nil, err
	}

	newRoot, err := ws.WorkingRoot().PutTable(ctx, name, tbl)
	if err != nil {
		return nil, err
	}

	newWs := ws.WithWorkingRoot(newRoot)

	gs := globalstate.NewGlobalStateStore()
	ait, err := gs.GetAutoIncrementTracker(ctx, newWs)
	if err != nil {
		return nil, err
	}

	writeSession := writer.NewWriteSession(tbl.Format(), newWs, ait, opts)

	tempTable := &TempTable{
		tableName: name,
		dbName:    db,
		pkSch:     pkSch,
		table:     tbl,
		sch:       sch,
		opts:      opts,
	}

	tempTable.ed, err = writeSession.GetTableWriter(ctx, name, db, setTempTableRoot(tempTable), false)
	if err != nil {
		return nil, err
	}

	return tempTable, nil
}

func setTempTableRoot(t *TempTable) func(ctx *sql.Context, dbName string, newRoot *doltdb.RootValue) error {
	return func(ctx *sql.Context, dbName string, newRoot *doltdb.RootValue) error {
		newTable, _, err := newRoot.GetTable(ctx, t.tableName)
		if err != nil {
			return err
		}

		t.table = newTable

		sess := dsess.DSessFromSess(ctx.Session)

		dbState, ok, err := sess.LookupDbState(ctx, t.dbName)
		if err != nil {
			return err
		}

		if !ok {
			return fmt.Errorf("database %s not found in session", t.dbName)
		}

		ws := dbState.WorkingSet
		newWs := ws.WithWorkingRoot(newRoot)

		gs := globalstate.NewGlobalStateStore()
		ait, err := gs.GetAutoIncrementTracker(ctx, newWs)
		if err != nil {
			return err
		}

		writeSession := writer.NewWriteSession(newTable.Format(), newWs, ait, t.opts)
		t.ed, err = writeSession.GetTableWriter(ctx, t.tableName, t.dbName, setTempTableRoot(t), false)
		if err != nil {
			return err
		}

		return nil
	}
}
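
// Note: the callback above is deliberately self-reinstalling. Each invocation
// builds a fresh write session against the new working set and passes
// setTempTableRoot(t) to GetTableWriter again, so later flushes of the temp
// table keep routing root updates back into this same TempTable instance.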

func (t *TempTable) GetIndexes(ctx *sql.Context) ([]sql.Index, error) {
	return index.DoltIndexesFromTable(ctx, t.dbName, t.tableName, t.table)
}
@@ -309,55 +380,15 @@ func (t *TempTable) DropCheck(ctx *sql.Context, chName string) error {
}

func (t *TempTable) Insert(ctx *sql.Context, sqlRow sql.Row) error {
	return t.ed.Insert(ctx, sqlRow)
}

func (t *TempTable) Update(ctx *sql.Context, oldRow sql.Row, newRow sql.Row) error {
	return t.ed.Update(ctx, oldRow, newRow)
}

func (t *TempTable) Delete(ctx *sql.Context, sqlRow sql.Row) error {
	return t.ed.Delete(ctx, sqlRow)
}

func (t *TempTable) StatementBegin(ctx *sql.Context) {
@@ -375,19 +406,9 @@ func (t *TempTable) StatementComplete(ctx *sql.Context) error {
}

func (t *TempTable) Close(ctx *sql.Context) error {
	err := t.ed.Close(ctx)

	t.lookup = nil

	return err
}

@@ -56,13 +56,10 @@ func (n prollyFkIndexer) Partitions(ctx *sql.Context) (sql.PartitionIter, error)
// PartitionRows implements the interface sql.Table.
func (n prollyFkIndexer) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.RowIter, error) {
	var idxWriter indexWriter
	var ok bool
	if n.index.IsPrimaryKey() {
		idxWriter = n.writer.primary
	} else if idxWriter, ok = n.writer.secondary[n.index.ID()]; !ok {
		return nil, fmt.Errorf("unable to find writer for index `%s`", n.index.ID())
	}

@@ -173,8 +170,8 @@ func (iter prollyFkKeylessRowIter) Next(ctx *sql.Context) (sql.Row, error) {
	return nil, io.EOF
}
	hashId := k.GetField(k.Count() - 1)
	iter.primary.keyBld.PutHash128(0, hashId)
	primaryKey := iter.primary.keyBld.Build(sharePool)

	nextRow := make(sql.Row, len(iter.primary.valMap))
	err = iter.primary.mut.Get(ctx, primaryKey, func(tblKey, tblVal val.Tuple) error {

@@ -56,11 +56,12 @@ func getPrimaryKeylessProllyWriter(ctx context.Context, t *doltdb.Table, sqlSch

	m := durable.ProllyMapFromIndex(idx)

	keyDesc, valDesc := m.Descriptors()
	_, valMap := ordinalMappingsFromSchema(sqlSch, sch)

	return prollyKeylessWriter{
		mut:    m.Mutate(),
		keyBld: val.NewTupleBuilder(keyDesc),
		valBld: val.NewTupleBuilder(valDesc),
		valMap: valMap,
	}, nil
@@ -238,6 +239,7 @@ type prollyKeylessWriter struct {
	name string
	mut  prolly.MutableMap

	keyBld *val.TupleBuilder
	valBld *val.TupleBuilder
	valMap val.OrdinalMapping
}

@@ -38,7 +38,7 @@ type prollyTableWriter struct {
|
||||
dbName string
|
||||
|
||||
primary indexWriter
|
||||
secondary []indexWriter
|
||||
secondary map[string]indexWriter
|
||||
|
||||
tbl *doltdb.Table
|
||||
sch schema.Schema
|
||||
@@ -54,17 +54,18 @@ type prollyTableWriter struct {
|
||||
|
||||
var _ TableWriter = &prollyTableWriter{}
|
||||
|
||||
func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) ([]indexWriter, error) {
|
||||
func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) (map[string]indexWriter, error) {
|
||||
s, err := t.GetIndexSet(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
definitions := sch.Indexes().AllIndexes()
|
||||
writers := make([]indexWriter, len(definitions))
|
||||
writers := make(map[string]indexWriter)
|
||||
|
||||
for i, def := range definitions {
|
||||
idxRows, err := s.GetIndex(ctx, sch, def.Name())
|
||||
for _, def := range definitions {
|
||||
defName := def.Name()
|
||||
idxRows, err := s.GetIndex(ctx, sch, defName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -73,8 +74,8 @@ func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch
|
||||
keyMap, valMap := ordinalMappingsFromSchema(sqlSch, def.Schema())
|
||||
keyDesc, valDesc := m.Descriptors()
|
||||
|
||||
writers[i] = prollyIndexWriter{
|
||||
name: def.Name(),
|
||||
writers[defName] = prollyIndexWriter{
|
||||
name: defName,
|
||||
mut: m.Mutate(),
|
||||
keyBld: val.NewTupleBuilder(keyDesc),
|
||||
keyMap: keyMap,
|
||||
@@ -86,17 +87,18 @@ func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch
|
||||
return writers, nil
|
||||
}
|
||||
|
||||
-func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema, primary prollyKeylessWriter) ([]indexWriter, error) {
+func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema, primary prollyKeylessWriter) (map[string]indexWriter, error) {
	s, err := t.GetIndexSet(ctx)
	if err != nil {
		return nil, err
	}

	definitions := sch.Indexes().AllIndexes()
-	writers := make([]indexWriter, len(definitions))
+	writers := make(map[string]indexWriter)

-	for i, def := range definitions {
-		idxRows, err := s.GetIndex(ctx, sch, def.Name())
+	for _, def := range definitions {
+		defName := def.Name()
+		idxRows, err := s.GetIndex(ctx, sch, defName)
		if err != nil {
			return nil, err
		}
@@ -106,8 +108,8 @@ func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlS
		keyMap, valMap := ordinalMappingsFromSchema(sqlSch, def.Schema())
		keyDesc, valDesc := m.Descriptors()

-		writers[i] = prollyKeylessSecondaryWriter{
-			name:    def.Name(),
+		writers[defName] = prollyKeylessSecondaryWriter{
+			name:    defName,
			mut:     m.Mutate(),
			primary: primary,
			unique:  def.IsUnique(),
@@ -260,7 +262,7 @@ func (w *prollyTableWriter) Reset(ctx context.Context, sess *prollyWriteSession,
	aiCol := autoIncrementColFromSchema(sch)
	var newPrimary indexWriter
-	var newSecondaries []indexWriter
+	var newSecondaries map[string]indexWriter
	if schema.IsKeyless(sch) {
		newPrimary, err = getPrimaryKeylessProllyWriter(ctx, tbl, sqlSch.Schema, sch)
		if err != nil {

@@ -66,7 +66,7 @@ func (s *prollyWriteSession) GetTableWriter(ctx context.Context, table, db strin
	autoCol := autoIncrementColFromSchema(sch)

	var pw indexWriter
-	var sws []indexWriter
+	var sws map[string]indexWriter
	if schema.IsKeyless(sch) {
		pw, err = getPrimaryKeylessProllyWriter(ctx, t, pkSch.Schema, sch)
		if err != nil {

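A side note on the slice-to-map change above: Go randomizes map iteration order, so any caller that walks every secondary writer can no longer rely on positional order. A minimal, self-contained sketch of the standard fix (sorting the keys first), not code from this diff:

package main

import (
	"fmt"
	"sort"
)

func main() {
	writers := map[string]string{"idx_b": "b", "idx_a": "a"}

	// Deterministic walk over a map: collect and sort the keys first.
	names := make([]string, 0, len(writers))
	for name := range writers {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Println(name, writers[name])
	}
}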
45 go/libraries/doltcore/sqlserver/dolt_server.go Normal file
@@ -0,0 +1,45 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlserver

import (
	"sync"

	"github.com/dolthub/go-mysql-server/server"
)

var mySQLServer *server.Server
var mySQLServerMutex sync.Mutex

// RunningInServerMode returns true if the current process is running a SQL server.
func RunningInServerMode() bool {
	mySQLServerMutex.Lock()
	defer mySQLServerMutex.Unlock()
	return mySQLServer != nil
}

// GetRunningServer returns the Server instance running in this process, or nil if no SQL server is running.
func GetRunningServer() *server.Server {
	mySQLServerMutex.Lock()
	defer mySQLServerMutex.Unlock()
	return mySQLServer
}

// SetRunningServer sets the specified Server as the running SQL server for this process.
func SetRunningServer(server *server.Server) {
	mySQLServerMutex.Lock()
	defer mySQLServerMutex.Unlock()
	mySQLServer = server
}

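These accessors give the rest of the process a mutex-guarded way to discover the running SQL server. A minimal usage sketch; the placeholder server value stands in for the instance the sql-server startup path would construct:

package main

import (
	"fmt"

	"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
	"github.com/dolthub/go-mysql-server/server"
)

func main() {
	fmt.Println(sqlserver.RunningInServerMode()) // false before registration

	// Placeholder: a real instance comes from the sql-server command.
	srv := new(server.Server)
	sqlserver.SetRunningServer(srv)

	if s := sqlserver.GetRunningServer(); s != nil {
		fmt.Println("server registered") // true once a server is set
	}
}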
@@ -5,6 +5,7 @@ set -o pipefail
SYSBENCH_TEST="oltp_point_select"
WORKING_DIR=`mktemp -d`
PPROF=0
+PORT=3366

# parse options
# superuser.com/questions/186272/
@@ -13,10 +14,10 @@ do
case "$1" in

    # benchmark with new NomsBinFmt
-    --new-nbf) export DOLT_FORMAT_FEATURE_FLAG=true
+    --new-nbf) export DOLT_DEFAULT_BIN_FORMAT="__DOLT_1__"
        ;;

-    --new-new) export DOLT_FORMAT_FEATURE_FLAG=true &&
+    --new-new) export DOLT_DEFAULT_BIN_FORMAT="__DOLT_1__" &&
               export ENABLE_ROW_ITER_2=true
        ;;

@@ -61,7 +62,7 @@ user:

listener:
  host: "0.0.0.0"
-  port: 3306
+  port: $PORT
  max_connections: 128
  read_timeout_millis: 28800000
  write_timeout_millis: 28800000
@@ -87,7 +88,9 @@ echo "benchmark $SYSBENCH_TEST bootstrapping at $WORKING_DIR"

sleep 1
sysbench \
  --db-driver="mysql" \
+  --mysql-host="0.0.0.0" \
+  --mysql-port="$PORT" \
  --mysql-user="user" \
  --mysql-password="pass" \
  "$SYSBENCH_TEST" prepare
@@ -109,13 +112,15 @@ sleep 1
echo "benchmark $SYSBENCH_TEST starting at $WORKING_DIR"

sysbench \
  --db-driver="mysql" \
+  --mysql-host="0.0.0.0" \
+  --mysql-port="$PORT" \
  --mysql-user="user" \
  --mysql-password="pass" \
  --db-ps-mode=disable \
  "$SYSBENCH_TEST" run

-unset DOLT_FORMAT_FEATURE_FLAG
+unset DOLT_DEFAULT_BIN_FORMAT
unset ENABLE_ROW_ITER_2
unset SINGLE_THREAD_FEATURE_FLAG
unset GOMAXPROCS

@@ -279,8 +279,10 @@ type Config struct {
	TestOptions []string
	// ScriptDir is a path to a directory of lua scripts
	ScriptDir string
-	// DirtyClone downloads a database with existing chunks and commits
+	// InitBigRepo downloads a database with existing chunks and commits
	InitBigRepo bool
+	// NomsBinFormat specifies the NomsBinFormat
+	NomsBinFormat string
}

// NewConfig returns a new Config

@@ -32,6 +32,7 @@ const (
	dbName       = "test"
	luaPath      = "?.lua"
	bigEmptyRepo = "max-hoffman/big-empty"
+	nbfEnvVar    = "DOLT_DEFAULT_BIN_FORMAT"
)

var stampFunc = func() string { return time.Now().UTC().Format(stampFormat) }
@@ -50,7 +51,7 @@ func BenchmarkDolt(ctx context.Context, config *Config, serverConfig *ServerConf
		return nil, err
	}

-	testRepo, err := initDoltRepo(ctx, serverConfig, config.InitBigRepo)
+	testRepo, err := initDoltRepo(ctx, serverConfig, config.InitBigRepo, config.NomsBinFormat)
	if err != nil {
		return nil, err
	}
@@ -133,7 +134,7 @@ func doltVersion(ctx context.Context, config *ServerConfig) error {
}

// initDoltRepo initializes a dolt repo and returns the repo path
-func initDoltRepo(ctx context.Context, config *ServerConfig, initBigRepo bool) (string, error) {
+func initDoltRepo(ctx context.Context, config *ServerConfig, initBigRepo bool, nbf string) (string, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return "", err
@@ -152,6 +153,12 @@ func initDoltRepo(ctx context.Context, config *ServerConfig, initBigRepo bool) (
		return "", err
	}

+	if nbf != "" {
+		if err = os.Setenv(nbfEnvVar, nbf); err != nil {
+			return "", err
+		}
+	}
+
	doltInit := ExecCommand(ctx, config.ServerExec, "init")
	doltInit.Dir = testRepo
	err = doltInit.Run()

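The format is threaded through as an environment variable so the dolt binary picks it up at init time. A self-contained sketch of the same pattern, assuming a dolt binary on PATH; initRepoWithFormat is a hypothetical stand-in for the benchmark's ExecCommand-based code:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

const nbfEnvVar = "DOLT_DEFAULT_BIN_FORMAT"

// initRepoWithFormat mirrors the initDoltRepo change: set the format
// env var only when a format was requested, then run `dolt init`.
func initRepoWithFormat(dir, nbf string) error {
	if nbf != "" {
		if err := os.Setenv(nbfEnvVar, nbf); err != nil {
			return err
		}
	}
	cmd := exec.Command("dolt", "init")
	cmd.Dir = dir
	return cmd.Run()
}

func main() {
	if err := initRepoWithFormat(os.TempDir(), "__DOLT_1__"); err != nil {
		fmt.Println("init failed:", err)
	}
}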
@@ -20,6 +20,7 @@ import (
	"io"
	"strings"

+	"github.com/dolthub/dolt/go/store/pool"
	"github.com/dolthub/dolt/go/store/prolly/message"

	"github.com/dolthub/dolt/go/store/hash"
@@ -115,6 +116,11 @@ func (c ConflictMap) IterOrdinalRange(ctx context.Context, start, stop uint64) (
	return c.conflicts.iterOrdinalRange(ctx, start, stop)
}

+// Pool returns the pool.BuffPool of the underlying conflicts' tree.NodeStore
+func (c ConflictMap) Pool() pool.BuffPool {
+	return c.conflicts.ns.Pool()
+}
+
func (c ConflictMap) Editor() ConflictEditor {
	return ConflictEditor{
		conflicts: c.conflicts.mutate(),

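Surfacing the conflict map's pool.BuffPool lets callers reuse the map's allocator instead of constructing a second pool for tuple building. The idea reduced to a sync.Pool analogy (not dolt's actual pool type):

package main

import (
	"fmt"
	"sync"
)

// A shared buffer pool amortizes allocations across writers, which is
// why ConflictMap now exposes its own pool rather than forcing callers
// to create a new one.
var bufPool = sync.Pool{
	New: func() any { return make([]byte, 0, 1024) },
}

func main() {
	buf := bufPool.Get().([]byte)
	buf = append(buf, "conflict tuple bytes"...)
	fmt.Println(len(buf))
	bufPool.Put(buf[:0]) // return the buffer for reuse
}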
@@ -245,36 +245,48 @@ SQL
    [ "$output" = "$mainhash" ]
}

-@test "sql-branch: asserts unsupported -m, -d, -D flags" {
+@test "sql-branch: SELECT DOLT_BRANCH to rename and delete" {
    dolt add . && dolt commit -m "1, 2, and 3 in test table"
    dolt branch new_branch

    run dolt sql -q "SELECT DOLT_BRANCH('-m', 'new_branch', 'changed');"
-    [ $status -eq 1 ]
-    [[ "$output" =~ "Renaming a branch is not supported." ]] || false
+    [ $status -eq 0 ]

-    run dolt sql -q "SELECT DOLT_BRANCH('-d', 'new_branch');"
-    [ $status -eq 1 ]
-    [[ "$output" =~ "Deleting branches is not supported." ]] || false
+    run dolt sql -q "SELECT DOLT_BRANCH('-d', 'changed');"
+    [ $status -eq 0 ]

-    run dolt sql -q "SELECT DOLT_BRANCH('-D', 'new_branch');"
+    dolt branch branch_with_unpushed_commit
+    dolt checkout branch_with_unpushed_commit
+    dolt commit --allow-empty -am 'empty commit'
+    dolt checkout main
+
+    run dolt sql -q "SELECT DOLT_BRANCH('-d', 'branch_with_unpushed_commit');"
    [ $status -eq 1 ]
-    [[ "$output" =~ "Deleting branches is not supported." ]] || false
+    [[ "$output" =~ "attempted to delete a branch that is not fully merged" ]] || false
+
+    run dolt sql -q "SELECT DOLT_BRANCH('-D', 'branch_with_unpushed_commit');"
+    [ $status -eq 0 ]
}

-@test "sql-branch: asserts unsupported -m, -d, -D flags on CALL" {
+@test "sql-branch: CALL DOLT_BRANCH to rename and delete" {
    dolt add . && dolt commit -m "1, 2, and 3 in test table"
    dolt branch new_branch

    run dolt sql -q "CALL DOLT_BRANCH('-m', 'new_branch', 'changed');"
-    [ $status -eq 1 ]
-    [[ "$output" =~ "Renaming a branch is not supported." ]] || false
+    [ $status -eq 0 ]

-    run dolt sql -q "CALL DOLT_BRANCH('-d', 'new_branch');"
-    [ $status -eq 1 ]
-    [[ "$output" =~ "Deleting branches is not supported." ]] || false
+    run dolt sql -q "CALL DOLT_BRANCH('-d', 'changed');"
+    [ $status -eq 0 ]

-    run dolt sql -q "CALL DOLT_BRANCH('-D', 'new_branch');"
+    dolt branch branch_with_unpushed_commit
+    dolt checkout branch_with_unpushed_commit
+    dolt commit --allow-empty -am 'empty commit'
+    dolt checkout main
+
+    run dolt sql -q "CALL DOLT_BRANCH('-d', 'branch_with_unpushed_commit');"
    [ $status -eq 1 ]
-    [[ "$output" =~ "Deleting branches is not supported." ]] || false
+    [[ "$output" =~ "attempted to delete a branch that is not fully merged" ]] || false
+
+    run dolt sql -q "CALL DOLT_BRANCH('-D', 'branch_with_unpushed_commit');"
+    [ $status -eq 0 ]
}

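With rename and delete now supported, DOLT_BRANCH can be driven from any MySQL client, not just the dolt CLI. A hedged Go sketch against a locally running dolt sql-server; the DSN and database name are assumptions:

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Assumes a dolt sql-server on localhost:3306 with database "test".
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Rename, then force-delete, mirroring the flags exercised in the tests.
	if _, err := db.Exec("CALL DOLT_BRANCH('-m', 'new_branch', 'changed')"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("CALL DOLT_BRANCH('-D', 'changed')"); err != nil {
		log.Fatal(err)
	}
}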
@@ -3,7 +3,6 @@ load $BATS_TEST_DIRNAME/helper/common.bash
|
||||
|
||||
setup() {
|
||||
setup_common
|
||||
skip_nbf_dolt_1
|
||||
}
|
||||
|
||||
teardown() {
|
||||
@@ -582,9 +581,9 @@ SQL
|
||||
}
|
||||
|
||||
@test "sql-create-tables: Alter on a temporary table" {
|
||||
skip "unskip once DDL operations are moved to the SQL engine"
|
||||
|
||||
run dolt sql <<SQL
|
||||
skip "cannot alter temporary tables"
|
||||
|
||||
dolt sql <<SQL
|
||||
CREATE TEMPORARY TABLE goodtable(pk int PRIMARY KEY);
|
||||
ALTER TABLE goodtable ADD COLUMN val int;
|
||||