Merge branch 'main' into fulghum/orm-testing

This commit is contained in:
Jason Fulghum
2022-11-09 11:36:36 -08:00
167 changed files with 3629 additions and 4385 deletions
+5 -4
View File
@@ -55,7 +55,7 @@ jobs:
- name: Setup Python 3.x
uses: actions/setup-python@v4
with:
python-version: ^3.6
python-version: "3.10"
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
@@ -104,10 +104,11 @@ jobs:
id: parquet_cli
working-directory: ./.ci_bin
run: |
git clone https://github.com/apache/parquet-mr.git
cd parquet-mr/parquet-cli
curl -OL https://github.com/apache/parquet-mr/archive/refs/tags/apache-parquet-1.12.3.tar.gz
tar zxvf apache-parquet-1.12.3.tar.gz
cd parquet-mr-apache-parquet-1.12.3/parquet-cli
mvn clean install -DskipTests
runtime_jar="$(pwd)"/target/parquet-cli-1.13.0-SNAPSHOT-runtime.jar
runtime_jar="$(pwd)"/target/parquet-cli-1.12.3-runtime.jar
echo "runtime_jar=$runtime_jar" >> $GITHUB_OUTPUT
- name: Check expect
run: expect -v
+1 -1
View File
@@ -100,7 +100,7 @@ jobs:
- name: Setup Python 3.x
uses: actions/setup-python@v4
with:
python-version: ^3.6
python-version: "3.10"
- uses: actions/checkout@v3
if: ${{ github.event_name == 'repository_dispatch' }}
with:
+10
View File
@@ -0,0 +1,10 @@
name: Import Benchmarks
on:
workflow_dispatch:
jobs:
test:
name: Import benchmarks
steps:
- name: Stub
run: echo hello
-807
View File
@@ -868,33 +868,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= git.sr.ht/~sbinet/gg licensed under: =
Copyright (C) 2022 The gg Authors
Copyright (C) 2016 Michael Fogleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE.md 6ce9a5e176cfbd7286201308cde581cd0074888742a82ceffeceacee =
================================================================================
================================================================================
= github.com/HdrHistogram/hdrhistogram-go licensed under: =
@@ -952,233 +925,6 @@ SOFTWARE.
= LICENSE 776df548a1197b2f7e08bb8af22033f154591c7960101bba3438812b =
================================================================================
================================================================================
= github.com/ajstarks/svgo licensed under: =
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree to
be bound by the terms and conditions of this Creative Commons Attribution
4.0 International Public License ("Public License"). To the extent this
Public License may be interpreted as a contract, You are granted the
Licensed Rights in consideration of Your acceptance of these terms and
conditions, and the Licensor grants You such rights in consideration
of benefits the Licensor receives from making the Licensed Material
available under these terms and conditions.
Section 1 – Definitions.
Adapted Material means material subject to Copyright and Similar Rights
that is derived from or based upon the Licensed Material and in which
the Licensed Material is translated, altered, arranged, transformed, or
otherwise modified in a manner requiring permission under the Copyright
and Similar Rights held by the Licensor. For purposes of this Public
License, where the Licensed Material is a musical work, performance,
or sound recording, Adapted Material is always produced where the
Licensed Material is synched in timed relation with a moving image.
Adapter's License means the license You apply to Your Copyright and
Similar Rights in Your contributions to Adapted Material in accordance
with the terms and conditions of this Public License. Copyright and
Similar Rights means copyright and/or similar rights closely related to
copyright including, without limitation, performance, broadcast, sound
recording, and Sui Generis Database Rights, without regard to how the
rights are labeled or categorized. For purposes of this Public License,
the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights. Effective Technological Measures means those measures that,
in the absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright Treaty
adopted on December 20, 1996, and/or similar international agreements.
Exceptions and Limitations means fair use, fair dealing, and/or any other
exception or limitation to Copyright and Similar Rights that applies to
Your use of the Licensed Material. Licensed Material means the artistic
or literary work, database, or other material to which the Licensor
applied this Public License. Licensed Rights means the rights granted
to You subject to the terms and conditions of this Public License, which
are limited to all Copyright and Similar Rights that apply to Your use
of the Licensed Material and that the Licensor has authority to license.
Licensor means the individual(s) or entity(ies) granting rights under
this Public License. Share means to provide material to the public by
any means or process that requires permission under the Licensed Rights,
such as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the public
may access the material from a place and at a time individually chosen
by them. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of the
Council of 11 March 1996 on the legal protection of databases, as amended
and/or succeeded, as well as other essentially equivalent rights anywhere
in the world. You means the individual or entity exercising the Licensed
Rights under this Public License. Your has a corresponding meaning.
Section 2 – Scope.
License grant. Subject to the terms and conditions of this Public
License, the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to exercise the
Licensed Rights in the Licensed Material to: reproduce and Share the
Licensed Material, in whole or in part; and produce, reproduce, and
Share Adapted Material. Exceptions and Limitations. For the avoidance
of doubt, where Exceptions and Limitations apply to Your use, this
Public License does not apply, and You do not need to comply with
its terms and conditions. Term. The term of this Public License is
specified in Section 6(a). Media and formats; technical modifications
allowed. The Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created, and to make
technical modifications necessary to do so. The Licensor waives and/or
agrees not to assert any right or authority to forbid You from making
technical modifications necessary to exercise the Licensed Rights,
including technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License, simply making
modifications authorized by this Section 2(a)(4) never produces Adapted
Material. Downstream recipients. Offer from the Licensor Licensed
Material. Every recipient of the Licensed Material automatically receives
an offer from the Licensor to exercise the Licensed Rights under the terms
and conditions of this Public License. No downstream restrictions. You
may not offer or impose any additional or different terms or conditions
on, or apply any Effective Technological Measures to, the Licensed
Material if doing so restricts exercise of the Licensed Rights by any
recipient of the Licensed Material. No endorsement. Nothing in this
Public License constitutes or may be construed as permission to assert
or imply that You are, or that Your use of the Licensed Material is,
connected with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as provided in
Section 3(a)(1)(A)(i). Other rights.
Moral rights, such as the right of integrity, are not licensed under
this Public License, nor are publicity, privacy, and/or other similar
personality rights; however, to the extent possible, the Licensor waives
and/or agrees not to assert any such rights held by the Licensor to the
limited extent necessary to allow You to exercise the Licensed Rights, but
not otherwise. Patent and trademark rights are not licensed under this
Public License. To the extent possible, the Licensor waives any right
to collect royalties from You for the exercise of the Licensed Rights,
whether directly or through a collecting society under any voluntary or
waivable statutory or compulsory licensing scheme. In all other cases
the Licensor expressly reserves any right to collect such royalties.
Section 3 – License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
Attribution.
If You Share the Licensed Material (including in modified form), You must:
retain the following if it is supplied by the Licensor with the Licensed
Material: identification of the creator(s) of the Licensed Material and
any others designated to receive attribution, in any reasonable manner
requested by the Licensor (including by pseudonym if designated); a
copyright notice; a notice that refers to this Public License; a notice
that refers to the disclaimer of warranties; a URI or hyperlink to the
Licensed Material to the extent reasonably practicable; indicate if You
modified the Licensed Material and retain an indication of any previous
modifications; and indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or hyperlink to,
this Public License. You may satisfy the conditions in Section 3(a)(1)
in any reasonable manner based on the medium, means, and context in which
You Share the Licensed Material. For example, it may be reasonable to
satisfy the conditions by providing a URI or hyperlink to a resource
that includes the required information. If requested by the Licensor,
You must remove any of the information required by Section 3(a)(1)(A)
to the extent reasonably practicable. If You Share Adapted Material You
produce, the Adapter's License You apply must not prevent recipients of
the Adapted Material from complying with this Public License. Section 4 –
Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that apply
to Your use of the Licensed Material:
for the avoidance of doubt, Section 2(a)(1) grants You the right to
extract, reuse, reproduce, and Share all or a substantial portion of the
contents of the database; if You include all or a substantial portion of
the database contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database Rights
(but not its individual contents) is Adapted Material; and You must comply
with the conditions in Section 3(a) if You Share all or a substantial
portion of the contents of the database. For the avoidance of doubt,
this Section 4 supplements and does not replace Your obligations under
this Public License where the Licensed Rights include other Copyright and
Similar Rights. Section 5 – Disclaimer of Warranties and Limitation
of Liability.
Unless otherwise separately undertaken by the Licensor, to the
extent possible, the Licensor offers the Licensed Material as-is and
as-available, and makes no representations or warranties of any kind
concerning the Licensed Material, whether express, implied, statutory,
or other. This includes, without limitation, warranties of title,
merchantability, fitness for a particular purpose, non-infringement,
absence of latent or other defects, accuracy, or the presence or absence
of errors, whether or not known or discoverable. Where disclaimers of
warranties are not allowed in full or in part, this disclaimer may not
apply to You. To the extent possible, in no event will the Licensor
be liable to You on any legal theory (including, without limitation,
negligence) or otherwise for any direct, special, indirect, incidental,
consequential, punitive, exemplary, or other losses, costs, expenses,
or damages arising out of this Public License or use of the Licensed
Material, even if the Licensor has been advised of the possibility of
such losses, costs, expenses, or damages. Where a limitation of liability
is not allowed in full or in part, this limitation may not apply to You.
The disclaimer of warranties and limitation of liability provided above
shall be interpreted in a manner that, to the extent possible, most
closely approximates an absolute disclaimer and waiver of all liability.
Section 6 – Term and Termination.
This Public License applies for the term of the Copyright and Similar
Rights licensed here. However, if You fail to comply with this
Public License, then Your rights under this Public License terminate
automatically. Where Your right to use the Licensed Material has
terminated under Section 6(a), it reinstates:
automatically as of the date the violation is cured, provided it is
cured within 30 days of Your discovery of the violation; or upon express
reinstatement by the Licensor. For the avoidance of doubt, this Section
6(b) does not affect any right the Licensor may have to seek remedies
for Your violations of this Public License. For the avoidance of doubt,
the Licensor may also offer the Licensed Material under separate terms
or conditions or stop distributing the Licensed Material at any time;
however, doing so will not terminate this Public License. Sections 1,
5, 6, 7, and 8 survive termination of this Public License. Section 7 –
Other Terms and Conditions.
The Licensor shall not be bound by any additional or different terms or
conditions communicated by You unless expressly agreed. Any arrangements,
understandings, or agreements regarding the Licensed Material not stated
herein are separate from and independent of the terms and conditions of
this Public License. Section 8 – Interpretation.
For the avoidance of doubt, this Public License does not, and shall not be
interpreted to, reduce, limit, restrict, or impose conditions on any use
of the Licensed Material that could lawfully be made without permission
under this Public License. To the extent possible, if any provision of
this Public License is deemed unenforceable, it shall be automatically
reformed to the minimum extent necessary to make it enforceable. If
the provision cannot be reformed, it shall be severed from this Public
License without affecting the enforceability of the remaining terms
and conditions. No term or condition of this Public License will be
waived and no failure to comply consented to unless expressly agreed
to by the Licensor. Nothing in this Public License constitutes or may
be interpreted as a limitation upon, or waiver of, any privileges and
immunities that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority. Creative Commons is not
a party to its public licenses. Notwithstanding, Creative Commons may
elect to apply one of its public licenses to material it publishes and
in those instances will be considered the “Licensor.” The text of
the Creative Commons public licenses is dedicated to the public domain
under the CC0 Public Domain Dedication. Except for the limited purpose of
indicating that material is shared under a Creative Commons public license
or as otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark “Creative Commons” or any other trademark or
logo of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements, understandings,
or agreements concerning use of licensed material. For the avoidance of
doubt, this paragraph does not form part of the public licenses.
Creative Commons may be contacted at creativecommons.org.
= LICENSE 6970290672b172c6ac6ce9de62f8fd574d48de35e68b48af45dc4edc =
================================================================================
================================================================================
= github.com/aliyun/aliyun-oss-go-sdk licensed under: =
@@ -1890,28 +1636,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE.txt 726f1b8f64f7e439b1b12c7cbde7b1427752a00ddea15019e4156465 =
================================================================================
================================================================================
= github.com/davecgh/go-spew licensed under: =
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
= LICENSE 1df7eb862ea59e064cc5b27e5d88aad979fad02e3755973892829af8 =
================================================================================
================================================================================
= github.com/denisbrodbeck/machineid licensed under: =
@@ -2613,38 +2337,6 @@ SOFTWARE.
= LICENSE a33ad37999b0aa5d38b8bc56a9c6b2d6287a7e2478ee822af7fa7a11 =
================================================================================
================================================================================
= github.com/dolthub/mmap-go licensed under: =
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 086af8ff5be785cbd4da914acec46f45197c2b0fd3b370cd140cedd3 =
================================================================================
================================================================================
= github.com/dolthub/vitess licensed under: =
@@ -3118,36 +2810,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= COPYING 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/go-fonts/liberation licensed under: =
Copyright ©2020 The go-fonts Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the go-fonts project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ec29fd02e2b1520da7df79262d6a977cb3986924353ae24b61affe9b =
================================================================================
================================================================================
= github.com/go-kit/kit licensed under: =
@@ -3177,36 +2839,6 @@ SOFTWARE.
= LICENSE 517fd017ba968d4bdbe3905b55314df7ea5e83d9d7422365dcee5566 =
================================================================================
================================================================================
= github.com/go-latex/latex licensed under: =
Copyright ©2020 The go-latex Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the go-latex project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 23be5c2f34988d946e85585e1099e63ff0962030b9612f582a793cda =
================================================================================
================================================================================
= github.com/go-logr/logr licensed under: =
@@ -3623,35 +3255,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 =
================================================================================
================================================================================
= github.com/go-pdf/fpdf licensed under: =
MIT License
Copyright (c) 2020 David Barnes
Copyright (c) 2017 Kurt Jung and contributors acknowledged in the documentation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE 58283e72fa52eb34341bbd16492272bca575265e4b3a0c769f3aa978 =
================================================================================
================================================================================
= github.com/go-sql-driver/mysql licensed under: =
@@ -4059,25 +3662,6 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= LICENSE 58527ba2d199f39270f67bbe7b7878b370704ff4f3b715a9f7fa667b =
================================================================================
================================================================================
= github.com/golang/freetype licensed under: =
Use of the Freetype-Go software is subject to your choice of exactly one of
the following two licenses:
* The FreeType License, which is similar to the original BSD license with
an advertising clause, or
* The GNU General Public License (GPL), version 2 or later.
The text of these licenses are available in the licenses/ftl.txt and the
licenses/gpl.txt files respectively. They are also available at
http://freetype.sourceforge.net/license.html
The Luxi fonts in the testdata directory are licensed separately. See the
testdata/COPYING file for details.
= LICENSE 6c5ae159496bacd951e6cd937d5e6427b172a2a6284bfdf1954ae338 =
================================================================================
================================================================================
= github.com/golang/groupcache licensed under: =
@@ -4345,40 +3929,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 23da488bf4950b37bebc2eaa6de2a09e2301a6f4ed5ae2cd648aad9d =
================================================================================
================================================================================
= github.com/google/go-cmp licensed under: =
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE bda64ae869be18b50125d9cfe5c370eb7248e84a2324823e4d7f2295 =
================================================================================
================================================================================
= github.com/google/uuid licensed under: =
@@ -5466,215 +5016,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 6681c42f6974591d2056518a26201323fa7d42bdc4d64bfc12c332b3 =
================================================================================
================================================================================
= github.com/pquerna/cachecontrol licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
================================================================================
================================================================================
= github.com/prometheus/client_golang licensed under: =
@@ -6681,34 +6022,6 @@ OTHER DEALINGS IN THE SOFTWARE.
= LICENSE 470f204648dd700d3c0229df525a9607693e6a3d9fd6422fe0212c62 =
================================================================================
================================================================================
= github.com/stretchr/testify licensed under: =
MIT License
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= LICENSE 07f20b96549b71d39ebb2bf1e006f7b2885e3808423818000545119c =
================================================================================
================================================================================
= github.com/tealeg/xlsx licensed under: =
@@ -8374,40 +7687,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= golang.org/x/image licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= golang.org/x/net licensed under: =
@@ -8646,35 +7925,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE ed6066ae50f153e2965216c6d4b9335900f1f8b2b526527f49a619d7 =
================================================================================
================================================================================
= gonum.org/v1/plot licensed under: =
Copyright ©2013 The Gonum Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Gonum project nor the names of its authors and
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 318191149295a8b7a8dbac1ae8fb814d11997919e0484a5a995cb6ae =
================================================================================
================================================================================
= google.golang.org/api licensed under: =
@@ -9785,60 +9035,3 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= LICENSE 9820a37ca0fcacbc82c8eb2bdd3049706550a4ebf97ad7fde1310dec =
================================================================================
================================================================================
= gopkg.in/yaml.v3 licensed under: =
This project is covered by two different licenses: MIT and Apache.
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright staring in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= LICENSE 1fcda9aa5c036a1d3975c8c4a007e1b3c05f0e450567d8bdb46a6d61 =
================================================================================
+1 -1
View File
@@ -302,7 +302,7 @@ func CreateLogArgParser() *argparser.ArgParser {
ap.SupportsFlag(ParentsFlag, "", "Shows all parents of each commit in the log.")
ap.SupportsString(DecorateFlag, "", "decorate_fmt", "Shows refs next to commits. Valid options are short, full, no, and auto")
ap.SupportsFlag(OneLineFlag, "", "Shows logs in a compact format.")
ap.SupportsString(NotFlag, "", "revision", "Excludes commits from revision.")
ap.SupportsStringList(NotFlag, "", "revision", "Excludes commits from revision.")
return ap
}
+1 -1
View File
@@ -40,7 +40,7 @@ import (
)
var commitDocs = cli.CommandDocumentationContent{
ShortDesc: "Record changes to the repository",
ShortDesc: "Record changes to the database",
LongDesc: `
Stores the current contents of the staged tables in a new commit along with a log message from the user describing the changes.
+2 -2
View File
@@ -124,7 +124,7 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
}
return dc, nil
} else {
dc, valid, err := dEnv.UserRPCCreds()
dc, valid, err := dEnv.UserDoltCreds()
if !valid {
return creds.EmptyCreds, errhand.BuildDError("error: no user credentials found").Build()
}
@@ -138,7 +138,7 @@ func loadCred(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (creds.DoltCred
func checkCredAndPrintSuccess(ctx context.Context, dEnv *env.DoltEnv, dc creds.DoltCreds, endpoint string) errhand.VerboseError {
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: endpoint,
Creds: dc,
Creds: dc.RPCCreds(),
})
if err != nil {
return errhand.BuildDError("error: unable to build server endpoint options.").AddCause(err).Build()
+1 -1
View File
@@ -163,7 +163,7 @@ func updateProfileWithCredentials(ctx context.Context, dEnv *env.DoltEnv, c cred
hostAndPort := fmt.Sprintf("%s:%s", host, port)
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: hostAndPort,
Creds: c,
Creds: c.RPCCreds(),
})
if err != nil {
return fmt.Errorf("error: unable to build dial options server with credentials: %w", err)
+1 -1
View File
@@ -93,7 +93,7 @@ func (cmd LsCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
}
func getJWKHandler(dEnv *env.DoltEnv) func(string, int64, bool) bool {
current, valid, _ := dEnv.UserRPCCreds()
current, valid, _ := dEnv.UserDoltCreds()
first := false
return func(path string, size int64, isDir bool) (stop bool) {
if strings.HasSuffix(path, creds.JWKFileExtension) {
+2 -2
View File
@@ -20,6 +20,6 @@ import (
var Commands = cli.NewSubCommandHandler("docs", "Commands for working with Dolt documents.", []cli.Command{
DiffCmd{},
WriteCmd{},
ReadCmd{},
PrintCmd{},
UploadCmd{},
})
+14 -14
View File
@@ -31,39 +31,39 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
var readDocs = cli.CommandDocumentationContent{
ShortDesc: "Reads Dolt Docs from the file system into the database",
LongDesc: "Reads Dolt Docs from the file system into the database",
var uploadDocs = cli.CommandDocumentationContent{
ShortDesc: "Uploads Dolt Docs from the file system into the database",
LongDesc: "Uploads Dolt Docs from the file system into the database",
Synopsis: []string{
"{{.LessThan}}doc{{.GreaterThan}} {{.LessThan}}file{{.GreaterThan}}",
},
}
type ReadCmd struct{}
type UploadCmd struct{}
// Name implements cli.Command.
func (cmd ReadCmd) Name() string {
return "read"
func (cmd UploadCmd) Name() string {
return "upload"
}
// Description implements cli.Command.
func (cmd ReadCmd) Description() string {
return readDocs.ShortDesc
func (cmd UploadCmd) Description() string {
return uploadDocs.ShortDesc
}
// RequiresRepo implements cli.Command.
func (cmd ReadCmd) RequiresRepo() bool {
func (cmd UploadCmd) RequiresRepo() bool {
return true
}
// Docs implements cli.Command.
func (cmd ReadCmd) Docs() *cli.CommandDocumentation {
func (cmd UploadCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(readDocs, ap)
return cli.NewCommandDocumentation(uploadDocs, ap)
}
// ArgParser implements cli.Command.
func (cmd ReadCmd) ArgParser() *argparser.ArgParser {
func (cmd UploadCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"doc", "Dolt doc name to be updated in the database."})
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"file", "file to read Dolt doc from."})
@@ -71,9 +71,9 @@ func (cmd ReadCmd) ArgParser() *argparser.ArgParser {
}
// Exec implements cli.Command.
func (cmd ReadCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
func (cmd UploadCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, writeDocs, ap))
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, uploadDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() != 2 {
+14 -14
View File
@@ -30,48 +30,48 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
var writeDocs = cli.CommandDocumentationContent{
ShortDesc: "Writes Dolt Docs to stdout",
LongDesc: "Writes Dolt Docs to stdout",
var printDocs = cli.CommandDocumentationContent{
ShortDesc: "Prints Dolt Docs to stdout",
LongDesc: "Prints Dolt Docs to stdout",
Synopsis: []string{
"{{.LessThan}}doc{{.GreaterThan}}",
},
}
type WriteCmd struct{}
type PrintCmd struct{}
// Name implements cli.Command.
func (cmd WriteCmd) Name() string {
return "write"
func (cmd PrintCmd) Name() string {
return "print"
}
// Description implements cli.Command.
func (cmd WriteCmd) Description() string {
return writeDocs.ShortDesc
func (cmd PrintCmd) Description() string {
return printDocs.ShortDesc
}
// RequiresRepo implements cli.Command.
func (cmd WriteCmd) RequiresRepo() bool {
func (cmd PrintCmd) RequiresRepo() bool {
return true
}
// Docs implements cli.Command.
func (cmd WriteCmd) Docs() *cli.CommandDocumentation {
func (cmd PrintCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(writeDocs, ap)
return cli.NewCommandDocumentation(printDocs, ap)
}
// ArgParser implements cli.Command.
func (cmd WriteCmd) ArgParser() *argparser.ArgParser {
func (cmd PrintCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"doc", "Dolt doc to be read."})
return ap
}
// Exec implements cli.Command.
func (cmd WriteCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
func (cmd PrintCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.ArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, writeDocs, ap))
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, printDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() != 1 {
+4 -1
View File
@@ -65,7 +65,10 @@ func validateJWT(config []JwksConfig, username, identity, token string, reqTime
if err != nil {
return false, err
}
vd := jwtauth.NewJWTValidator(pr)
vd, err := jwtauth.NewJWTValidator(pr)
if err != nil {
return false, err
}
claims, err := vd.ValidateJWT(token, reqTime)
if err != nil {
return false, err
+3 -3
View File
@@ -106,7 +106,7 @@ func (cmd InspectCmd) measureChunkIndexDistribution(ctx context.Context, dEnv *e
break
}
summary, err := cmd.processTableFile(path, dEnv.FS)
summary, err := cmd.processTableFile(ctx, path, dEnv.FS)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
@@ -120,7 +120,7 @@ func (cmd InspectCmd) measureChunkIndexDistribution(ctx context.Context, dEnv *e
return nil
}
func (cmd InspectCmd) processTableFile(path string, fs filesys.Filesys) (sum *chunkIndexSummary, err error) {
func (cmd InspectCmd) processTableFile(ctx context.Context, path string, fs filesys.Filesys) (sum *chunkIndexSummary, err error) {
var rdr io.ReadCloser
rdr, err = fs.OpenForRead(path)
if err != nil {
@@ -134,7 +134,7 @@ func (cmd InspectCmd) processTableFile(path string, fs filesys.Filesys) (sum *ch
}()
var prefixes []uint64
prefixes, err = nbs.GetTableIndexPrefixes(rdr.(io.ReadSeeker))
prefixes, err = nbs.GetTableIndexPrefixes(ctx, rdr.(io.ReadSeeker))
if err != nil {
return sum, err
}
+168 -171
View File
@@ -36,14 +36,14 @@ import (
)
type logOpts struct {
numLines int
showParents bool
minParents int
decoration string
oneLine bool
excludingCommitSpec *doltdb.CommitSpec
commitSpec *doltdb.CommitSpec
tableName string
numLines int
showParents bool
minParents int
decoration string
oneLine bool
excludingCommitSpecs []*doltdb.CommitSpec
commitSpecs []*doltdb.CommitSpec
tableName string
}
type logNode struct {
@@ -63,16 +63,20 @@ The command takes options to control what is shown and how.
{{.EmphasisLeft}}dolt log{{.EmphasisRight}}
Lists commit logs from current HEAD when no options provided.
{{.EmphasisLeft}}dolt log <revision>{{.EmphasisRight}}
Lists commit logs starting from revision.
{{.EmphasisLeft}}dolt log [<revisions>...]{{.EmphasisRight}}
Lists commit logs starting from revision. If multiple revisions provided, lists logs reachable by all revisions.
{{.EmphasisLeft}}dolt log <revision> <table>{{.EmphasisRight}}
Lists commit logs starting from revision, only including commits with changes to table.
{{.EmphasisLeft}}dolt log [<revisions>...] <table>{{.EmphasisRight}}
Lists commit logs starting from revisions, only including commits with changes to table.
{{.EmphasisLeft}}dolt log <revisionB>..<revisionA>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log <revisionA> --not <revisionB>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log ^<revisionB> <revisionA>{{.EmphasisRight}}
Different ways to list two dot logs. These will list commit logs for revisionA, while excluding commits from revisionB. The table option is not supported for two dot log.`,
Different ways to list two dot logs. These will list commit logs for revisionA, while excluding commits from revisionB. The table option is not supported for two dot log.
{{.EmphasisLeft}}dolt log <revisionB>...<revisionA>{{.EmphasisRight}}
{{.EmphasisLeft}}dolt log <revisionA> <revisionB> --not $(dolt merge-base <revisionA> <revisionB>){{.EmphasisRight}}
Different ways to list three dot logs. These will list commit logs reachable by revisionA OR revisionB, while excluding commits reachable by BOTH revisionA AND revisionB.`,
Synopsis: []string{
`[-n {{.LessThan}}num_commits{{.GreaterThan}}] [{{.LessThan}}revision-range{{.GreaterThan}}] [[--] {{.LessThan}}table{{.GreaterThan}}]`,
},
@@ -114,17 +118,12 @@ func (cmd LogCmd) logWithLoggerFunc(ctx context.Context, commandStr string, args
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, logDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() > 2 {
usage()
return 1
}
opts, err := parseLogArgs(ctx, dEnv, apr)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
if opts.commitSpec == nil {
opts.commitSpec = dEnv.RepoStateReader().CWBHeadSpec()
if len(opts.commitSpecs) == 0 {
opts.commitSpecs = append(opts.commitSpecs, dEnv.RepoStateReader().CWBHeadSpec())
}
if len(opts.tableName) > 0 {
return handleErrAndExit(logTableCommits(ctx, dEnv, opts))
@@ -152,146 +151,121 @@ func parseLogArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars
oneLine: apr.Contains(cli.OneLineFlag),
decoration: decorateOption,
}
cs, notCs, tableName, err := parseRefsAndTable(ctx, apr, dEnv)
err := opts.parseRefsAndTable(ctx, apr, dEnv)
if err != nil {
return nil, err
}
opts.commitSpec = cs
opts.excludingCommitSpec = notCs
opts.tableName = tableName
excludingRef, ok := apr.GetValue(cli.NotFlag)
excludingRefs, ok := apr.GetValueList(cli.NotFlag)
if ok {
if opts.excludingCommitSpec != nil {
if len(opts.excludingCommitSpecs) > 0 {
return nil, fmt.Errorf("cannot use --not argument with two dots or ref with ^")
}
if len(opts.tableName) > 0 {
return nil, fmt.Errorf("cannot use --not argument with table")
}
cs, err := doltdb.NewCommitSpec(excludingRef)
if err != nil {
return nil, fmt.Errorf("invalid commit %s\n", excludingRef)
for _, excludingRef := range excludingRefs {
notCs, err := doltdb.NewCommitSpec(excludingRef)
if err != nil {
return nil, fmt.Errorf("invalid commit %s\n", excludingRef)
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
}
opts.excludingCommitSpec = cs
}
return opts, nil
}
func parseRefsAndTable(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) (*doltdb.CommitSpec, *doltdb.CommitSpec, string, error) {
switch apr.NArg() {
// dolt log
case 0:
return nil, nil, "", nil
// dolt log <ref/^ref/revision-range/table>
case 1:
return getCommitOrTableFromString(ctx, apr.Arg(0), dEnv, true)
// dolt log <ref/^ref> <ref/^ref/table>
case 2:
firstCs, firstExNotCs, _, err := getCommitOrTableFromString(ctx, apr.Arg(0), dEnv, false)
if err != nil {
return nil, nil, "", err
}
secondCs, secondExNotCs, tableName, err := getCommitOrTableFromString(ctx, apr.Arg(1), dEnv, false)
if err != nil {
return nil, nil, "", err
}
if len(tableName) > 0 {
if firstExNotCs != nil {
return nil, nil, "", fmt.Errorf("Providing tableName for two dot log not yet supported")
}
// dolt log <ref> <table>
return firstCs, nil, tableName, nil
}
if firstCs != nil && secondCs != nil {
commit, err := dEnv.DoltDB.Resolve(ctx, firstCs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, nil, "", err
}
// Handles table name matching branch name (dolt log <ref> <table>)
exists, err := tableExists(ctx, commit, apr.Arg(1))
if err != nil {
return nil, nil, "", err
}
if exists {
return firstCs, nil, apr.Arg(1), nil
}
return nil, nil, "", fmt.Errorf("Cannot provide two commit refs") // dolt log <ref> <ref>
}
if firstExNotCs != nil && secondExNotCs != nil {
return nil, nil, "", fmt.Errorf("Cannot exclude two commit refs") // dolt log ^<ref> ^<ref>
}
// dolt log ^<ref> <ref>
if firstExNotCs != nil && secondCs != nil {
return secondCs, firstExNotCs, "", nil
}
// dolt log <ref> ^<ref>
if firstCs != nil && secondExNotCs != nil {
return firstCs, secondExNotCs, "", nil
}
return nil, nil, "", nil
default:
return nil, nil, "", fmt.Errorf("Cannot provide more than 3 arguments")
}
}
func getCommitOrTableFromString(ctx context.Context, str string, dEnv *env.DoltEnv, canDot bool) (*doltdb.CommitSpec, *doltdb.CommitSpec, string, error) {
// <ref>...<ref>
if strings.Contains(str, "...") {
return nil, nil, "", fmt.Errorf("Three dot dolt log not supported")
func (opts *logOpts) parseRefsAndTable(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv) error {
// `dolt log`
if apr.NArg() == 0 {
return nil
}
// <ref>..<ref>
if strings.Contains(str, "..") {
if !canDot {
return nil, nil, "", fmt.Errorf("Cannot use two dot when 2 arguments provided")
if strings.Contains(apr.Arg(0), "..") {
if apr.NArg() > 1 {
return fmt.Errorf("Cannot use two or three dot syntax when 2 or more arguments provided")
}
refs := strings.Split(str, "..")
// `dolt log <ref>...<ref>`
if strings.Contains(apr.Arg(0), "...") {
refs := strings.Split(apr.Arg(0), "...")
for _, ref := range refs {
cs, err := getCommitSpec(ref)
if err != nil {
return err
}
opts.commitSpecs = append(opts.commitSpecs, cs)
}
mergeBase, verr := getMergeBaseFromStrings(ctx, dEnv, refs[0], refs[1])
if verr != nil {
return verr
}
notCs, err := getCommitSpec(mergeBase)
if err != nil {
return err
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
return nil
}
// `dolt log <ref>..<ref>`
refs := strings.Split(apr.Arg(0), "..")
notCs, err := getCommitSpec(refs[0])
if err != nil {
return nil, nil, "", err
return err
}
cs, err := getCommitSpec(refs[1])
if err != nil {
return nil, nil, "", err
return err
}
return cs, notCs, "", nil
opts.commitSpecs = append(opts.commitSpecs, cs)
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
return nil
}
// ^<ref>
if strings.HasPrefix(str, "^") {
commit := strings.TrimPrefix(str, "^")
notCs, err := getCommitSpec(commit)
if err != nil {
return nil, nil, "", err
seenRefs := make(map[string]bool)
for _, arg := range apr.Args {
// ^<ref>
if strings.HasPrefix(arg, "^") {
commit := strings.TrimPrefix(arg, "^")
notCs, err := getCommitSpec(commit)
if err != nil {
return err
}
opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
} else {
argIsRef := actions.ValidateIsRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader())
// <ref>
if argIsRef && !seenRefs[arg] {
cs, err := getCommitSpec(arg)
if err != nil {
return err
}
seenRefs[arg] = true
opts.commitSpecs = append(opts.commitSpecs, cs)
} else {
// <table>
opts.tableName = arg
}
}
return nil, notCs, "", err
}
argIsRef := actions.ValidateIsRef(ctx, str, dEnv.DoltDB, dEnv.RepoStateReader())
// <ref>
if argIsRef {
cs, err := getCommitSpec(str)
if err != nil {
return nil, nil, "", err
}
return cs, nil, "", nil
if len(opts.tableName) > 0 && len(opts.excludingCommitSpecs) > 0 {
return fmt.Errorf("Cannot provide table name with excluding refs")
}
// <table>
return nil, nil, str, nil
return nil
}
func getCommitSpec(commit string) (*doltdb.CommitSpec, error) {
@@ -303,10 +277,22 @@ func getCommitSpec(commit string) (*doltdb.CommitSpec, error) {
}
func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
commit, err := dEnv.DoltDB.Resolve(ctx, opts.commitSpec, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
return 1
hashes := make([]hash.Hash, len(opts.commitSpecs))
for i, cs := range opts.commitSpecs {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
return 1
}
h, err := commit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
hashes[i] = h
}
cHashToRefs := map[hash.Hash][]string{}
@@ -356,39 +342,37 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
cHashToRefs[t.Hash] = append(cHashToRefs[t.Hash], tagName)
}
h, err := commit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
matchFunc := func(commit *doltdb.Commit) (bool, error) {
return commit.NumParents() >= opts.minParents, nil
matchFunc := func(c *doltdb.Commit) (bool, error) {
return c.NumParents() >= opts.minParents, nil
}
var commits []*doltdb.Commit
if opts.excludingCommitSpec == nil {
commits, err = commitwalk.GetTopNTopoOrderedCommitsMatching(ctx, dEnv.DoltDB, h, opts.numLines, matchFunc)
if len(opts.excludingCommitSpecs) == 0 {
commits, err = commitwalk.GetTopNTopoOrderedCommitsMatching(ctx, dEnv.DoltDB, hashes, opts.numLines, matchFunc)
} else {
excludingCommit, err := dEnv.DoltDB.Resolve(ctx, opts.excludingCommitSpec, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get excluding commit for current branch."))
return 1
excludingHashes := make([]hash.Hash, len(opts.excludingCommitSpecs))
for i, excludingSpec := range opts.excludingCommitSpecs {
excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get excluding commit for current branch."))
return 1
}
excludingHash, err := excludingCommit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
excludingHashes[i] = excludingHash
}
excludingHash, err := excludingCommit.HashOf()
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: failed to get commit hash"))
return 1
}
commits, err = commitwalk.GetDotDotRevisions(ctx, dEnv.DoltDB, h, dEnv.DoltDB, excludingHash, opts.numLines)
commits, err = commitwalk.GetDotDotRevisions(ctx, dEnv.DoltDB, hashes, dEnv.DoltDB, excludingHashes, opts.numLines)
}
if err != nil {
cli.PrintErrln("Error retrieving commit.")
cli.PrintErrln(err)
return 1
}
@@ -417,7 +401,7 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
commitHash: cmHash,
parentHashes: pHashes,
branchNames: cHashToRefs[cmHash],
isHead: cmHash == h})
isHead: hashIsHead(cmHash, hashes)})
}
logToStdOut(opts, commitsInfo)
@@ -425,6 +409,13 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
return 0
}
func hashIsHead(cmHash hash.Hash, hashes []hash.Hash) bool {
if len(hashes) > 1 || len(hashes) == 0 {
return false
}
return cmHash == hashes[0]
}
func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (bool, error) {
rv, err := commit.GetRootValue(ctx)
if err != nil {
@@ -440,31 +431,37 @@ func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (
}
func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) error {
commit, err := dEnv.DoltDB.Resolve(ctx, opts.commitSpec, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
}
hashes := make([]hash.Hash, len(opts.commitSpecs))
// Check that the table exists in the head commit
exists, err := tableExists(ctx, commit, opts.tableName)
if err != nil {
return err
}
for i, cs := range opts.commitSpecs {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
}
if !exists {
return fmt.Errorf("error: table %s does not exist", opts.tableName)
}
h, err := commit.HashOf()
if err != nil {
return err
}
h, err := commit.HashOf()
if err != nil {
return err
// Check that the table exists in the head commits
exists, err := tableExists(ctx, commit, opts.tableName)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("error: table %s does not exist", opts.tableName)
}
hashes[i] = h
}
matchFunc := func(commit *doltdb.Commit) (bool, error) {
return commit.NumParents() >= opts.minParents, nil
}
itr, err := commitwalk.GetTopologicalOrderIterator(ctx, dEnv.DoltDB, h, matchFunc)
itr, err := commitwalk.GetTopologicalOrderIterator(ctx, dEnv.DoltDB, hashes, matchFunc)
if err != nil && err != io.EOF {
return err
}
+1 -1
View File
@@ -240,7 +240,7 @@ func openBrowserForCredsAdd(dc creds.DoltCreds, loginUrl string) {
func getCredentialsClient(dEnv *env.DoltEnv, dc creds.DoltCreds, authEndpoint string, insecure bool) (remotesapi.CredentialsServiceClient, errhand.VerboseError) {
cfg, err := dEnv.GetGRPCDialParams(grpcendpoint.Config{
Endpoint: authEndpoint,
Creds: dc,
Creds: dc.RPCCreds(),
Insecure: insecure,
})
if err != nil {
+1
View File
@@ -86,6 +86,7 @@ func (cmd MergeBaseCmd) Exec(ctx context.Context, commandStr string, args []stri
}
cli.Println(mergeBaseStr)
return 0
}
+1 -1
View File
@@ -124,7 +124,7 @@ func (cmd RootsCmd) processTableFile(ctx context.Context, path string, modified
defer rdCloser.Close()
return nbs.IterChunks(rdCloser.(io.ReadSeeker), func(chunk chunks.Chunk) (stop bool, err error) {
return nbs.IterChunks(ctx, rdCloser.(io.ReadSeeker), func(chunk chunks.Chunk) (stop bool, err error) {
//Want a clean db every loop
sp, _ := spec.ForDatabase("mem")
vrw := sp.GetVRW(ctx)
+29 -15
View File
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -44,7 +45,9 @@ var tableName = "people"
// Smoke test: Console opens and exits
func TestSqlConsole(t *testing.T) {
t.Run("SQL console opens and exits", func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{}
commandStr := "dolt sql"
@@ -69,7 +72,8 @@ func TestSqlBatchMode(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-b", "-q", test.query}
@@ -106,7 +110,8 @@ func TestSqlSelect(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -130,7 +135,8 @@ func TestSqlShow(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -201,7 +207,8 @@ func TestShowTables(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
assert.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -230,7 +237,8 @@ func TestAlterTable(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -255,7 +263,8 @@ func TestDropTable(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
commandStr := "dolt sql"
@@ -371,7 +380,8 @@ func TestInsert(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -386,7 +396,7 @@ func TestInsert(t *testing.T) {
// Assert that all expected IDs exist after the insert
for _, expectedid := range test.expectedIds {
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(t, dEnv, root, q)
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
assert.True(t, len(rows) > 0)
}
@@ -450,7 +460,8 @@ func TestUpdate(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
ctx := context.Background()
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
@@ -465,7 +476,7 @@ func TestUpdate(t *testing.T) {
// Assert that all rows have been updated
for i, expectedid := range test.expectedIds {
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(t, dEnv, root, q)
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
assert.True(t, len(rows) > 0)
assert.Equal(t, uint32(test.expectedAges[i]), rows[0][2])
@@ -522,11 +533,12 @@ func TestDelete(t *testing.T) {
for _, test := range tests {
t.Run(test.query, func(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
ctx := context.Background()
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
args := []string{"-q", test.query}
ctx := context.Background()
commandStr := "dolt sql"
result := SqlCmd{}.Exec(ctx, commandStr, args, dEnv)
assert.Equal(t, test.expectedRes, result)
@@ -538,7 +550,7 @@ func TestDelete(t *testing.T) {
// Assert that all rows have been deleted
for _, expectedid := range test.deletedIds {
q := fmt.Sprintf("SELECT * FROM %s WHERE id = '%s'", tableName, expectedid.String())
rows, err := sqle.ExecuteSelect(t, dEnv, root, q)
rows, err := sqle.ExecuteSelect(dEnv, root, q)
assert.NoError(t, err)
assert.True(t, len(rows) == 0)
}
@@ -548,7 +560,9 @@ func TestDelete(t *testing.T) {
}
func TestCommitHooksNoErrors(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
sqle.AddDoltSystemVariables()
sql.SystemVariables.SetGlobal(dsess.SkipReplicationErrors, true)
sql.SystemVariables.SetGlobal(dsess.ReplicateToRemote, "unknown")
+5 -3
View File
@@ -17,6 +17,7 @@ package sqlserver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
@@ -202,12 +203,13 @@ func Serve(
)
}
if startError != nil {
if errors.Is(startError, server.UnixSocketInUseError) {
lgr.Warn("unix socket set up failed: file already in use: ", serverConf.Socket)
} else if startError != nil {
cli.PrintErr(startError)
return
} else {
sqlserver.SetRunningServer(mySQLServer)
}
sqlserver.SetRunningServer(mySQLServer)
var metSrv *http.Server
if serverConfig.MetricsHost() != "" && serverConfig.MetricsPort() > 0 {
+25 -10
View File
@@ -62,6 +62,9 @@ var (
func TestServerArgs(t *testing.T) {
serverController := NewServerController()
go func() {
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"-H", "localhost",
"-P", "15200",
@@ -70,7 +73,7 @@ func TestServerArgs(t *testing.T) {
"-t", "5",
"-l", "info",
"-r",
}, sqle.CreateEnvWithSeedData(t), serverController)
}, dEnv, serverController)
}()
err := serverController.WaitForStart()
require.NoError(t, err)
@@ -102,7 +105,9 @@ listener:
`
serverController := NewServerController()
go func() {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
dEnv.FS.WriteFile("config.yaml", []byte(yamlConfig))
startServer(context.Background(), "0.0.0", "dolt sql-server", []string{
"--config", "config.yaml",
@@ -120,7 +125,8 @@ listener:
}
func TestServerBadArgs(t *testing.T) {
env := sqle.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
tests := [][]string{
{"-H", "127.0.0.0.1"},
@@ -148,7 +154,8 @@ func TestServerBadArgs(t *testing.T) {
}
func TestServerGoodParams(t *testing.T) {
env := sqle.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
tests := []ServerConfig{
DefaultServerConfig(),
@@ -186,7 +193,9 @@ func TestServerGoodParams(t *testing.T) {
}
func TestServerSelect(t *testing.T) {
env := sqle.CreateEnvWithSeedData(t)
env, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15300)
sc := NewServerController()
@@ -194,7 +203,7 @@ func TestServerSelect(t *testing.T) {
go func() {
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, env)
}()
err := sc.WaitForStart()
err = sc.WaitForStart()
require.NoError(t, err)
const dbName = "dolt"
@@ -243,6 +252,9 @@ func TestServerFailsIfPortInUse(t *testing.T) {
Addr: ":15200",
Handler: http.DefaultServeMux,
}
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
go server.ListenAndServe()
go func() {
startServer(context.Background(), "test", "dolt sql-server", []string{
@@ -253,15 +265,18 @@ func TestServerFailsIfPortInUse(t *testing.T) {
"-t", "5",
"-l", "info",
"-r",
}, sqle.CreateEnvWithSeedData(t), serverController)
}, dEnv, serverController)
}()
err := serverController.WaitForStart()
err = serverController.WaitForStart()
require.Error(t, err)
server.Close()
}
func TestServerSetDefaultBranch(t *testing.T) {
dEnv := sqle.CreateEnvWithSeedData(t)
dEnv, err := sqle.CreateEnvWithSeedData()
require.NoError(t, err)
serverConfig := DefaultServerConfig().withLogLevel(LogLevel_Fatal).WithPort(15302)
sc := NewServerController()
@@ -269,7 +284,7 @@ func TestServerSetDefaultBranch(t *testing.T) {
go func() {
_, _ = Serve(context.Background(), "0.0.0", serverConfig, sc, dEnv)
}()
err := sc.WaitForStart()
err = sc.WaitForStart()
require.NoError(t, err)
const dbName = "dolt"
+1 -1
View File
@@ -219,7 +219,7 @@ func printRemoteRefTrackingInfo(ctx context.Context, dEnv *env.DoltEnv) error {
// countCommitsInRange returns the number of commits between the given starting point to trace back to the given target point.
// The starting commit must be a descendant of the target commit. Target commit must be a common ancestor commit.
func countCommitsInRange(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash, targetCommitHash hash.Hash) (int, error) {
itr, iErr := commitwalk.GetTopologicalOrderIterator(ctx, ddb, startCommitHash, nil)
itr, iErr := commitwalk.GetTopologicalOrderIterator(ctx, ddb, []hash.Hash{startCommitHash}, nil)
if iErr != nil {
return 0, iErr
}
+1 -1
View File
@@ -57,7 +57,7 @@ import (
)
const (
Version = "0.50.11"
Version = "0.50.14"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+2 -4
View File
@@ -14,7 +14,6 @@ require (
github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi v0.0.0-20201005193433-3ee972b1d078
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f
github.com/dustin/go-humanize v1.0.0
@@ -24,7 +23,7 @@ require (
github.com/gocraft/dbr/v2 v2.7.2
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.1
github.com/google/go-cmp v0.5.8
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.2.0
github.com/jpillora/backoff v1.0.0
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d
@@ -57,11 +56,10 @@ require (
require (
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
github.com/cenkalti/backoff/v4 v4.1.3
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4
github.com/dolthub/go-mysql-server v0.14.1-0.20221109172918-53d8157d910d
github.com/google/flatbuffers v2.0.6+incompatible
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_golang v1.11.0
github.com/shirou/gopsutil/v3 v3.22.1
github.com/vbauerster/mpb v3.4.0+incompatible
+2 -7
View File
@@ -178,14 +178,12 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4 h1:j55/tHWE+PAT7WQjIlAKmIMFma7sVBpTqjtreBquXOc=
github.com/dolthub/go-mysql-server v0.12.1-0.20221031173152-49134f16cad4/go.mod h1:KtpU4Sf7J+SIat/nxoA733QTn3tdL34NtoGxEBFcTsA=
github.com/dolthub/go-mysql-server v0.14.1-0.20221109172918-53d8157d910d h1:anUvMZrtpJt8hEjtKz89Hd6FejlKxJXVOHCuK9OSTPE=
github.com/dolthub/go-mysql-server v0.14.1-0.20221109172918-53d8157d910d/go.mod h1:KtpU4Sf7J+SIat/nxoA733QTn3tdL34NtoGxEBFcTsA=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474/go.mod h1:kMz7uXOXq4qRriCEyZ/LUeTqraLJCjf0WVZcUi6TxUY=
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66 h1:WRPDbpJWEnPxPmiuOTndT+lUWUeGjx6eoNOK9O4tQQQ=
github.com/dolthub/mmap-go v1.0.4-0.20201107010347-f9f2a9588a66/go.mod h1:N5ZIbMGuDUpTpOFQ7HcsN6WSIpTGQjHP+Mz27AfmAgk=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f h1:2sNrQiE4pcdgCNp09RTOsmNeepgN5rL+ep8NF8Faw9U=
@@ -595,8 +593,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -924,7 +920,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+34 -15
View File
@@ -117,43 +117,62 @@ func (dc DoltCreds) Sign(data []byte) []byte {
return ed25519.Sign(dc.PrivKey, data)
}
func (dc DoltCreds) toBearerToken() (string, error) {
b32KIDStr := dc.KeyIDBase32Str()
key := jose.SigningKey{Algorithm: jose.EdDSA, Key: ed25519.PrivateKey(dc.PrivKey)}
type RPCCreds struct {
PrivKey ed25519.PrivateKey
KeyID string
Audience string
Issuer string
Subject string
RequireTLS bool
}
func (c *RPCCreds) toBearerToken() (string, error) {
key := jose.SigningKey{Algorithm: jose.EdDSA, Key: c.PrivKey}
opts := &jose.SignerOptions{ExtraHeaders: map[jose.HeaderKey]interface{}{
JWTKIDHeader: b32KIDStr,
JWTKIDHeader: c.KeyID,
}}
signer, err := jose.NewSigner(key, opts)
if err != nil {
return "", err
}
// Shouldn't be hard coded
jwtBuilder := jwt.Signed(signer)
jwtBuilder = jwtBuilder.Claims(jwt.Claims{
Audience: []string{"dolthub-remote-api.liquidata.co"},
Issuer: "dolt-client.liquidata.co",
Subject: "doltClientCredentials/" + b32KIDStr,
Audience: []string{c.Audience},
Issuer: c.Issuer,
Subject: c.Subject,
Expiry: jwt.NewNumericDate(datetime.Now().Add(30 * time.Second)),
})
return jwtBuilder.CompactSerialize()
}
func (dc DoltCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
t, err := dc.toBearerToken()
func (c *RPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
t, err := c.toBearerToken()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": "Bearer " + t,
}, nil
}
func (dc DoltCreds) RequireTransportSecurity() bool {
return false
func (c *RPCCreds) RequireTransportSecurity() bool {
return c.RequireTLS
}
const RemotesAPIAudience = "dolthub-remote-api.liquidata.co"
const ClientIssuer = "dolt-client.liquidata.co"
func (dc DoltCreds) RPCCreds() *RPCCreds {
b32KIDStr := dc.KeyIDBase32Str()
return &RPCCreds{
PrivKey: ed25519.PrivateKey(dc.PrivKey),
KeyID: b32KIDStr,
Audience: RemotesAPIAudience,
Issuer: ClientIssuer,
Subject: "doltClientCredentials/" + b32KIDStr,
RequireTLS: false,
}
}
@@ -131,7 +131,9 @@ func (fk ForeignKey) DeepEquals(other ForeignKey) bool {
}
return fk.Name == other.Name &&
fk.TableName == other.TableName &&
fk.ReferencedTableName == other.ReferencedTableName
fk.ReferencedTableName == other.ReferencedTableName &&
fk.TableIndex == other.TableIndex &&
fk.ReferencedTableIndex == other.ReferencedTableIndex
}
// HashOf returns the Noms hash of a ForeignKey.
+1 -1
View File
@@ -138,7 +138,7 @@ func testGarbageCollection(t *testing.T, test gcTest) {
working, err = dEnv.WorkingRoot(ctx)
require.NoError(t, err)
// assert all out rows are present after gc
actual, err := sqle.ExecuteSelect(t, dEnv, working, test.query)
actual, err := sqle.ExecuteSelect(dEnv, working, test.query)
require.NoError(t, err)
assert.Equal(t, test.expected, actual)
}
+45 -188
View File
@@ -15,27 +15,21 @@
package dtestutils
import (
"context"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
"github.com/dolthub/dolt/go/store/types"
)
var UUIDS = []uuid.UUID{
var uuids = []uuid.UUID{
uuid.Must(uuid.Parse("00000000-0000-0000-0000-000000000000")),
uuid.Must(uuid.Parse("00000000-0000-0000-0000-000000000001")),
uuid.Must(uuid.Parse("00000000-0000-0000-0000-000000000002"))}
var Names = []string{"Bill Billerson", "John Johnson", "Rob Robertson"}
var Ages = []uint64{32, 25, 21}
var Titles = []string{"Senior Dufus", "Dufus", ""}
var MaritalStatus = []bool{true, false, false}
var names = []string{"Bill Billerson", "John Johnson", "Rob Robertson"}
var ages = []uint64{32, 25, 21}
var titles = []string{"Senior Dufus", "Dufus", ""}
var maritalStatus = []bool{true, false, false}
const (
IdTag uint64 = iota
@@ -47,201 +41,64 @@ const (
)
const (
TableName = "people"
IndexName = "idx_name"
)
var typedColColl = schema.NewColCollection(
schema.NewColumn("id", IdTag, types.StringKind, true, schema.NotNullConstraint{}),
schema.NewColumn("name", NameTag, types.StringKind, false, schema.NotNullConstraint{}),
schema.NewColumn("age", AgeTag, types.UintKind, false, schema.NotNullConstraint{}),
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false, schema.NotNullConstraint{}),
schema.NewColumn("title", TitleTag, types.StringKind, false),
)
// Schema returns the schema for the `people` test table.
func Schema() (schema.Schema, error) {
var typedColColl = schema.NewColCollection(
schema.NewColumn("id", IdTag, types.StringKind, true, schema.NotNullConstraint{}),
schema.NewColumn("name", NameTag, types.StringKind, false, schema.NotNullConstraint{}),
schema.NewColumn("age", AgeTag, types.UintKind, false, schema.NotNullConstraint{}),
schema.NewColumn("is_married", IsMarriedTag, types.IntKind, false, schema.NotNullConstraint{}),
schema.NewColumn("title", TitleTag, types.StringKind, false),
)
sch := schema.MustSchemaFromCols(typedColColl)
// modified by init()
var TypedSchema = schema.MustSchemaFromCols(typedColColl)
_, err := sch.Indexes().AddIndexByColTags(IndexName, []uint64{NameTag}, schema.IndexProperties{IsUnique: false, Comment: ""})
if err != nil {
return nil, err
}
// modified by init()
var UntypedSchema, _ = untyped.UntypeSchema(TypedSchema)
var TypedRows []row.Row
var UntypedRows []row.Row
_, err = sch.Checks().AddCheck("test-check", "age < 123", true)
if err != nil {
return nil, err
}
func init() {
for i := 0; i < len(UUIDS); i++ {
return sch, err
}
// RowsAndSchema returns the schema and rows for the `people` test table.
func RowsAndSchema() ([]row.Row, schema.Schema, error) {
sch, err := Schema()
if err != nil {
return nil, nil, err
}
rows := make([]row.Row, len(uuids))
for i := 0; i < len(uuids); i++ {
married := types.Int(0)
if MaritalStatus[i] {
if maritalStatus[i] {
married = types.Int(1)
}
taggedVals := row.TaggedValues{
IdTag: types.String(UUIDS[i].String()),
NameTag: types.String(Names[i]),
AgeTag: types.Uint(Ages[i]),
TitleTag: types.String(Titles[i]),
IdTag: types.String(uuids[i].String()),
NameTag: types.String(names[i]),
AgeTag: types.Uint(ages[i]),
TitleTag: types.String(titles[i]),
IsMarriedTag: married,
}
r, err := row.New(types.Format_Default, TypedSchema, taggedVals)
if err != nil {
panic(err)
}
TypedRows = append(TypedRows, r)
taggedVals = row.TaggedValues{
IdTag: types.String(UUIDS[i].String()),
NameTag: types.String(Names[i]),
AgeTag: types.Uint(Ages[i]),
TitleTag: types.String(Titles[i]),
IsMarriedTag: married,
}
r, err = row.New(types.Format_Default, TypedSchema, taggedVals)
if err != nil {
panic(err)
}
UntypedRows = append(UntypedRows, r)
}
_, err := TypedSchema.Indexes().AddIndexByColTags(IndexName, []uint64{NameTag}, schema.IndexProperties{IsUnique: false, Comment: ""})
if err != nil {
panic(err)
}
_, err = UntypedSchema.Indexes().AddIndexByColTags(IndexName, []uint64{NameTag}, schema.IndexProperties{IsUnique: false, Comment: ""})
if err != nil {
panic(err)
}
_, err = TypedSchema.Checks().AddCheck("test-check", "age < 123", true)
if err != nil {
panic(err)
}
_, err = UntypedSchema.Checks().AddCheck("test-check", "age < 123", true)
if err != nil {
panic(err)
}
}
// NewTypedRow builds a row against TypedSchema from the given field values.
// A nil title leaves the title column as a nil types.Value; isMarried is
// stored as an int column (0 or 1). Panics if row construction fails.
func NewTypedRow(id uuid.UUID, name string, age uint, isMarried bool, title *string) row.Row {
	var titleVal types.Value
	if title != nil {
		titleVal = types.String(*title)
	}

	marriedVal := types.Int(0)
	if isMarried {
		marriedVal = types.Int(1)
	}

	vals := row.TaggedValues{
		IdTag:        types.String(id.String()),
		NameTag:      types.String(name),
		AgeTag:       types.Uint(age),
		IsMarriedTag: marriedVal,
		TitleTag:     titleVal,
	}

	newRow, err := row.New(types.Format_Default, TypedSchema, vals)
	if err != nil {
		panic(err)
	}
	return newRow
}
// CreateTestDataTable constructs an in-memory test table populated with the
// package's canonical people rows. When typed is true the typed schema and
// rows are used; otherwise the untyped variants are. Panics if any row
// cannot be appended.
func CreateTestDataTable(typed bool) (*table.InMemTable, schema.Schema) {
	var sch schema.Schema
	var rows []row.Row
	if typed {
		sch, rows = TypedSchema, TypedRows
	} else {
		sch, rows = UntypedSchema, UntypedRows
	}

	imt := table.NewInMemTable(sch)
	for _, r := range rows {
		if err := imt.AppendRow(r); err != nil {
			panic(err)
		}
	}
	return imt, sch
}
// AddColToRows adds a column to all the rows given and returns the result.
// This relies on the fact that noms_row.SetColVal doesn't need a full schema,
// just one that includes the column being set. A null val returns rs unchanged.
func AddColToRows(t *testing.T, rs []row.Row, tag uint64, val types.Value) []row.Row {
	if types.IsNull(val) {
		return rs
	}

	// A minimal single-column schema is enough for SetColVal.
	cols := schema.NewColCollection(schema.NewColumn("unused", tag, val.Kind(), false))
	partialSch := schema.UnkeyedSchemaFromCols(cols)

	updated := make([]row.Row, len(rs))
	for i := range rs {
		r, err := rs[i].SetColVal(tag, val, partialSch)
		require.NoError(t, err)
		updated[i] = r
	}
	return updated
}
// ConvertToSchema coerces the rows given into the schema given, keeping only
// column values whose tags exist in sch. Only possible if the types are
// equivalent. Panics on iteration or row-construction errors.
func ConvertToSchema(sch schema.Schema, rs ...row.Row) []row.Row {
	converted := make([]row.Row, len(rs))
	for i, r := range rs {
		kept := make(row.TaggedValues)
		_, err := r.IterCols(func(tag uint64, val types.Value) (bool, error) {
			if _, found := sch.GetAllCols().GetByTag(tag); found {
				kept[tag] = val
			}
			return false, nil
		})
		if err != nil {
			panic(err)
		}

		newRow, err := row.New(types.Format_Default, sch, kept)
		if err != nil {
			panic(err)
		}
		converted[i] = newRow
	}
	return converted
}
// MustRowData converts a slice of row.TaggedValues into a noms types.Map
// containing that data, failing the test on any error.
//
// Fix: removed merge-conflict residue — a redundant `if err != nil { panic }`
// after require.NoError and a stray `rows[i] = r` referencing undefined
// identifiers (`rows`, `i`), which did not compile.
func MustRowData(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, colVals []row.TaggedValues) *types.Map {
	m, err := types.NewMap(ctx, vrw)
	require.NoError(t, err)

	me := m.Edit()
	for _, taggedVals := range colVals {
		r, err := row.New(types.Format_Default, sch, taggedVals)
		require.NoError(t, err)
		me = me.Set(r.NomsMapKey(sch), r.NomsMapValue(sch))
	}

	m, err = me.Map(ctx)
	require.NoError(t, err)

	return &m
}
// MustMap constructs a types.Map from a slice of alternating key, value
// types.Value, failing the test on any error.
//
// Fix: removed an unreachable stray `return rows, sch, err` (merge-conflict
// residue referencing undefined identifiers with the wrong return arity)
// and corrected the "contructs" typo in the doc comment.
func MustMap(t *testing.T, vrw types.ValueReadWriter, kv ...types.Value) types.Map {
	m, err := types.NewMap(context.Background(), vrw, kv...)
	require.NoError(t, err)
	return m
}
// MustMap contructs a types.Tuple for a slice of types.Values.
+1 -63
View File
@@ -15,16 +15,6 @@
package dtestutils
import (
"context"
"math"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
@@ -38,7 +28,7 @@ func CreateSchema(columns ...schema.Column) schema.Schema {
return sch
}
// Creates a row with the schema given, having the values given. Starts at tag 0 and counts up.
// NewRow creates a row with the schema given, having the values given. Starts at tag 0 and counts up.
func NewRow(sch schema.Schema, values ...types.Value) row.Row {
taggedVals := make(row.TaggedValues)
for i := range values {
@@ -65,58 +55,6 @@ func AddColumnToSchema(sch schema.Schema, col schema.Column) schema.Schema {
return newSch
}
// RemoveColumnFromSchema returns a new schema with the given tag missing, but
// otherwise identical. At least one primary column must remain. The source
// schema's collation is carried over. Panics on iteration errors.
func RemoveColumnFromSchema(sch schema.Schema, tagToRemove uint64) schema.Schema {
	var kept []schema.Column
	err := sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (bool, error) {
		if tag != tagToRemove {
			kept = append(kept, col)
		}
		return false, nil
	})
	if err != nil {
		panic(err)
	}

	result := schema.MustSchemaFromCols(schema.NewColCollection(kept...))
	result.SetCollation(sch.GetCollation())
	return result
}
// FloatComparer compares two noms Floats for approximate equality, treating
// values within .001 of each other as equal (for use with go-cmp).
var FloatComparer = cmp.Comparer(func(x, y types.Float) bool {
	return math.Abs(float64(x)-float64(y)) < .001
})

// TimestampComparer compares two noms Timestamps via their Equals method
// (for use with go-cmp).
var TimestampComparer = cmp.Comparer(func(x, y types.Timestamp) bool {
	return x.Equals(y)
})
// CreateEmptyTestTable creates a new, empty test table with the name and
// schema given in dEnv's working root, failing the test on any error.
func CreateEmptyTestTable(t *testing.T, dEnv *env.DoltEnv, tableName string, sch schema.Schema) {
	ctx := context.Background()
	root, err := dEnv.WorkingRoot(ctx)
	require.NoError(t, err)

	vrw := dEnv.DoltDB.ValueReadWriter()
	ns := dEnv.DoltDB.NodeStore()

	// Empty primary row storage plus empty secondary indexes for the schema.
	rows, err := durable.NewEmptyIndex(ctx, vrw, ns, sch)
	require.NoError(t, err)
	indexSet, err := durable.NewIndexSetWithEmptyIndexes(ctx, vrw, ns, sch)
	require.NoError(t, err)

	tbl, err := doltdb.NewTable(ctx, vrw, ns, sch, rows, indexSet, nil)
	require.NoError(t, err)

	// Install the table into the working root and persist the new root.
	newRoot, err := root.PutTable(ctx, tableName, tbl)
	require.NoError(t, err)
	err = dEnv.UpdateWorkingRoot(ctx, newRoot)
	require.NoError(t, err)
}
// MustSchema takes a variable number of columns and returns a schema.
func MustSchema(cols ...schema.Column) schema.Schema {
hasPKCols := false
@@ -1,347 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testcommands TODO: kill off this package, replace with the non-test commands directly
package testcommands
import (
"context"
"fmt"
"io"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/cmd/dolt/commands/cnfcmds"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
)
type Command interface {
CommandString() string
Exec(t *testing.T, dEnv *env.DoltEnv) error
}
// StageAll is a test command that stages every table in the working set.
type StageAll struct{}

// CommandString describes the StageAll command for debugging purposes.
func (s StageAll) CommandString() string { return "stage_all" }

// Exec executes a StageAll command on a test dolt environment.
func (s StageAll) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	ctx := context.Background()

	roots, err := dEnv.Roots(ctx)
	require.NoError(t, err)

	staged, err := actions.StageAllTables(ctx, roots)
	require.NoError(t, err)

	return dEnv.UpdateRoots(ctx, staged)
}
// CommitStaged is a test command that commits the currently staged tables.
type CommitStaged struct {
	// Message is the commit message to record.
	Message string
}

// CommandString describes the CommitStaged command for debugging purposes.
func (c CommitStaged) CommandString() string { return fmt.Sprintf("commit_staged: %s", c.Message) }

// Exec executes a CommitStaged command on a test dolt environment, creating
// a commit from the staged root using the environment's configured name and
// email, then clearing any in-progress merge state.
func (c CommitStaged) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	roots, err := dEnv.Roots(context.Background())
	require.NoError(t, err)

	name, email, err := env.GetNameAndEmail(dEnv.Config)
	if err != nil {
		return err
	}

	dbData := dEnv.DbData()

	ws, err := dEnv.WorkingSet(context.Background())
	if err != nil {
		return errhand.VerboseErrorFromError(err)
	}

	// If a merge is in progress, record the merged-from commit as an
	// additional parent of the new commit.
	var mergeParentCommits []*doltdb.Commit
	if ws.MergeActive() {
		mergeParentCommits = []*doltdb.Commit{ws.MergeState().Commit()}
	}

	_, err = actions.CommitStaged(context.Background(), roots, ws.MergeActive(), mergeParentCommits, dbData, actions.CommitStagedProps{
		Message:    c.Message,
		Date:       time.Now(),
		AllowEmpty: false,
		Force:      false,
		Name:       name,
		Email:      email,
	})
	if err != nil {
		return err
	}

	// The commit succeeded, so any in-progress merge is now resolved.
	return dEnv.ClearMerge(context.Background())
}
// CommitAll is a test command that stages all tables and then commits them,
// mirroring `dolt add -A && dolt commit`.
type CommitAll struct {
	// Message is the commit message to record.
	Message string
}

// CommandString describes the CommitAll command for debugging purposes.
func (c CommitAll) CommandString() string { return fmt.Sprintf("commit: %s", c.Message) }

// Exec executes a CommitAll command on a test dolt environment. It stages
// every table, commits the result with the environment's configured name and
// email, then clears any in-progress merge state.
func (c CommitAll) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	roots, err := dEnv.Roots(context.Background())
	require.NoError(t, err)

	// Stage everything first; the staged roots are passed directly to
	// CommitStaged below rather than being written back to the env.
	roots, err = actions.StageAllTables(context.Background(), roots)
	require.NoError(t, err)

	name, email, err := env.GetNameAndEmail(dEnv.Config)
	if err != nil {
		return err
	}

	ws, err := dEnv.WorkingSet(context.Background())
	if err != nil {
		return errhand.VerboseErrorFromError(err)
	}

	// If a merge is in progress, record the merged-from commit as an
	// additional parent of the new commit.
	var mergeParentCommits []*doltdb.Commit
	if ws.MergeActive() {
		mergeParentCommits = []*doltdb.Commit{ws.MergeState().Commit()}
	}

	_, err = actions.CommitStaged(context.Background(), roots, ws.MergeActive(), mergeParentCommits, dEnv.DbData(), actions.CommitStagedProps{
		Message:    c.Message,
		Date:       time.Now(),
		AllowEmpty: false,
		Force:      false,
		Name:       name,
		Email:      email,
	})
	if err != nil {
		return err
	}

	// The commit succeeded, so any in-progress merge is now resolved.
	return dEnv.ClearMerge(context.Background())
}
// ResetHard is a test command mirroring `dolt reset --hard`.
type ResetHard struct{}

// CommandString describes the ResetHard command for debugging purposes.
func (r ResetHard) CommandString() string { return "reset_hard" }

// Exec resets both the working and staged roots back to HEAD.
// NOTE: does not handle untracked tables
func (r ResetHard) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	ctx := context.Background()

	headRoot, err := dEnv.HeadRoot(ctx)
	if err != nil {
		return err
	}
	if err = dEnv.UpdateWorkingRoot(ctx, headRoot); err != nil {
		return err
	}
	return dEnv.UpdateStagedRoot(ctx, headRoot)
}
// Query is a test command that runs a SQL statement against a test dolt
// environment.
type Query struct {
	// Query is the SQL statement to execute.
	Query string
}

// CommandString describes the Query command for debugging purposes.
func (q Query) CommandString() string { return fmt.Sprintf("query %s", q.Query) }

// Exec executes a Query command on a test dolt environment. The query's row
// results are drained and discarded; only the query's side effects on the
// working root are kept.
func (q Query) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	root, err := dEnv.WorkingRoot(context.Background())
	require.NoError(t, err)
	tmpDir, err := dEnv.TempTableFilesDir()
	require.NoError(t, err)
	opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
	sqlDb, err := dsqle.NewDatabase(context.Background(), "dolt", dEnv.DbData(), opts)
	require.NoError(t, err)
	engine, sqlCtx, err := dsqle.NewTestEngine(t, dEnv, context.Background(), sqlDb, root)
	require.NoError(t, err)

	_, iter, err := engine.Query(sqlCtx, q.Query)
	if err != nil {
		return err
	}

	// Drain the row iterator so the statement fully executes.
	for {
		_, err := iter.Next(sqlCtx)
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	err = iter.Close(sqlCtx)
	if err != nil {
		return err
	}

	// Persist any root mutations the query made back to the environment.
	newRoot, err := sqlDb.GetRoot(sqlCtx)
	require.NoError(t, err)
	err = dEnv.UpdateWorkingRoot(context.Background(), newRoot)
	return err
}
// Branch is a test command that creates a new branch at the current head.
type Branch struct {
	// BranchName is the name of the branch to create.
	BranchName string
}

// CommandString describes the Branch command for debugging purposes.
func (b Branch) CommandString() string { return fmt.Sprintf("branch: %s", b.BranchName) }

// Exec executes a Branch command on a test dolt environment, creating the
// branch at the current working branch's head.
func (b Branch) Exec(_ *testing.T, dEnv *env.DoltEnv) error {
	startPoint := dEnv.RepoStateReader().CWBHeadRef().String()
	return actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), b.BranchName, startPoint, false)
}
// Checkout is a test command that checks out an existing branch.
type Checkout struct {
	// BranchName is the name of the branch to check out.
	BranchName string
}

// CommandString describes the Checkout command for debugging purposes.
func (c Checkout) CommandString() string { return fmt.Sprintf("checkout: %s", c.BranchName) }

// Exec executes a Checkout command on a test dolt environment.
func (c Checkout) Exec(_ *testing.T, dEnv *env.DoltEnv) error {
	ctx := context.Background()
	return actions.CheckoutBranch(ctx, dEnv, c.BranchName, false)
}
// Merge is a test command that merges BranchName into the current branch.
type Merge struct {
	// BranchName is the branch to merge into the current branch.
	BranchName string
}

// CommandString describes the Merge command for debugging purposes.
func (m Merge) CommandString() string { return fmt.Sprintf("merge: %s", m.BranchName) }

// Exec executes a Merge command on a test dolt environment. It fast-forwards
// when possible; otherwise it performs a full merge, asserting that no table
// produced conflicts.
func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
	// Adapted from commands/merge.go:Exec()
	dref, err := dEnv.FindRef(context.Background(), m.BranchName)
	assert.NoError(t, err)

	cm1 := resolveCommit(t, "HEAD", dEnv)
	cm2 := resolveCommit(t, dref.String(), dEnv)

	h1, err := cm1.HashOf()
	assert.NoError(t, err)
	h2, err := cm2.HashOf()
	assert.NoError(t, err)
	// Merging a commit with itself is assumed to be a test setup bug.
	assert.NotEqual(t, h1, h2)

	roots, err := dEnv.Roots(context.Background())
	require.NoError(t, err)

	tblNames, _, err := merge.MergeWouldStompChanges(context.Background(), roots, cm2)
	if err != nil {
		return err
	}
	if len(tblNames) != 0 {
		// NOTE(review): err is always nil on this path, so AddCause(err)
		// attaches nothing — the stomped table names are also not reported.
		return errhand.BuildDError("error: failed to determine mergability.").AddCause(err).Build()
	}
	// NOTE(review): if CanFastForwardTo returns ok == false together with a
	// non-nil error, that error is silently dropped and a full merge is
	// attempted — confirm this is intended.
	if ok, err := cm1.CanFastForwardTo(context.Background(), cm2); ok {
		if err != nil {
			return err
		}
		// Fast-forward: move the branch head and working set to cm2's root.
		err = dEnv.DoltDB.FastForward(context.Background(), dEnv.RepoStateReader().CWBHeadRef(), cm2)
		if err != nil {
			return err
		}
		workingSet, err := dEnv.WorkingSet(context.Background())
		if err != nil {
			return errhand.VerboseErrorFromError(err)
		}
		rv, err := cm2.GetRootValue(context.Background())
		assert.NoError(t, err)
		err = dEnv.UpdateWorkingSet(context.Background(), workingSet.WithWorkingRoot(rv))
		require.NoError(t, err)
	} else {
		// Full merge: compute the merged root and require zero conflicts.
		tmpDir, err := dEnv.TempTableFilesDir()
		require.NoError(t, err)
		opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
		mergedRoot, tblToStats, err := merge.MergeCommits(context.Background(), cm1, cm2, opts)
		require.NoError(t, err)
		for _, stats := range tblToStats {
			require.True(t, stats.Conflicts == 0)
		}
		// Record the in-progress merge, then write the merged root to both
		// the working and staged roots.
		err = dEnv.StartMerge(context.Background(), cm2, dref.String())
		if err != nil {
			return err
		}
		err = dEnv.UpdateWorkingRoot(context.Background(), mergedRoot)
		if err != nil {
			return err
		}
		err = dEnv.UpdateStagedRoot(context.Background(), mergedRoot)
		if err != nil {
			return err
		}
	}
	return nil
}
// resolveCommit resolves a commit spec string against the current working
// branch's head, failing the test on any error.
func resolveCommit(t *testing.T, cSpecStr string, dEnv *env.DoltEnv) *doltdb.Commit {
	spec, err := doltdb.NewCommitSpec(cSpecStr)
	require.NoError(t, err)

	commit, err := dEnv.DoltDB.Resolve(context.TODO(), spec, dEnv.RepoStateReader().CWBHeadRef())
	require.NoError(t, err)

	return commit
}
// ConflictsCat is a test command that prints the conflicts in a table.
type ConflictsCat struct {
	// TableName is the table whose conflicts are printed.
	TableName string
}

// CommandString describes the ConflictsCat command for debugging purposes.
func (c ConflictsCat) CommandString() string { return fmt.Sprintf("conflicts_cat: %s", c.TableName) }

// Exec runs `dolt conflicts cat` against the table and asserts that it exits
// with status 0.
// NOTE(review): the wg parameter is unused, and this signature diverges from
// the Command interface's Exec(t, dEnv) — confirm whether wg can be removed
// so ConflictsCat satisfies Command.
func (c ConflictsCat) Exec(t *testing.T, wg *sync.WaitGroup, dEnv *env.DoltEnv) error {
	out := cnfcmds.CatCmd{}.Exec(context.Background(), "dolt conflicts cat", []string{c.TableName}, dEnv)
	require.Equal(t, 0, out)
	return nil
}
@@ -28,15 +28,14 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
const (
repoPrefix = "repo_*"
remotePrefix = "remote_*"
homePrefix = "home"
homePrefix = "home"
)
type MultiRepoTestSetup struct {
@@ -270,10 +269,28 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit
return commit
}
// createTestDataTable builds an in-memory table from the canonical dtestutils
// people rows and schema, panicking on any error.
func createTestDataTable() (*table.InMemTable, schema.Schema) {
	rows, sch, err := dtestutils.RowsAndSchema()
	if err != nil {
		panic(err)
	}

	imt := table.NewInMemTable(sch)
	for _, r := range rows {
		if appendErr := imt.AppendRow(r); appendErr != nil {
			panic(appendErr)
		}
	}
	return imt, sch
}
func (mr *MultiRepoTestSetup) CreateTable(dbName, tblName string) {
dEnv := mr.MrEnv.GetEnv(dbName)
imt, sch := dtestutils.CreateTestDataTable(true)
imt, sch := createTestDataTable()
rows := make([]row.Row, imt.NumRows())
for i := 0; i < imt.NumRows(); i++ {
r, err := imt.GetRow(i)
+4 -1
View File
@@ -28,6 +28,7 @@ type CommitStagedProps struct {
Message string
Date time.Time
AllowEmpty bool
Amend bool
Force bool
Name string
Email string
@@ -157,7 +158,9 @@ func GetCommitStaged(
stagedTblNames = append(stagedTblNames, n)
}
if len(staged) == 0 && !mergeActive && !props.AllowEmpty {
isEmpty := len(staged) == 0
allowEmpty := mergeActive || props.AllowEmpty || props.Amend
if isEmpty && !allowEmpty {
return nil, NothingStaged{notStaged}
}
+66 -74
View File
@@ -149,80 +149,64 @@ func newQueue() *q {
return &q{loaded: make(map[hash.Hash]*c)}
}
// GetDotDotRevisions returns the commits reachable from commit at hash
// `includedHead` that are not reachable from hash `excludedHead`.
// `includedHead` and `excludedHead` must be commits in `ddb`. Returns up
// to `num` commits, in reverse topological order starting at `includedHead`,
// GetDotDotRevisions returns the commits reachable from commit at hashes
// `includedHeads` that are not reachable from hashes `excludedHeads`.
// `includedHeads` and `excludedHeads` must be commits in `ddb`. Returns up
// to `num` commits, in reverse topological order starting at `includedHeads`,
// with tie breaking based on the height of commit graph between
// concurrent commits --- higher commits appear first. Remaining
// ties are broken by timestamp; newer commits appear first.
//
// Roughly mimics `git log main..feature`.
func GetDotDotRevisions(ctx context.Context, includedDB *doltdb.DoltDB, includedHead hash.Hash, excludedDB *doltdb.DoltDB, excludedHead hash.Hash, num int) ([]*doltdb.Commit, error) {
// Roughly mimics `git log main..feature` or `git log main...feature` (if
// more than one `includedHead` is provided).
func GetDotDotRevisions(ctx context.Context, includedDB *doltdb.DoltDB, includedHeads []hash.Hash, excludedDB *doltdb.DoltDB, excludedHeads []hash.Hash, num int) ([]*doltdb.Commit, error) {
itr, err := GetDotDotRevisionsIterator(ctx, includedDB, includedHeads, excludedDB, excludedHeads, nil)
if err != nil {
return nil, err
}
var commitList []*doltdb.Commit
q := newQueue()
if err := q.SetInvisible(ctx, excludedDB, excludedHead); err != nil {
return nil, err
}
if err := q.AddPendingIfUnseen(ctx, excludedDB, excludedHead); err != nil {
return nil, err
}
if err := q.AddPendingIfUnseen(ctx, includedDB, includedHead); err != nil {
return nil, err
}
for q.NumVisiblePending() > 0 {
nextC := q.PopPending()
parents, err := nextC.commit.ParentHashes(ctx)
if err != nil {
for num < 0 || len(commitList) < num {
_, commit, err := itr.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
for _, parentID := range parents {
if nextC.invisible {
if err := q.SetInvisible(ctx, nextC.ddb, parentID); err != nil {
return nil, err
}
}
if err := q.AddPendingIfUnseen(ctx, nextC.ddb, parentID); err != nil {
return nil, err
}
}
if !nextC.invisible {
commitList = append(commitList, nextC.commit)
if len(commitList) == num {
return commitList, nil
}
}
commitList = append(commitList, commit)
}
return commitList, nil
}
// GetTopologicalOrderCommits returns the commits reachable from the commit at hash `startCommitHash`
// GetTopologicalOrderCommits returns the commits reachable from the commits in `startCommitHashes`
// in reverse topological order, with tiebreaking done by the height of the commit graph -- higher commits
// appear first. Remaining ties are broken by timestamp; newer commits appear first.
func GetTopologicalOrderCommits(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash hash.Hash) ([]*doltdb.Commit, error) {
return GetTopNTopoOrderedCommitsMatching(ctx, ddb, startCommitHash, -1, nil)
func GetTopologicalOrderCommits(ctx context.Context, ddb *doltdb.DoltDB, startCommitHashes []hash.Hash) ([]*doltdb.Commit, error) {
return GetTopNTopoOrderedCommitsMatching(ctx, ddb, startCommitHashes, -1, nil)
}
// GetTopologicalOrderCommitIterator returns an iterator for commits generated with the same semantics as
// GetTopologicalOrderCommits
func GetTopologicalOrderIterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (doltdb.CommitItr, error) {
return newCommiterator(ctx, ddb, startCommitHash, matchFn)
func GetTopologicalOrderIterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHashes []hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (doltdb.CommitItr, error) {
return newCommiterator(ctx, ddb, startCommitHashes, matchFn)
}
type commiterator struct {
ddb *doltdb.DoltDB
startCommitHash hash.Hash
matchFn func(*doltdb.Commit) (bool, error)
q *q
ddb *doltdb.DoltDB
startCommitHashes []hash.Hash
matchFn func(*doltdb.Commit) (bool, error)
q *q
}
var _ doltdb.CommitItr = (*commiterator)(nil)
func newCommiterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (*commiterator, error) {
func newCommiterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHashes []hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (*commiterator, error) {
itr := &commiterator{
ddb: ddb,
startCommitHash: startCommitHash,
matchFn: matchFn,
ddb: ddb,
startCommitHashes: startCommitHashes,
matchFn: matchFn,
}
err := itr.Reset(ctx)
@@ -270,17 +254,19 @@ func (i *commiterator) Next(ctx context.Context) (hash.Hash, *doltdb.Commit, err
// Reset implements doltdb.CommitItr
func (i *commiterator) Reset(ctx context.Context) error {
i.q = newQueue()
if err := i.q.AddPendingIfUnseen(ctx, i.ddb, i.startCommitHash); err != nil {
return err
for _, startCommitHash := range i.startCommitHashes {
if err := i.q.AddPendingIfUnseen(ctx, i.ddb, startCommitHash); err != nil {
return err
}
}
return nil
}
// GetTopNTopoOrderedCommitsMatching returns the first N commits (If N <= 0 then all commits) reachable from the commit at hash
// `startCommitHash` in reverse topological order, with tiebreaking done by the height of the commit graph -- higher
// GetTopNTopoOrderedCommitsMatching returns the first N commits (If N <= 0 then all commits) reachable from the commits in
// `startCommitHashes` in reverse topological order, with tiebreaking done by the height of the commit graph -- higher
// commits appear first. Remaining ties are broken by timestamp; newer commits appear first.
func GetTopNTopoOrderedCommitsMatching(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash hash.Hash, n int, matchFn func(*doltdb.Commit) (bool, error)) ([]*doltdb.Commit, error) {
itr, err := GetTopologicalOrderIterator(ctx, ddb, startCommitHash, matchFn)
func GetTopNTopoOrderedCommitsMatching(ctx context.Context, ddb *doltdb.DoltDB, startCommitHashes []hash.Hash, n int, matchFn func(*doltdb.Commit) (bool, error)) ([]*doltdb.Commit, error) {
itr, err := GetTopologicalOrderIterator(ctx, ddb, startCommitHashes, matchFn)
if err != nil {
return nil, err
}
@@ -302,26 +288,28 @@ func GetTopNTopoOrderedCommitsMatching(ctx context.Context, ddb *doltdb.DoltDB,
// GetDotDotRevisionsIterator returns an iterator for commits generated with the same semantics as
// GetDotDotRevisions
func GetDotDotRevisionsIterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash, excludingCommitHash hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (doltdb.CommitItr, error) {
return newDotDotCommiterator(ctx, ddb, startCommitHash, excludingCommitHash, matchFn)
func GetDotDotRevisionsIterator(ctx context.Context, includedDdb *doltdb.DoltDB, startCommitHashes []hash.Hash, excludedDdb *doltdb.DoltDB, excludingCommitHashes []hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (doltdb.CommitItr, error) {
return newDotDotCommiterator(ctx, includedDdb, startCommitHashes, excludedDdb, excludingCommitHashes, matchFn)
}
type dotDotCommiterator struct {
ddb *doltdb.DoltDB
startCommitHash hash.Hash
excludingCommitHash hash.Hash
matchFn func(*doltdb.Commit) (bool, error)
q *q
includedDdb *doltdb.DoltDB
excludedDdb *doltdb.DoltDB
startCommitHashes []hash.Hash
excludingCommitHashes []hash.Hash
matchFn func(*doltdb.Commit) (bool, error)
q *q
}
var _ doltdb.CommitItr = (*dotDotCommiterator)(nil)
func newDotDotCommiterator(ctx context.Context, ddb *doltdb.DoltDB, startCommitHash, excludingCommitHash hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (*dotDotCommiterator, error) {
func newDotDotCommiterator(ctx context.Context, includedDdb *doltdb.DoltDB, startCommitHashes []hash.Hash, excludedDdb *doltdb.DoltDB, excludingCommitHashes []hash.Hash, matchFn func(*doltdb.Commit) (bool, error)) (*dotDotCommiterator, error) {
itr := &dotDotCommiterator{
ddb: ddb,
startCommitHash: startCommitHash,
excludingCommitHash: excludingCommitHash,
matchFn: matchFn,
includedDdb: includedDdb,
excludedDdb: excludedDdb,
startCommitHashes: startCommitHashes,
excludingCommitHashes: excludingCommitHashes,
matchFn: matchFn,
}
err := itr.Reset(ctx)
@@ -373,14 +361,18 @@ func (i *dotDotCommiterator) Next(ctx context.Context) (hash.Hash, *doltdb.Commi
// Reset implements doltdb.CommitItr
func (i *dotDotCommiterator) Reset(ctx context.Context) error {
i.q = newQueue()
if err := i.q.SetInvisible(ctx, i.ddb, i.excludingCommitHash); err != nil {
return err
for _, excludingCommitHash := range i.excludingCommitHashes {
if err := i.q.SetInvisible(ctx, i.excludedDdb, excludingCommitHash); err != nil {
return err
}
if err := i.q.AddPendingIfUnseen(ctx, i.excludedDdb, excludingCommitHash); err != nil {
return err
}
}
if err := i.q.AddPendingIfUnseen(ctx, i.ddb, i.excludingCommitHash); err != nil {
return err
}
if err := i.q.AddPendingIfUnseen(ctx, i.ddb, i.startCommitHash); err != nil {
return err
for _, startCommitHash := range i.startCommitHashes {
if err := i.q.AddPendingIfUnseen(ctx, i.includedDdb, startCommitHash); err != nil {
return err
}
}
return nil
}
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/datas"
@@ -113,15 +114,15 @@ func TestGetDotDotRevisions(t *testing.T) {
// Branches look like this:
//
// feature: *--*--*--*--*--*--*
// / /
// main: --*--*--*--*--*--------*--*--*--*
// feature: F1--F2--F3--F4--F5--F6--F7
// / /
// main: M0--M1--M2--M3--M4--M5/F0------------M6--M7--M8--M9
featureHash := mustGetHash(t, featureCommits[7])
mainHash := mustGetHash(t, mainCommits[6])
featurePreMergeHash := mustGetHash(t, featureCommits[3])
res, err := GetDotDotRevisions(context.Background(), dEnv.DoltDB, featureHash, dEnv.DoltDB, mainHash, 100)
res, err := GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featureHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 100)
require.NoError(t, err)
assert.Len(t, res, 7)
@@ -133,31 +134,57 @@ func TestGetDotDotRevisions(t *testing.T) {
assertEqualHashes(t, featureCommits[2], res[5])
assertEqualHashes(t, featureCommits[1], res[6])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, mainHash, dEnv.DoltDB, featureHash, 100)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{mainHash}, dEnv.DoltDB, []hash.Hash{featureHash}, 100)
require.NoError(t, err)
assert.Len(t, res, 0)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, featureHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featureHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[7], res[0])
assertEqualHashes(t, featureCommits[6], res[1])
assertEqualHashes(t, featureCommits[5], res[2])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, featurePreMergeHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featurePreMergeHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[3], res[0])
assertEqualHashes(t, featureCommits[2], res[1])
assertEqualHashes(t, featureCommits[1], res[2])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, featurePreMergeHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featurePreMergeHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[3], res[0])
assertEqualHashes(t, featureCommits[2], res[1])
assertEqualHashes(t, featureCommits[1], res[2])
// Three dot
mergeBaseHash, err := merge.MergeBase(context.Background(), mainCommits[6], featureCommits[7])
require.NoError(t, err)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featureHash, mainHash}, dEnv.DoltDB, []hash.Hash{mergeBaseHash}, -1)
require.NoError(t, err)
assert.Len(t, res, 7)
assertEqualHashes(t, featureCommits[7], res[0])
assertEqualHashes(t, featureCommits[6], res[1])
assertEqualHashes(t, featureCommits[5], res[2])
assertEqualHashes(t, featureCommits[4], res[3])
assertEqualHashes(t, featureCommits[3], res[4])
assertEqualHashes(t, featureCommits[2], res[5])
assertEqualHashes(t, featureCommits[1], res[6])
mergeBaseHash, err = merge.MergeBase(context.Background(), mainCommits[6], featureCommits[3])
require.NoError(t, err)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featurePreMergeHash, mainHash}, dEnv.DoltDB, []hash.Hash{mergeBaseHash}, -1)
require.NoError(t, err)
assert.Len(t, res, 4)
assertEqualHashes(t, featureCommits[3], res[0])
assertEqualHashes(t, featureCommits[2], res[1])
assertEqualHashes(t, mainCommits[6], res[2])
assertEqualHashes(t, featureCommits[1], res[3])
// Create a similar branch to "feature" on a forked repository and GetDotDotRevisions using that as well.
forkEnv := mustForkDB(t, dEnv.DoltDB, "feature", featureCommits[4])
@@ -170,9 +197,9 @@ func TestGetDotDotRevisions(t *testing.T) {
mainHash = mustGetHash(t, mainCommits[6])
featurePreMergeHash = mustGetHash(t, featureCommits[3])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, featureHash, dEnv.DoltDB, mainHash, 100)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featureHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 100)
require.Error(t, err)
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, featureHash, dEnv.DoltDB, mainHash, 100)
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, []hash.Hash{featureHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 100)
require.NoError(t, err)
assert.Len(t, res, 7)
assertEqualHashes(t, featureCommits[7], res[0])
@@ -183,27 +210,27 @@ func TestGetDotDotRevisions(t *testing.T) {
assertEqualHashes(t, featureCommits[2], res[5])
assertEqualHashes(t, featureCommits[1], res[6])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, mainHash, dEnv.DoltDB, featureHash, 100)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{mainHash}, dEnv.DoltDB, []hash.Hash{featureHash}, 100)
require.Error(t, err)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, mainHash, forkEnv.DoltDB, featureHash, 100)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{mainHash}, forkEnv.DoltDB, []hash.Hash{featureHash}, 100)
require.NoError(t, err)
assert.Len(t, res, 0)
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, featureHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, []hash.Hash{featureHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[7], res[0])
assertEqualHashes(t, featureCommits[6], res[1])
assertEqualHashes(t, featureCommits[5], res[2])
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, featurePreMergeHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), dEnv.DoltDB, []hash.Hash{featurePreMergeHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[3], res[0])
assertEqualHashes(t, featureCommits[2], res[1])
assertEqualHashes(t, featureCommits[1], res[2])
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, featurePreMergeHash, dEnv.DoltDB, mainHash, 3)
res, err = GetDotDotRevisions(context.Background(), forkEnv.DoltDB, []hash.Hash{featurePreMergeHash}, dEnv.DoltDB, []hash.Hash{mainHash}, 3)
require.NoError(t, err)
assert.Len(t, res, 3)
assertEqualHashes(t, featureCommits[3], res[0])
+62 -9
View File
@@ -16,9 +16,10 @@ package actions
import (
"context"
"errors"
"fmt"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
@@ -49,6 +50,54 @@ func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, ro
}
}
// mirroring Git behavior, untracked tables are ignored on 'reset --hard',
// save the state of these tables and apply them to |newHead|'s root.
//
// as a special case, if an untracked table has a tag collision with any
// tables in |newHead| we silently drop it from the new working set.
// these tag collision is typically cause by table renames (bug #751).
untracked, err := roots.Working.GetAllSchemas(ctx)
if err != nil {
return nil, doltdb.Roots{}, err
}
// untracked tables exist in |working| but not in |staged|
staged, err := roots.Staged.GetTableNames(ctx)
if err != nil {
return nil, doltdb.Roots{}, err
}
for _, name := range staged {
delete(untracked, name)
}
newWkRoot := roots.Head
ws, err := newWkRoot.GetAllSchemas(ctx)
if err != nil {
return nil, doltdb.Roots{}, err
}
tags := mapColumnTags(ws)
for name, sch := range untracked {
for _, pk := range sch.GetAllCols().GetColumns() {
if _, ok := tags[pk.Tag]; ok {
// |pk.Tag| collides with a schema in |newWkRoot|
delete(untracked, name)
}
}
}
for name := range untracked {
tbl, _, err := roots.Working.GetTable(ctx, name)
if err != nil {
return nil, doltdb.Roots{}, err
}
newWkRoot, err = newWkRoot.PutTable(ctx, name, tbl)
if err != nil {
return nil, doltdb.Roots{}, fmt.Errorf("failed to write table back to database: %s", err)
}
}
// need to save the state of files that aren't tracked
untrackedTables := make(map[string]*doltdb.Table)
wTblNames, err := roots.Working.GetTableNames(ctx)
@@ -75,14 +124,6 @@ func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, ro
delete(untrackedTables, tblName)
}
newWkRoot := roots.Head
for tblName, tbl := range untrackedTables {
newWkRoot, err = newWkRoot.PutTable(ctx, tblName, tbl)
if err != nil {
return nil, doltdb.Roots{}, errors.New("error: failed to write table back to database")
}
}
roots.Working = newWkRoot
roots.Staged = roots.Head
@@ -276,3 +317,15 @@ func CleanUntracked(ctx context.Context, roots doltdb.Roots, tables []string, dr
return roots, nil
}
// mapColumnTags takes a map from table name to schema.Schema and generates
// a map from column tags to table names (see RootValue.GetAllSchemas).
func mapColumnTags(tables map[string]schema.Schema) (m map[uint64]string) {
m = make(map[uint64]string, len(tables))
for tbl, sch := range tables {
for _, tag := range sch.GetAllCols().Tags {
m[tag] = tbl
}
}
return
}
+2 -2
View File
@@ -811,7 +811,7 @@ func (dEnv *DoltEnv) CredsDir() (string, error) {
return getCredsDir(dEnv.hdp)
}
func (dEnv *DoltEnv) UserRPCCreds() (creds.DoltCreds, bool, error) {
func (dEnv *DoltEnv) UserDoltCreds() (creds.DoltCreds, bool, error) {
kid, err := dEnv.Config.GetString(UserCreds)
if err == nil && kid != "" {
@@ -826,7 +826,7 @@ func (dEnv *DoltEnv) UserRPCCreds() (creds.DoltCreds, bool, error) {
return c, c.IsPrivKeyValid() && c.IsPubKeyValid(), err
}
return creds.EmptyCreds, false, nil
return creds.DoltCreds{}, false, nil
}
// GetGRPCDialParams implements dbfactory.GRPCDialProvider
+2 -2
View File
@@ -109,14 +109,14 @@ func (p GRPCDialProvider) getRPCCreds() (credentials.PerRPCCredentials, error) {
return nil, nil
}
dCreds, valid, err := p.dEnv.UserRPCCreds()
dCreds, valid, err := p.dEnv.UserDoltCreds()
if err != nil {
return nil, ErrInvalidCredsFile
}
if !valid {
return nil, nil
}
return dCreds, nil
return dCreds.RPCCreds(), nil
}
// getUserAgentString returns a user agent string to use in GRPC requests.
@@ -132,7 +132,7 @@ func TestMerge(t *testing.T) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(t, dEnv, root, test.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.expected), len(actRows))
@@ -251,7 +251,7 @@ func TestMergeConflicts(t *testing.T) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(t, dEnv, root, test.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.expected), len(actRows))
@@ -119,7 +119,7 @@ func TestMigration(t *testing.T) {
root, err := postEnv.WorkingRoot(ctx)
require.NoError(t, err)
for _, a := range test.asserts {
actual, err := sqle.ExecuteSelect(t, postEnv, root, a.query)
actual, err := sqle.ExecuteSelect(postEnv, root, a.query)
assert.NoError(t, err)
assert.Equal(t, a.expected, actual)
}
@@ -227,7 +227,7 @@ func testFilterBranch(t *testing.T, test filterBranchTest) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(t, dEnv, root, a.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, a.query)
require.NoError(t, err)
require.Equal(t, a.rows, actRows)
@@ -59,6 +59,8 @@ type ServerArgs struct {
ReadOnly bool
Options []grpc.ServerOption
HttpInterceptor func(http.Handler) http.Handler
// If supplied, the listener(s) returned from Listeners() will be TLS
// listeners. The scheme used in the URLs returned from the gRPC server
// will be https.
@@ -94,6 +96,9 @@ func NewServer(args ServerArgs) (*Server, error) {
remotesapi.RegisterChunkStoreServiceServer(s.grpcSrv, chnkSt)
var handler http.Handler = newFileHandler(args.Logger, args.DBCache, args.FS, args.ReadOnly, sealer)
if args.HttpInterceptor != nil {
handler = args.HttpInterceptor(handler)
}
if args.HttpPort == args.GrpcPort {
handler = grpcMultiplexHandler(s.grpcSrv, handler)
} else {
@@ -100,10 +100,10 @@ func getSqlTypes() []sql.Type {
//sql.Blob, //BLOB
sql.Boolean, //BOOLEAN
sql.MustCreateStringWithDefaults(sqltypes.Char, 10), //CHAR(10)
sql.Date, //DATE
sql.Datetime, //DATETIME
sql.MustCreateDecimalType(9, 5), //DECIMAL(9, 5)
sql.Float64, //DOUBLE
sql.Date, //DATE
sql.Datetime, //DATETIME
sql.MustCreateColumnDecimalType(9, 5), //DECIMAL(9, 5)
sql.Float64, //DOUBLE
sql.MustCreateEnumType([]string{"a", "b", "c"}, sql.Collation_Default), //ENUM('a','b','c')
sql.Float32, //FLOAT
sql.Int32, //INT
@@ -59,7 +59,7 @@ func CreateDecimalTypeFromParams(params map[string]string) (TypeInfo, error) {
} else {
return nil, fmt.Errorf(`create decimal type info is missing param "%v"`, decimalTypeParam_Scale)
}
sqlDecimalType, err := sql.CreateDecimalType(precision, scale)
sqlDecimalType, err := sql.CreateColumnDecimalType(precision, scale)
if err != nil {
return nil, err
}
@@ -337,7 +337,7 @@ func TestDecimalMarshal(t *testing.T) {
"16976349273982359874209023948672021737840592720387475.271912873754", false},
{65, 12, "99999999999999999999999999999999999999999999999999999.9999999999999", "", true},
{20, 10, []byte{32}, "", true},
{20, 10, []byte{32}, "32", false},
{20, 10, time.Date(2019, 12, 12, 12, 12, 12, 0, time.UTC), "", true},
}
+39 -13
View File
@@ -87,7 +87,7 @@ func TestRenameTable(t *testing.T) {
require.NoError(t, err)
// setup tests
root, err = ExecuteSql(t, dEnv, root, setup)
root, err = ExecuteSql(dEnv, root, setup)
require.NoError(t, err)
schemas, err := root.GetAllSchemas(ctx)
@@ -121,6 +121,8 @@ func TestRenameTable(t *testing.T) {
const tableName = "people"
func TestAddColumnToTable(t *testing.T) {
origRows, sch, err := dtestutils.RowsAndSchema()
require.NoError(t, err)
tests := []struct {
name string
@@ -140,9 +142,9 @@ func TestAddColumnToTable(t *testing.T) {
newColName: "newCol",
colKind: types.IntKind,
nullable: Null,
expectedSchema: dtestutils.AddColumnToSchema(dtestutils.TypedSchema,
expectedSchema: dtestutils.AddColumnToSchema(sch,
schema.NewColumn("newCol", dtestutils.NextTag, types.IntKind, false)),
expectedRows: dtestutils.TypedRows,
expectedRows: origRows,
},
{
name: "nullable with nil default",
@@ -150,9 +152,9 @@ func TestAddColumnToTable(t *testing.T) {
newColName: "newCol",
colKind: types.IntKind,
nullable: Null,
expectedSchema: dtestutils.AddColumnToSchema(dtestutils.TypedSchema,
expectedSchema: dtestutils.AddColumnToSchema(sch,
schemaNewColumnWithDefault("newCol", dtestutils.NextTag, types.IntKind, false, "")),
expectedRows: dtestutils.TypedRows,
expectedRows: origRows,
},
{
name: "nullable with non-nil default",
@@ -161,9 +163,9 @@ func TestAddColumnToTable(t *testing.T) {
colKind: types.IntKind,
nullable: Null,
defaultVal: mustStringToColumnDefault("42"),
expectedSchema: dtestutils.AddColumnToSchema(dtestutils.TypedSchema,
expectedSchema: dtestutils.AddColumnToSchema(sch,
schemaNewColumnWithDefault("newCol", dtestutils.NextTag, types.IntKind, false, "42")),
expectedRows: dtestutils.AddColToRows(t, dtestutils.TypedRows, dtestutils.NextTag, types.NullValue),
expectedRows: addColToRows(t, origRows, dtestutils.NextTag, types.NullValue),
},
{
name: "first order",
@@ -181,7 +183,7 @@ func TestAddColumnToTable(t *testing.T) {
schema.NewColumn("is_married", dtestutils.IsMarriedTag, types.IntKind, false, schema.NotNullConstraint{}),
schema.NewColumn("title", dtestutils.TitleTag, types.StringKind, false),
),
expectedRows: dtestutils.AddColToRows(t, dtestutils.TypedRows, dtestutils.NextTag, types.NullValue),
expectedRows: addColToRows(t, origRows, dtestutils.NextTag, types.NullValue),
},
{
name: "middle order",
@@ -199,7 +201,7 @@ func TestAddColumnToTable(t *testing.T) {
schema.NewColumn("is_married", dtestutils.IsMarriedTag, types.IntKind, false, schema.NotNullConstraint{}),
schema.NewColumn("title", dtestutils.TitleTag, types.StringKind, false),
),
expectedRows: dtestutils.AddColToRows(t, dtestutils.TypedRows, dtestutils.NextTag, types.NullValue),
expectedRows: addColToRows(t, origRows, dtestutils.NextTag, types.NullValue),
},
{
name: "tag collision",
@@ -255,19 +257,24 @@ func TestAddColumnToTable(t *testing.T) {
}
func makePeopleTable(ctx context.Context, dEnv *env.DoltEnv) (*env.DoltEnv, error) {
_, sch, err := dtestutils.RowsAndSchema()
if err != nil {
return nil, err
}
root, err := dEnv.WorkingRoot(ctx)
if err != nil {
return nil, err
}
rows, err := durable.NewEmptyIndex(ctx, root.VRW(), root.NodeStore(), dtestutils.TypedSchema)
rows, err := durable.NewEmptyIndex(ctx, root.VRW(), root.NodeStore(), sch)
if err != nil {
return nil, err
}
indexes, err := durable.NewIndexSetWithEmptyIndexes(ctx, root.VRW(), root.NodeStore(), dtestutils.TypedSchema)
indexes, err := durable.NewIndexSetWithEmptyIndexes(ctx, root.VRW(), root.NodeStore(), sch)
if err != nil {
return nil, err
}
tbl, err := doltdb.NewTable(ctx, root.VRW(), root.NodeStore(), dtestutils.TypedSchema, rows, indexes, nil)
tbl, err := doltdb.NewTable(ctx, root.VRW(), root.NodeStore(), sch, rows, indexes, nil)
if err != nil {
return nil, err
}
@@ -434,7 +441,7 @@ func TestDropPks(t *testing.T) {
require.NoError(t, err)
root, _ := dEnv.WorkingRoot(ctx)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
require.NoError(t, err)
for _, query := range tt.setup {
@@ -765,3 +772,22 @@ func TestModifyColumn(t *testing.T) {
})
}
}
// addColToRows adds a column to all the rows given and returns it. This method relies on the fact that
// noms_row.SetColVal doesn't need a full schema, just one that includes the column being set.
func addColToRows(t *testing.T, rs []row.Row, tag uint64, val types.Value) []row.Row {
if types.IsNull(val) {
return rs
}
colColl := schema.NewColCollection(schema.NewColumn("unused", tag, val.Kind(), false))
fakeSch := schema.UnkeyedSchemaFromCols(colColl)
newRows := make([]row.Row, len(rs))
var err error
for i, r := range rs {
newRows[i], err = r.SetColVal(tag, val, fakeSch)
require.NoError(t, err)
}
return newRows
}
@@ -122,7 +122,7 @@ func executeSelect(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *d
db, err := sqle.NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := sqle.NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := sqle.NewTestEngine(dEnv, ctx, db, root)
if err != nil {
return nil, err
}
@@ -155,7 +155,7 @@ func executeModify(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *d
db, err := sqle.NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := sqle.NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := sqle.NewTestEngine(dEnv, ctx, db, root)
if err != nil {
return nil, err
}
@@ -16,6 +16,8 @@ package cluster
import (
"context"
"crypto/ed25519"
"crypto/rand"
"errors"
"fmt"
"strconv"
@@ -25,7 +27,9 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/dolthub/dolt/go/libraries/doltcore/creds"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
@@ -55,6 +59,7 @@ type Controller struct {
sinterceptor serverinterceptor
cinterceptor clientinterceptor
lgr *logrus.Logger
grpcCreds credentials.PerRPCCredentials
provider dbProvider
iterSessions IterSessions
@@ -193,7 +198,7 @@ func (c *Controller) applyCommitHooks(ctx context.Context, name string, bt *sql.
}
func (c *Controller) gRPCDialProvider(denv *env.DoltEnv) dbfactory.GRPCDialProvider {
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor, c.cfg}
return grpcDialProvider{env.NewGRPCDialProviderFromDoltEnv(denv), &c.cinterceptor, c.cfg, c.grpcCreds}
}
func (c *Controller) RegisterStoredProcedures(store procedurestore) {
@@ -406,6 +411,25 @@ func (c *Controller) RemoteSrvServerArgs(ctx *sql.Context, args remotesrv.Server
args.Options = c.ServerOptions()
args = sqle.RemoteSrvServerArgs(ctx, args)
args.DBCache = remotesrvStoreCache{args.DBCache, c}
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
keyID := creds.PubKeyToKID(pub)
keyIDStr := creds.B32CredsEncoding.EncodeToString(keyID)
args.HttpInterceptor = JWKSHandlerInterceptor(keyIDStr, pub)
c.grpcCreds = &creds.RPCCreds{
PrivKey: priv,
Audience: creds.RemotesAPIAudience,
Issuer: creds.ClientIssuer,
KeyID: keyIDStr,
RequireTLS: false,
}
return args
}
@@ -23,6 +23,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/grpcendpoint"
@@ -34,9 +35,10 @@ import (
// - client interceptors for transmitting our replication role.
// - do not use environment credentials. (for now).
type grpcDialProvider struct {
orig dbfactory.GRPCDialProvider
ci *clientinterceptor
cfg Config
orig dbfactory.GRPCDialProvider
ci *clientinterceptor
cfg Config
creds credentials.PerRPCCredentials
}
func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfactory.GRPCRemoteConfig, error) {
@@ -45,6 +47,7 @@ func (p grpcDialProvider) GetGRPCDialParams(config grpcendpoint.Config) (dbfacto
return dbfactory.GRPCRemoteConfig{}, err
}
config.TLSConfig = tlsConfig
config.Creds = p.creds
config.WithEnvCreds = false
cfg, err := p.orig.GetGRPCDialParams(config)
if err != nil {
@@ -0,0 +1,57 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import (
"crypto/ed25519"
"encoding/json"
"net/http"
"gopkg.in/square/go-jose.v2"
)
type JWKSHandler struct {
KeyID string
PublicKey ed25519.PublicKey
}
func (h JWKSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
b, err := json.Marshal(jose.JSONWebKeySet{
Keys: []jose.JSONWebKey{
jose.JSONWebKey{
Key: h.PublicKey,
KeyID: h.KeyID,
},
},
})
if err != nil {
http.Error(w, "error marshaling json", http.StatusInternalServerError)
return
}
w.Write(b)
}
func JWKSHandlerInterceptor(keyID string, pub ed25519.PublicKey) func(http.Handler) http.Handler {
jh := JWKSHandler{KeyID: keyID, PublicKey: pub}
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.EscapedPath() == "/.well-known/jwks.json" {
jh.ServeHTTP(w, r)
return
}
h.ServeHTTP(w, r)
})
}
}
+4 -18
View File
@@ -31,7 +31,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/types"
)
// SetupFunc can be run to perform additional setup work before a test case
@@ -46,7 +45,7 @@ func executeSelect(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *d
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
if err != nil {
return nil, nil, err
}
@@ -77,7 +76,7 @@ func executeModify(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *d
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
if err != nil {
return nil, err
@@ -130,19 +129,6 @@ func equalSchemas(t *testing.T, expectedSch schema.Schema, sch schema.Schema) {
}
}
// TODO: this shouldn't be here
func CreateWorkingRootUpdate() map[string]TableUpdate {
return map[string]TableUpdate{
TableWithHistoryName: {
RowUpdates: []row.Row{
mustRow(row.New(types.Format_Default, ReaddAgeAt5HistSch, row.TaggedValues{
0: types.Int(6), 1: types.String("Katie"), 2: types.String("McCulloch"),
})),
},
},
}
}
// Returns the dolt schema given as a sql.Schema, or panics.
func mustSqlSchema(sch schema.Schema) sql.Schema {
sqlSchema, err := sqlutil.FromDoltSchema("", sch)
@@ -203,7 +189,7 @@ func CreateTestTable(t *testing.T, dEnv *env.DoltEnv, tableName string, sch sche
require.NoError(t, err)
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, queries)
root, err = ExecuteSql(dEnv, root, queries)
require.NoError(t, err)
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
@@ -213,7 +199,7 @@ func ExecuteSetupSQL(ctx context.Context, queries string) SetupFn {
return func(t *testing.T, dEnv *env.DoltEnv) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, queries)
root, err = ExecuteSql(dEnv, root, queries)
require.NoError(t, err)
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
+75 -2
View File
@@ -37,6 +37,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/hash"
)
var ErrInvalidTableName = errors.NewKind("Invalid table name %s. Table names must match the regular expression " + doltdb.TableNameRegexStr)
@@ -98,6 +99,7 @@ var _ sql.CollatedDatabase = Database{}
var _ sql.Database = Database{}
var _ sql.StoredProcedureDatabase = Database{}
var _ sql.TableCreator = Database{}
var _ sql.IndexedTableCreator = Database{}
var _ sql.TableDropper = Database{}
var _ sql.TableRenamer = Database{}
var _ sql.TemporaryTableCreator = Database{}
@@ -511,12 +513,12 @@ func resolveAsOfTime(ctx *sql.Context, ddb *doltdb.DoltDB, head ref.DoltRef, asO
return nil, nil, err
}
hash, err := cm.HashOf()
h, err := cm.HashOf()
if err != nil {
return nil, nil, err
}
cmItr, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, hash, nil)
cmItr, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, []hash.Hash{h}, nil)
if err != nil {
return nil, nil, err
}
@@ -863,6 +865,27 @@ func (db Database) CreateTable(ctx *sql.Context, tableName string, sch sql.Prima
return db.createSqlTable(ctx, tableName, sch, collation)
}
// CreateIndexedTable creates a table with the name and schema given.
func (db Database) CreateIndexedTable(ctx *sql.Context, tableName string, sch sql.PrimaryKeySchema, idxDef sql.IndexDef, collation sql.CollationID) error {
if err := branch_control.CheckAccess(ctx, branch_control.Permissions_Write); err != nil {
return err
}
if strings.ToLower(tableName) == doltdb.DocTableName {
// validate correct schema
if !dtables.DoltDocsSqlSchema.Equals(sch.Schema) {
return fmt.Errorf("incorrect schema for dolt_docs table")
}
} else if doltdb.HasDoltPrefix(tableName) {
return ErrReservedTableName.New(tableName)
}
if !doltdb.IsValidTableName(tableName) {
return ErrInvalidTableName.New(tableName)
}
return db.createIndexedSqlTable(ctx, tableName, sch, idxDef, collation)
}
// Unlike the exported version CreateTable, createSqlTable doesn't enforce any table name checks.
func (db Database) createSqlTable(ctx *sql.Context, tableName string, sch sql.PrimaryKeySchema, collation sql.CollationID) error {
ws, err := db.GetWorkingSet(ctx)
@@ -892,6 +915,56 @@ func (db Database) createSqlTable(ctx *sql.Context, tableName string, sch sql.Pr
return schema.ErrUsingSpatialKey.New(tableName)
}
// Prevent any tables that use BINARY, CHAR, VARBINARY, VARCHAR prefixes
if schema.HasAutoIncrement(doltSch) {
ait, err := db.gs.GetAutoIncrementTracker(ctx)
if err != nil {
return err
}
ait.AddNewTable(tableName)
}
return db.createDoltTable(ctx, tableName, root, doltSch)
}
// Unlike the exported version CreateTable, createSqlTable doesn't enforce any table name checks.
func (db Database) createIndexedSqlTable(ctx *sql.Context, tableName string, sch sql.PrimaryKeySchema, idxDef sql.IndexDef, collation sql.CollationID) error {
ws, err := db.GetWorkingSet(ctx)
if err != nil {
return err
}
root := ws.WorkingRoot()
if exists, err := root.HasTable(ctx, tableName); err != nil {
return err
} else if exists {
return sql.ErrTableAlreadyExists.New(tableName)
}
headRoot, err := db.GetHeadRoot(ctx)
if err != nil {
return err
}
doltSch, err := sqlutil.ToDoltSchema(ctx, root, tableName, sch, headRoot, collation)
if err != nil {
return err
}
// Prevent any tables that use Spatial Types as Primary Key from being created
if schema.IsUsingSpatialColAsKey(doltSch) {
return schema.ErrUsingSpatialKey.New(tableName)
}
// Prevent any tables that use BINARY, CHAR, VARBINARY, VARCHAR prefixes in Primary Key
for _, idxCol := range idxDef.Columns {
col := sch.Schema[sch.Schema.IndexOfColName(idxCol.Name)]
if col.PrimaryKey && sql.IsText(col.Type) && idxCol.Length > 0 {
return sql.ErrUnsupportedIndexPrefix.New(col.Name)
}
}
if schema.HasAutoIncrement(doltSch) {
ait, err := db.gs.GetAutoIncrementTracker(ctx)
if err != nil {
@@ -92,9 +92,23 @@ func DoDoltCommit(ctx *sql.Context, args []string) (string, error) {
email = dSess.Email()
}
amend := apr.Contains(cli.AmendFlag)
msg, msgOk := apr.GetValue(cli.MessageArg)
if !msgOk {
return "", fmt.Errorf("Must provide commit message.")
if amend {
commit, err := dSess.GetHeadCommit(ctx, dbName)
if err != nil {
return "", err
}
commitMeta, err := commit.GetCommitMeta(ctx)
if err != nil {
return "", err
}
msg = commitMeta.Description
} else {
return "", fmt.Errorf("Must provide commit message.")
}
}
t := ctx.QueryTime()
@@ -111,6 +125,7 @@ func DoDoltCommit(ctx *sql.Context, args []string) (string, error) {
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
Amend: amend,
Force: apr.Contains(cli.ForceFlag),
Name: name,
Email: email,
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions/commitwalk"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -303,7 +304,7 @@ func (ltf *LogTableFunction) validateRevisionExpressions() error {
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "second revision must exist if first revision contains '^'")
}
if strings.Contains(revisionStr, "..") && strings.HasPrefix(revisionStr, "^") {
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "revision cannot contain both '..' and '^'")
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "revision cannot contain both '..' or '...' and '^'")
}
}
@@ -313,10 +314,10 @@ func (ltf *LogTableFunction) validateRevisionExpressions() error {
return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), ltf.secondRevisionExpr.String())
}
if strings.Contains(secondRevisionStr, "..") {
return ltf.invalidArgDetailsErr(ltf.secondRevisionExpr, "second revision cannot contain '..'")
return ltf.invalidArgDetailsErr(ltf.secondRevisionExpr, "second revision cannot contain '..' or '...'")
}
if strings.Contains(revisionStr, "..") {
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "revision cannot contain '..' if second revision exists")
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "revision cannot contain '..' or '...' if second revision exists")
}
}
@@ -334,7 +335,7 @@ func (ltf *LogTableFunction) validateRevisionExpressions() error {
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "must have revision in order to use --not")
}
if ltf.revisionExpr != nil && (strings.Contains(revisionStr, "..") || strings.HasPrefix(revisionStr, "^")) {
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "cannot use --not if '..' or '^' present in revision")
return ltf.invalidArgDetailsErr(ltf.revisionExpr, "cannot use --not if dots or '^' present in revision")
}
if ltf.secondRevisionExpr != nil && strings.HasPrefix(secondRevisionStr, "^") {
return ltf.invalidArgDetailsErr(ltf.secondRevisionExpr, "cannot use --not if '^' present in second revision")
@@ -352,7 +353,7 @@ func (ltf *LogTableFunction) validateRevisionExpressions() error {
// RowIter implements the sql.Node interface
func (ltf *LogTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter, error) {
revisionVal, excludingRevisionVal, err := ltf.evaluateArguments()
revisionVal, secondRevisionVal, threeDot, err := ltf.evaluateArguments()
if err != nil {
return nil, err
}
@@ -392,18 +393,40 @@ func (ltf *LogTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.RowIter
return nil, err
}
// Two dot log
if len(excludingRevisionVal) > 0 {
exCs, err := doltdb.NewCommitSpec(excludingRevisionVal)
// Two and three dot log
if len(secondRevisionVal) > 0 {
secondCs, err := doltdb.NewCommitSpec(secondRevisionVal)
if err != nil {
return nil, err
}
excludingCommit, err := sqledb.ddb.Resolve(ctx, exCs, nil)
secondCommit, err := sqledb.ddb.Resolve(ctx, secondCs, nil)
if err != nil {
return nil, err
}
return ltf.NewDotDotLogTableFunctionRowIter(ctx, sqledb.ddb, commit, excludingCommit, matchFunc, cHashToRefs)
if threeDot {
mergeBase, err := merge.MergeBase(ctx, commit, secondCommit)
if err != nil {
return nil, err
}
mergeCs, err := doltdb.NewCommitSpec(mergeBase.String())
if err != nil {
return nil, err
}
// Use merge base as excluding commit
mergeCommit, err := sqledb.ddb.Resolve(ctx, mergeCs, nil)
if err != nil {
return nil, err
}
return ltf.NewDotDotLogTableFunctionRowIter(ctx, sqledb.ddb, []*doltdb.Commit{commit, secondCommit}, mergeCommit, matchFunc, cHashToRefs)
}
return ltf.NewDotDotLogTableFunctionRowIter(ctx, sqledb.ddb, []*doltdb.Commit{commit}, secondCommit, matchFunc, cHashToRefs)
}
return ltf.NewLogTableFunctionRowIter(ctx, sqledb.ddb, commit, matchFunc, cHashToRefs)
@@ -455,39 +478,40 @@ func getCommitHashToRefs(ctx *sql.Context, ddb *doltdb.DoltDB, decoration string
return cHashToRefs, nil
}
// evaluateArguments returns revisionValStr and excludingRevisionValStr.
// evaluateArguments returns revisionValStr, secondRevisionValStr, and three dot boolean.
// It evaluates the argument expressions to turn them into values this LogTableFunction
// can use. Note that this method only evals the expressions, and doesn't validate the values.
func (ltf *LogTableFunction) evaluateArguments() (string, string, error) {
func (ltf *LogTableFunction) evaluateArguments() (string, string, bool, error) {
var revisionValStr string
var excludingRevisionValStr string
var secondRevisionValStr string
var err error
threeDot := false
if ltf.revisionExpr != nil {
revisionValStr, excludingRevisionValStr, err = getRevisionsFromExpr(ltf.ctx, ltf.revisionExpr, true)
revisionValStr, secondRevisionValStr, threeDot, err = getRevisionsFromExpr(ltf.ctx, ltf.revisionExpr, true)
if err != nil {
return "", "", err
return "", "", false, err
}
}
if ltf.secondRevisionExpr != nil {
rvs, ervs, err := getRevisionsFromExpr(ltf.ctx, ltf.secondRevisionExpr, false)
rvs, srvs, _, err := getRevisionsFromExpr(ltf.ctx, ltf.secondRevisionExpr, false)
if err != nil {
return "", "", err
return "", "", false, err
}
if len(rvs) > 0 {
revisionValStr = rvs
}
if len(ervs) > 0 {
excludingRevisionValStr = ervs
if len(srvs) > 0 {
secondRevisionValStr = srvs
}
}
if len(ltf.notRevision) > 0 {
excludingRevisionValStr = ltf.notRevision
secondRevisionValStr = ltf.notRevision
}
return revisionValStr, excludingRevisionValStr, nil
return revisionValStr, secondRevisionValStr, threeDot, nil
}
func mustExpressionToString(ctx *sql.Context, expr sql.Expression) string {
@@ -512,23 +536,28 @@ func expressionToString(ctx *sql.Context, expr sql.Expression) (string, error) {
return valStr, nil
}
// Gets revisionName and/or excludingRevisionName from sql expression
func getRevisionsFromExpr(ctx *sql.Context, expr sql.Expression, canDot bool) (string, string, error) {
// getRevisionsFromExpr returns the revisionName and/or secondRevisionName, as
// well as a threeDot boolean from sql expression
func getRevisionsFromExpr(ctx *sql.Context, expr sql.Expression, canDot bool) (string, string, bool, error) {
revisionValStr, err := expressionToString(ctx, expr)
if err != nil {
return "", "", err
return "", "", false, err
}
if canDot && strings.Contains(revisionValStr, "..") {
if strings.Contains(revisionValStr, "...") {
refs := strings.Split(revisionValStr, "...")
return refs[0], refs[1], true, nil
}
refs := strings.Split(revisionValStr, "..")
return refs[1], refs[0], nil
return refs[1], refs[0], false, nil
}
if strings.HasPrefix(revisionValStr, "^") {
return "", strings.TrimPrefix(revisionValStr, "^"), nil
return "", strings.TrimPrefix(revisionValStr, "^"), false, nil
}
return revisionValStr, "", nil
return revisionValStr, "", false, nil
}
//------------------------------------
@@ -547,12 +576,12 @@ type logTableFunctionRowIter struct {
}
func (ltf *LogTableFunction) NewLogTableFunctionRowIter(ctx *sql.Context, ddb *doltdb.DoltDB, commit *doltdb.Commit, matchFn func(*doltdb.Commit) (bool, error), cHashToRefs map[hash.Hash][]string) (*logTableFunctionRowIter, error) {
hash, err := commit.HashOf()
h, err := commit.HashOf()
if err != nil {
return nil, err
}
child, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, hash, matchFn)
child, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, []hash.Hash{h}, matchFn)
if err != nil {
return nil, err
}
@@ -562,14 +591,19 @@ func (ltf *LogTableFunction) NewLogTableFunctionRowIter(ctx *sql.Context, ddb *d
showParents: ltf.showParents,
decoration: ltf.decoration,
cHashToRefs: cHashToRefs,
headHash: hash,
headHash: h,
}, nil
}
func (ltf *LogTableFunction) NewDotDotLogTableFunctionRowIter(ctx *sql.Context, ddb *doltdb.DoltDB, commit, excludingCommit *doltdb.Commit, matchFn func(*doltdb.Commit) (bool, error), cHashToRefs map[hash.Hash][]string) (*logTableFunctionRowIter, error) {
hash, err := commit.HashOf()
if err != nil {
return nil, err
func (ltf *LogTableFunction) NewDotDotLogTableFunctionRowIter(ctx *sql.Context, ddb *doltdb.DoltDB, commits []*doltdb.Commit, excludingCommit *doltdb.Commit, matchFn func(*doltdb.Commit) (bool, error), cHashToRefs map[hash.Hash][]string) (*logTableFunctionRowIter, error) {
hashes := make([]hash.Hash, len(commits))
for i, commit := range commits {
h, err := commit.HashOf()
if err != nil {
return nil, err
}
hashes[i] = h
}
exHash, err := excludingCommit.HashOf()
@@ -577,17 +611,23 @@ func (ltf *LogTableFunction) NewDotDotLogTableFunctionRowIter(ctx *sql.Context,
return nil, err
}
child, err := commitwalk.GetDotDotRevisionsIterator(ctx, ddb, hash, exHash, matchFn)
child, err := commitwalk.GetDotDotRevisionsIterator(ctx, ddb, hashes, ddb, []hash.Hash{exHash}, matchFn)
if err != nil {
return nil, err
}
var headHash hash.Hash
if len(hashes) == 1 {
headHash = hashes[0]
}
return &logTableFunctionRowIter{
child: child,
showParents: ltf.showParents,
decoration: ltf.decoration,
cHashToRefs: cHashToRefs,
headHash: hash,
headHash: headHash,
}, nil
}
+27 -2
View File
@@ -468,14 +468,39 @@ func (d *DoltSession) NewPendingCommit(ctx *sql.Context, dbName string, roots do
return nil, err
}
headCommit := sessionState.headCommit
headHash, _ := headCommit.HashOf()
var mergeParentCommits []*doltdb.Commit
if sessionState.WorkingSet.MergeActive() {
mergeParentCommits = []*doltdb.Commit{sessionState.WorkingSet.MergeState().Commit()}
} else if props.Amend {
numParentsHeadForAmend := headCommit.NumParents()
for i := 0; i < numParentsHeadForAmend; i++ {
parentCommit, err := headCommit.GetParent(ctx, i)
if err != nil {
return nil, err
}
mergeParentCommits = append(mergeParentCommits, parentCommit)
}
err = actions.ResetSoftToRef(ctx, sessionState.dbData, "HEAD~1")
if err != nil {
return nil, err
}
}
pendingCommit, err := actions.GetCommitStaged(ctx, roots, sessionState.WorkingSet.MergeActive(), mergeParentCommits, sessionState.dbData.Ddb, props)
if _, ok := err.(actions.NothingStaged); err != nil && !ok {
return nil, err
if err != nil {
if props.Amend {
err = actions.ResetSoftToRef(ctx, sessionState.dbData, headHash.String())
if err != nil {
return nil, err
}
}
if _, ok := err.(actions.NothingStaged); err != nil && !ok {
return nil, err
}
}
return pendingCommit, nil
@@ -18,6 +18,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions/commitwalk"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
@@ -81,12 +82,12 @@ type LogItr struct {
// NewLogItr creates a LogItr from the current environment.
func NewLogItr(ctx *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) (*LogItr, error) {
hash, err := head.HashOf()
h, err := head.HashOf()
if err != nil {
return nil, err
}
child, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, hash, nil)
child, err := commitwalk.GetTopologicalOrderIterator(ctx, ddb, []hash.Hash{h}, nil)
if err != nil {
return nil, err
}
@@ -57,7 +57,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(t, dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName)
rows, err := sqle.ExecuteSelect(dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
expectedRows := []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
@@ -80,7 +80,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err = sqle.ExecuteSelect(t, dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName+" order by display_order")
rows, err = sqle.ExecuteSelect(dEnv, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName+" order by display_order")
require.NoError(t, err)
expectedRows = []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
@@ -89,7 +89,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
assert.Equal(t, expectedRows, rows)
rows, err = sqle.ExecuteSelect(t, dEnv, root, "select id from "+doltdb.DoltQueryCatalogTableName)
rows, err = sqle.ExecuteSelect(dEnv, root, "select id from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
for _, r := range rows {
assert.NotEmpty(t, r)
@@ -46,7 +46,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 82
const SkipPreparedsCount = 83
const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
@@ -505,6 +505,14 @@ func TestBlobs(t *testing.T) {
enginetest.TestBlobs(t, newDoltHarness(t))
}
func TestIndexPrefix(t *testing.T) {
harness := newDoltHarness(t)
enginetest.TestIndexPrefix(t, harness)
for _, script := range DoltIndexPrefixScripts {
enginetest.TestScript(t, harness, script)
}
}
func TestBigBlobs(t *testing.T) {
skipOldFormat(t)
@@ -683,13 +691,19 @@ func TestStoredProcedures(t *testing.T) {
}
func TestLargeJsonObjects(t *testing.T) {
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
harness := newDoltHarness(t)
for _, script := range LargeJsonObjectScriptTests {
enginetest.TestScript(t, harness, script)
}
}
func SkipByDefaultInCI(t *testing.T) {
if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
t.Skip()
}
}
func TestTransactions(t *testing.T) {
for _, script := range queries.TransactionTests {
enginetest.TestTransactionScript(t, newDoltHarness(t), script)
@@ -5198,14 +5198,26 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
Query: "SELECT * from dolt_log('^main..branch1');",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log('^main...branch1');",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log(@Commit1, 'main..branch1');",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log(@Commit1, 'main...branch1');",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log('main..branch1', '--not', @Commit1);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log('main...branch1', '--not', @Commit1);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
},
{
Query: "SELECT * from dolt_log('^main', '--not', @Commit1);",
ExpectedErr: sql.ErrInvalidArgumentDetails,
@@ -5366,30 +5378,39 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
"insert into t values (5, 'five', 'six');",
"set @Commit5 = dolt_commit('-am', 'inserting into t 5');",
},
/* Commit graph:
3 - 4 (new-branch)
/
0 - 1 - 2 - 5 (main)
*/
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT count(*) from dolt_log('^main', 'new-branch');",
Expected: []sql.Row{{2}},
Expected: []sql.Row{{2}}, // 4, 3
},
{
Query: "SELECT count(*) from dolt_log('main..new-branch');",
Expected: []sql.Row{{2}},
Expected: []sql.Row{{2}}, // 4, 3
},
{
Query: "SELECT count(*) from dolt_log('main...new-branch');",
Expected: []sql.Row{{3}}, // 5, 4, 3
},
{
Query: "SELECT count(*) from dolt_log('new-branch', '--not', 'main');",
Expected: []sql.Row{{2}},
Expected: []sql.Row{{2}}, // 4, 3
},
{
Query: "SELECT count(*) from dolt_log('new-branch', '^main');",
Expected: []sql.Row{{2}},
Expected: []sql.Row{{2}}, // 4, 3
},
{
Query: "SELECT count(*) from dolt_log('^new-branch', 'main');",
Expected: []sql.Row{{1}},
Expected: []sql.Row{{1}}, // 5
},
{
Query: "SELECT count(*) from dolt_log('main', '--not', 'new-branch');",
Expected: []sql.Row{{1}},
Expected: []sql.Row{{1}}, // 5
},
{
Query: "SELECT count(*) from dolt_log('^main', 'main');",
@@ -5399,6 +5420,10 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
Query: "SELECT count(*) from dolt_log('main..main');",
Expected: []sql.Row{{0}},
},
{
Query: "SELECT count(*) from dolt_log('main...main');",
Expected: []sql.Row{{0}},
},
{
Query: "SELECT count(*) from dolt_log('main', '--not', 'main');",
Expected: []sql.Row{{0}},
@@ -5409,7 +5434,7 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
},
{
Query: "SELECT count(*) from dolt_log('^main^', 'main');",
Expected: []sql.Row{{1}},
Expected: []sql.Row{{1}}, // 5
},
{
Query: "SELECT count(*) from dolt_log('^main', 'main^');",
@@ -5421,15 +5446,15 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
},
{
Query: "SELECT count(*) from dolt_log('^new-branch', @Commit5);",
Expected: []sql.Row{{1}},
Expected: []sql.Row{{1}}, // 5
},
{
Query: "SELECT count(*) from dolt_log(@Commit3, '--not', @Commit2);",
Expected: []sql.Row{{1}},
Expected: []sql.Row{{1}}, // 3
},
{
Query: "SELECT count(*) from dolt_log(@Commit4, '--not', @Commit2);",
Expected: []sql.Row{{2}},
Expected: []sql.Row{{2}}, // 4, 3
},
},
},
@@ -5492,6 +5517,11 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
"insert into t values (5, 'five', 'six');",
"set @Commit5 = dolt_commit('-am', 'inserting into t 5');",
},
/* Commit graph:
3 - 4 (new-branch)
/
0 - 1 - 2 - 5 (main)
*/
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT commit_hash = @Commit4, commit_hash = @Commit3, committer, email, message from dolt_log('^main', 'new-branch');",
@@ -5507,6 +5537,14 @@ var LogTableFunctionScriptTests = []queries.ScriptTest{
{false, true, "John Doe", "johndoe@example.com", "inserting into t 3"},
},
},
{
Query: "SELECT commit_hash = @Commit5, commit_hash = @Commit4, commit_hash = @Commit3, committer, email, message from dolt_log('main...new-branch');",
Expected: []sql.Row{
{true, false, false, "billy bob", "bigbillieb@fake.horse", "inserting into t 5"},
{false, true, false, "John Doe", "johndoe@example.com", "inserting into t 4"},
{false, false, true, "John Doe", "johndoe@example.com", "inserting into t 3"},
},
},
{
Query: "SELECT commit_hash = @Commit4, commit_hash = @Commit3, committer, email, message from dolt_log('new-branch', '--not', 'main');",
Expected: []sql.Row{
@@ -7703,9 +7741,24 @@ var DoltCommitTests = []queries.ScriptTest{
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "CALL DOLT_COMMIT('-ALL', '-m', 'update table t');",
Query: "CALL DOLT_COMMIT('-ALL', '-m', 'update table terminator');",
SkipResultsCheck: true,
},
// check last commit
{
Query: "select message from dolt_log limit 1",
Expected: []sql.Row{{"update table terminator"}},
},
// amend last commit
{
Query: "CALL DOLT_COMMIT('-amend', '-m', 'update table t');",
SkipResultsCheck: true,
},
// check amended commit
{
Query: "select message from dolt_log limit 1",
Expected: []sql.Row{{"update table t"}},
},
{
Query: "CALL DOLT_RESET('--hard');",
Expected: []sql.Row{{0}},
@@ -7737,9 +7790,19 @@ var DoltCommitTests = []queries.ScriptTest{
Expected: []sql.Row{{sql.NewOkResult(0)}},
},
{
Query: "CALL DOLT_COMMIT('-Am', 'add table 2');",
Query: "CALL DOLT_COMMIT('-Am', 'add table 21');",
SkipResultsCheck: true,
},
// amend last commit
{
Query: "CALL DOLT_COMMIT('-amend', '-m', 'add table 2');",
SkipResultsCheck: true,
},
// check amended commit
{
Query: "select message from dolt_log limit 1",
Expected: []sql.Row{{"add table 2"}},
},
{
Query: "CALL DOLT_RESET('--hard');",
Expected: []sql.Row{{0}},
@@ -7767,4 +7830,347 @@ var DoltCommitTests = []queries.ScriptTest{
},
},
},
{
Name: "CALL DOLT_COMMIT('-amend') works to update commit message",
SetUpScript: []string{
"SET @@AUTOCOMMIT=0;",
"CREATE TABLE test (id INT PRIMARY KEY );",
"INSERT INTO test (id) VALUES (2)",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'original commit message');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"original commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{{2, nil, "added"}},
},
{
Query: "CALL DOLT_COMMIT('--amend', '-m', 'amended commit message');",
SkipResultsCheck: true, // commit hash is being returned, skip check
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{{2, nil, "added"}},
},
},
},
{
Name: "CALL DOLT_COMMIT('-amend') works to add changes to a commit",
SetUpScript: []string{
"SET @@AUTOCOMMIT=0;",
"INSERT INTO test (id) VALUES (3)",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'original commit message for adding changes to a commit');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{
{3, nil, "added"},
{2, nil, "added"},
},
},
{
Query: "SELECT COUNT(*) FROM dolt_status;",
Expected: []sql.Row{{0}},
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"original commit message for adding changes to a commit"},
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "INSERT INTO test (id) VALUES (4)",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "SELECT COUNT(*) FROM dolt_status;",
Expected: []sql.Row{{1}},
},
{
Query: "CALL DOLT_ADD('.');",
Expected: []sql.Row{{0}},
},
{
Query: "CALL DOLT_COMMIT('--amend');",
SkipResultsCheck: true, // commit hash is being returned, skip check
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"original commit message for adding changes to a commit"},
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{
{4, nil, "added"},
{3, nil, "added"},
{2, nil, "added"},
},
},
{
Query: "INSERT INTO test (id) VALUES (5)",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "SELECT COUNT(*) FROM dolt_status;",
Expected: []sql.Row{{1}},
},
{
Query: "CALL DOLT_ADD('.');",
Expected: []sql.Row{{0}},
},
{
Query: "CALL DOLT_COMMIT('--amend', '-m', 'amended commit with added changes');",
SkipResultsCheck: true, // commit hash is being returned, skip check
},
{
Query: "SELECT COUNT(*) FROM dolt_status;",
Expected: []sql.Row{{0}},
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"amended commit with added changes"},
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{
{5, nil, "added"},
{4, nil, "added"},
{3, nil, "added"},
{2, nil, "added"},
},
},
},
},
{
Name: "CALL DOLT_COMMIT('-amend') works to remove changes from a commit",
SetUpScript: []string{
"SET @@AUTOCOMMIT=0;",
"INSERT INTO test (id) VALUES (6)",
"INSERT INTO test (id) VALUES (7)",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'original commit message');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * FROM test;",
Expected: []sql.Row{{2}, {3}, {4}, {5}, {6}, {7}},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{
{7, nil, "added"},
{6, nil, "added"},
{5, nil, "added"},
{4, nil, "added"},
{3, nil, "added"},
{2, nil, "added"},
},
},
{
Query: "DELETE FROM test WHERE id = 6",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "CALL DOLT_ADD('.');",
Expected: []sql.Row{{0}},
},
{
Query: "CALL DOLT_COMMIT('--amend', '-m', 'amended commit with removed changes');",
SkipResultsCheck: true, // commit hash is being returned, skip check
},
{
Query: "SELECT * FROM test;",
Expected: []sql.Row{{2}, {3}, {4}, {5}, {7}},
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"amended commit with removed changes"},
{"amended commit with added changes"},
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SELECT to_id, from_id, diff_type FROM dolt_diff_test;",
Expected: []sql.Row{
{7, nil, "added"},
{5, nil, "added"},
{4, nil, "added"},
{3, nil, "added"},
{2, nil, "added"},
},
},
},
},
{
Name: "CALL DOLT_COMMIT('-amend') works to update a merge commit",
SetUpScript: []string{
"SET @@AUTOCOMMIT=0;",
"CREATE TABLE test2 (id INT PRIMARY KEY, id2 INT);",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'original table');",
"CALL DOLT_CHECKOUT('-b','test-branch');",
"INSERT INTO test2 (id, id2) VALUES (0, 2)",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'conflicting commit message');",
"CALL DOLT_CHECKOUT('main');",
"INSERT INTO test2 (id, id2) VALUES (0, 1)",
"CALL DOLT_ADD('.');",
"CALL DOLT_COMMIT('-m', 'original commit message');",
"CALL DOLT_MERGE('test-branch');",
"CALL DOLT_CONFLICTS_RESOLVE('--theirs', '.');",
"CALL DOLT_COMMIT('-m', 'final merge');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "CALL DOLT_COMMIT('--amend', '-m', 'new merge');",
SkipResultsCheck: true, // commit hash is being returned, skip check
},
{
Query: "SELECT message FROM dolt_log;",
Expected: []sql.Row{
{"new merge"},
{"original commit message"},
{"conflicting commit message"},
{"original table"},
{"amended commit with removed changes"},
{"amended commit with added changes"},
{"amended commit message"},
{"author: somebody"},
{"add table 2"},
{"drop table t"},
{"update table t"},
{"add table t"},
{"checkpoint enginetest database mydb"},
{"Initialize data repository"},
},
},
{
Query: "SET @hash=(SELECT commit_hash FROM dolt_log LIMIT 1);",
Expected: []sql.Row{{}},
},
{
Query: "SELECT COUNT(parent_hash) FROM dolt_commit_ancestors WHERE commit_hash= @hash;",
Expected: []sql.Row{{2}},
},
},
},
}
var DoltIndexPrefixScripts = []queries.ScriptTest{
{
Name: "varchar prefix",
SetUpScript: []string{
"create table t (v varchar(100))",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "alter table t add primary key (v(10))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "alter table t add index (v(10))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "create table v_tbl (v varchar(100), primary key (v(10)))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "create table v_tbl (i int primary key, v varchar(100), index (v(10)))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
},
},
{
Name: "char prefix",
SetUpScript: []string{
"create table t (c char(100))",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "alter table t add primary key (c(10))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "alter table t add index (c(10))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "create table c_tbl (c char(100), primary key (c(10)))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
{
Query: "create table c_tbl (i int primary key, c char(100), index (c(10)))",
ExpectedErr: sql.ErrUnsupportedIndexPrefix,
},
},
},
}
@@ -1335,7 +1335,7 @@ func doltIndexSetup(t *testing.T) (*sql.Context, *doltdb.RootValue, map[string]i
if err != nil {
panic(err)
}
root, err = sqle.ExecuteSql(t, dEnv, root, `
root, err = sqle.ExecuteSql(dEnv, root, `
CREATE TABLE onepk (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -46,7 +46,7 @@ func setupIndexes(t *testing.T, tableName, insertQuery string) (*sqle.Engine, *e
db, err := dsqle.NewDatabase(context.Background(), "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := dsqle.NewTestEngine(t, dEnv, context.Background(), db, root)
engine, sqlCtx, err := dsqle.NewTestEngine(dEnv, context.Background(), db, root)
require.NoError(t, err)
_, iter, err := engine.Query(sqlCtx, fmt.Sprintf(`CREATE TABLE %s (
@@ -181,7 +181,7 @@ func TestDbRevision(t *testing.T) {
func populateCommitHashes(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValue) (cm1, cm2, cm3 hash.Hash) {
q := "SELECT commit_hash FROM dolt_log;"
rows, err := sqle.ExecuteSelect(t, dEnv, root, q)
rows, err := sqle.ExecuteSelect(dEnv, root, q)
require.NoError(t, err)
assert.Len(t, rows, 4)
cm3 = hash.Parse(rows[0][0].(string))
@@ -191,7 +191,7 @@ func populateCommitHashes(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValu
}
func makeTestAssertion(t *testing.T, a testAssert, dEnv *env.DoltEnv, root *doltdb.RootValue) {
actRows, err := sqle.ExecuteSelect(t, dEnv, root, a.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, a.query)
require.NoError(t, err)
require.Equal(t, len(a.rows), len(actRows))
@@ -17,6 +17,7 @@ package integration_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/dolthub/go-mysql-server/sql"
@@ -31,7 +32,7 @@ import (
)
func TestHistoryTable(t *testing.T) {
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := setupHistoryTests(t)
for _, test := range historyTableTests() {
t.Run(test.name, func(t *testing.T) {
@@ -40,6 +41,15 @@ func TestHistoryTable(t *testing.T) {
}
}
// SkipByDefaultInCI skips the currently executing test as long as the CI env var is set
// (GitHub Actions sets this automatically) and the DOLT_TEST_RUN_NON_RACE_TESTS env var
// is not set. This is useful for filtering out tests that cause race detection to fail.
func SkipByDefaultInCI(t *testing.T) {
if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
t.Skip()
}
}
type historyTableTest struct {
name string
setup []testCommand
@@ -212,7 +222,7 @@ func setupHistoryTests(t *testing.T) *env.DoltEnv {
// get commit hashes from the log table
q := "select commit_hash, date from dolt_log order by date desc;"
rows, err := sqle.ExecuteSelect(t, dEnv, root, q)
rows, err := sqle.ExecuteSelect(dEnv, root, q)
require.NoError(t, err)
require.Equal(t, 5, len(rows))
HEAD = rows[0][0].(string)
@@ -234,7 +244,7 @@ func testHistoryTable(t *testing.T, test historyTableTest, dEnv *env.DoltEnv) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(t, dEnv, root, test.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.rows), len(actRows))
@@ -48,7 +48,7 @@ func TestJsonValues(t *testing.T) {
t.Skip() // todo: convert to enginetests
}
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
setupCommon := []testCommand{
{cmd.SqlCmd{}, args{"-q", `create table js (pk int primary key, js json);`}},
}
@@ -136,7 +136,7 @@ func testJsonValue(t *testing.T, test jsonValueTest, setupCommon []testCommand)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(t, dEnv, root, test.query)
actRows, err := sqle.ExecuteSelect(dEnv, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.rows), len(actRows))
@@ -163,7 +163,7 @@ func TestLargeJsonObjects(t *testing.T) {
t.Skip() // todo: convert to enginetests
}
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
setupCommon := []testCommand{
{cmd.SqlCmd{}, args{"-q", `create table js (pk int primary key, js json);`}},
}
@@ -20126,13 +20126,13 @@ INSERT INTO join_result VALUES ('stock','ZYNE','us','2017-11-01',9.7,9.93,9.41,9
`
func TestCreateTables(t *testing.T) {
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := dtestutils.CreateTestEnv()
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
root, err = sqle.ExecuteSql(dEnv, root, createTables)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20148,16 +20148,16 @@ func TestInserts(t *testing.T) {
if types.Format_Default != types.Format_LD_1 {
t.Skip() // todo: convert to enginetests
}
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := dtestutils.CreateTestEnv()
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
root, err = sqle.ExecuteSql(dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20177,19 +20177,19 @@ func TestInsertsWithIndexes(t *testing.T) {
if types.Format_Default != types.Format_LD_1 {
t.Skip() // todo: convert to enginetests
}
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := dtestutils.CreateTestEnv()
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
root, err = sqle.ExecuteSql(dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(t, dEnv, root, createIndexes)
root, err = sqle.ExecuteSql(dEnv, root, createIndexes)
require.NoError(t, err)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20212,24 +20212,24 @@ func TestInsertsWithIndexes(t *testing.T) {
}
func TestJoin(t *testing.T) {
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := dtestutils.CreateTestEnv()
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
root, err = sqle.ExecuteSql(dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(t, dEnv, root, `select Type, d.Symbol, Country, TradingDate, Open, High, Low, Close, Volume, OpenInt, Name, Sector, IPOYear
rows, err := sqle.ExecuteSelect(dEnv, root, `select Type, d.Symbol, Country, TradingDate, Open, High, Low, Close, Volume, OpenInt, Name, Sector, IPOYear
from daily_summary d join symbols t on d.Symbol = t.Symbol order by d.Symbol, Country, TradingDate`)
require.NoError(t, err)
assert.Equal(t, 5210, len(rows))
expectedJoinRows, err := sqle.ExecuteSelect(t, dEnv, root, `select * from join_result order by symbol, country, TradingDate`)
expectedJoinRows, err := sqle.ExecuteSelect(dEnv, root, `select * from join_result order by symbol, country, TradingDate`)
require.NoError(t, err)
assertResultRowsEqual(t, expectedJoinRows, rows)
}
@@ -20260,16 +20260,16 @@ func assertResultRowsEqual(t *testing.T, expected, actual []sql.Row) {
}
func TestExplain(t *testing.T) {
sqle.SkipByDefaultInCI(t)
SkipByDefaultInCI(t)
dEnv := dtestutils.CreateTestEnv()
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
root, err = sqle.ExecuteSql(dEnv, root, createTables)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(t, dEnv, root, "explain select * from daily_summary d join symbols t on d.Symbol = t.Symbol")
rows, err := sqle.ExecuteSelect(dEnv, root, "explain select * from daily_summary d join symbols t on d.Symbol = t.Symbol")
require.NoError(t, err)
rowStrings := make([]string, len(rows))
for i, row := range rows {
@@ -20,6 +20,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/buffer"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -28,7 +29,9 @@ import (
)
func TestCommitHooksNoErrors(t *testing.T) {
dEnv := CreateEnvWithSeedData(t)
dEnv, err := CreateEnvWithSeedData()
require.NoError(t, err)
AddDoltSystemVariables()
sql.SystemVariables.SetGlobal(dsess.SkipReplicationErrors, true)
sql.SystemVariables.SetGlobal(dsess.ReplicateToRemote, "unknown")
+7 -7
View File
@@ -109,16 +109,16 @@ func GetOrCreateDoltSchemasTable(ctx *sql.Context, db Database) (retTbl *Writabl
return nil, sql.ErrTableNotFound.New("dolt_schemas")
}
// Create a unique index on the old primary key columns (type, name)
err = (&AlterableDoltTable{*tbl.(*WritableDoltTable)}).CreateIndex(ctx,
doltdb.SchemasTablesIndexName,
sql.IndexUsing_Default,
sql.IndexConstraint_Unique,
[]sql.IndexColumn{
t := (&AlterableDoltTable{*tbl.(*WritableDoltTable)})
err = t.CreateIndex(ctx, sql.IndexDef{
Name: doltdb.SchemasTablesIndexName,
Columns: []sql.IndexColumn{
{Name: doltdb.SchemasTablesTypeCol, Length: 0},
{Name: doltdb.SchemasTablesNameCol, Length: 0},
},
"",
)
Constraint: sql.IndexConstraint_Unique,
Storage: sql.IndexUsing_Default,
})
if err != nil {
return nil, err
}
@@ -43,7 +43,9 @@ func TestSchemaTableRecreationOlder(t *testing.T) {
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
dbState := getDbState(t, db, dEnv)
dbState, err := getDbState(db, dEnv)
require.NoError(t, err)
err = dsess.DSessFromSess(ctx.Session).AddDB(ctx, dbState)
require.NoError(t, err)
ctx.SetCurrentDatabase(db.Name())
@@ -126,7 +128,9 @@ func TestSchemaTableRecreation(t *testing.T) {
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
dbState := getDbState(t, db, dEnv)
dbState, err := getDbState(db, dEnv)
require.NoError(t, err)
err = dsess.DSessFromSess(ctx.Session).AddDB(ctx, dbState)
require.NoError(t, err)
ctx.SetCurrentDatabase(db.Name())
@@ -106,7 +106,7 @@ func (db *SingleTableInfoDatabase) LookupPartitions(context *sql.Context, lookup
}
// CreateIndexForForeignKey implements sql.ForeignKeyTable.
func (db *SingleTableInfoDatabase) CreateIndexForForeignKey(ctx *sql.Context, indexName string, using sql.IndexUsing, constraint sql.IndexConstraint, columns []sql.IndexColumn) error {
func (db *SingleTableInfoDatabase) CreateIndexForForeignKey(ctx *sql.Context, idx sql.IndexDef) error {
return fmt.Errorf("cannot create foreign keys on a single table information database")
}
+12 -6
View File
@@ -60,7 +60,9 @@ func TestSqlBatchInserts(t *testing.T) {
})
ctx := context.Background()
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
root, err := dEnv.WorkingRoot(ctx)
tmpDir, err := dEnv.TempTableFilesDir()
@@ -69,7 +71,7 @@ func TestSqlBatchInserts(t *testing.T) {
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
require.NoError(t, err)
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
@@ -155,7 +157,9 @@ func TestSqlBatchInsertIgnoreReplace(t *testing.T) {
}
ctx := context.Background()
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
@@ -165,7 +169,7 @@ func TestSqlBatchInsertIgnoreReplace(t *testing.T) {
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
require.NoError(t, err)
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
@@ -198,7 +202,9 @@ func TestSqlBatchInsertIgnoreReplace(t *testing.T) {
func TestSqlBatchInsertErrors(t *testing.T) {
ctx := context.Background()
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
@@ -208,7 +214,7 @@ func TestSqlBatchInsertErrors(t *testing.T) {
db, err := NewDatabase(ctx, "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
engine, sqlCtx, err := NewTestEngine(dEnv, ctx, db, root)
require.NoError(t, err)
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
+65 -47
View File
@@ -256,11 +256,13 @@ func TestCreateTable(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
dEnv := CreateEmptyTestDatabase(t)
dEnv, err := CreateEmptyTestDatabase()
require.NoError(t, err)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
} else {
@@ -329,11 +331,13 @@ func TestDropTable(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -366,42 +370,42 @@ func TestAddColumn(t *testing.T) {
query: "alter table people add (newColumn varchar(80))",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumn(t, "newColumn", 4208, sql.MustCreateStringWithDefaults(sqltypes.VarChar, 80), false)),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 4208, nil),
expectedRows: addColToRows(t, AllPeopleRows, 4208, nil),
},
{
name: "alter add float column without default",
query: "alter table people add (newColumn float)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumn(t, "newColumn", 4208, sql.Float32, false)),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 4208, nil),
expectedRows: addColToRows(t, AllPeopleRows, 4208, nil),
},
{
name: "alter add uint column without default",
query: "alter table people add (newColumn bigint unsigned)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumn(t, "newColumn", 4208, sql.Uint64, false)),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 4208, nil),
expectedRows: addColToRows(t, AllPeopleRows, 4208, nil),
},
{
name: "alter add int column default",
query: "alter table people add (newColumn int default 2)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 2803, sql.Int32, false, "2")),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 2803, types.Int(int32(2))),
expectedRows: addColToRows(t, AllPeopleRows, 2803, types.Int(int32(2))),
},
{
name: "alter add uint column default",
query: "alter table people add (newColumn bigint unsigned default 20)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 517, sql.Uint64, false, "20")),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 517, types.Uint(uint64(20))),
expectedRows: addColToRows(t, AllPeopleRows, 517, types.Uint(uint64(20))),
},
{
name: "alter add string column with default",
query: "alter table people add (newColumn varchar(80) default 'hi')",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 13690, sql.MustCreateStringWithDefaults(sqltypes.VarChar, 80), false, `'hi'`)),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 13690, types.String("hi")),
expectedRows: addColToRows(t, AllPeopleRows, 13690, types.String("hi")),
},
{
name: "alter add column first",
@@ -417,7 +421,7 @@ func TestAddColumn(t *testing.T) {
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 4208, nil),
expectedRows: addColToRows(t, AllPeopleRows, 4208, nil),
},
{
name: "alter add column middle",
@@ -433,28 +437,28 @@ func TestAddColumn(t *testing.T) {
schema.NewColumn("uuid", UuidTag, types.StringKind, false),
schema.NewColumn("num_episodes", NumEpisodesTag, types.UintKind, false),
),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 4208, nil),
expectedRows: addColToRows(t, AllPeopleRows, 4208, nil),
},
{
name: "alter add column not null",
query: "alter table people add (newColumn varchar(80) not null default 'default')",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 13690, sql.MustCreateStringWithDefaults(sqltypes.VarChar, 80), false, `'default'`, schema.NotNullConstraint{})),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 13690, types.String("default")),
expectedRows: addColToRows(t, AllPeopleRows, 13690, types.String("default")),
},
{
name: "alter add column not null with expression default",
query: "alter table people add (newColumn int not null default (2+2/2))",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 2803, sql.Int32, false, "((2 + (2 / 2)))", schema.NotNullConstraint{})),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 2803, types.Int(3)),
expectedRows: addColToRows(t, AllPeopleRows, 2803, types.Int(3)),
},
{
name: "alter add column not null with negative expression",
query: "alter table people add (newColumn float not null default -1.1)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 12469, sql.Float32, false, "-1.1", schema.NotNullConstraint{})),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 12469, types.Float(float32(-1.1))),
expectedRows: addColToRows(t, AllPeopleRows, 12469, types.Float(float32(-1.1))),
},
{
name: "alter add column not null with type mismatch in default",
@@ -476,7 +480,7 @@ func TestAddColumn(t *testing.T) {
query: "alter table people add (newColumn varchar(80) not null)",
expectedSchema: dtestutils.AddColumnToSchema(PeopleTestSchema,
schemaNewColumnWDefVal(t, "newColumn", 13690, sql.MustCreateStringWithDefaults(sqltypes.VarChar, 80), false, "", schema.NotNullConstraint{})),
expectedRows: dtestutils.AddColToRows(t, AllPeopleRows, 13690, types.String("")),
expectedRows: addColToRows(t, AllPeopleRows, 13690, types.String("")),
},
{
name: "alter add column nullable",
@@ -501,11 +505,14 @@ func TestAddColumn(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dEnv := CreateTestDatabase(t)
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
ctx := context.Background()
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -619,11 +626,13 @@ func TestRenameColumn(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -732,11 +741,14 @@ func TestRenameTableStatements(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dEnv := CreateTestDatabase(t)
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
ctx := context.Background()
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
if len(tt.expectedErr) > 0 {
require.Error(t, err)
assert.Contains(t, err.Error(), tt.expectedErr)
@@ -786,10 +798,16 @@ func TestAlterSystemTables(t *testing.T) {
reservedTableNames := []string{"dolt_schemas", "dolt_query_catalog"}
var dEnv *env.DoltEnv
var err error
setup := func() {
dEnv = CreateTestDatabase(t)
dtestutils.CreateEmptyTestTable(t, dEnv, "dolt_docs", doltdb.DocsSchema)
dtestutils.CreateEmptyTestTable(t, dEnv, doltdb.SchemasTableName, SchemasTableSchema())
dEnv, err = CreateTestDatabase()
require.NoError(t, err)
err := CreateEmptyTestTable(dEnv, "dolt_docs", doltdb.DocsSchema)
require.NoError(t, err)
err = CreateEmptyTestTable(dEnv, doltdb.SchemasTableName, SchemasTableSchema())
require.NoError(t, err)
CreateTestTable(t, dEnv, "dolt_docs", doltdb.DocsSchema,
"INSERT INTO dolt_docs VALUES ('LICENSE.md','A license')")
@@ -1071,7 +1089,7 @@ func TestIndexOverwrite(t *testing.T) {
if err != nil {
panic(err)
}
root, err = ExecuteSql(t, dEnv, root, `
root, err = ExecuteSql(dEnv, root, `
CREATE TABLE parent (
pk bigint PRIMARY KEY,
v1 bigint,
@@ -1110,13 +1128,13 @@ INSERT INTO child_non_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5'
`)
// test index creation
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "CREATE INDEX abc ON child (parent_value);")
root, err = ExecuteSql(dEnv, root, "CREATE INDEX abc ON child (parent_value);")
require.NoError(t, err)
_, err = ExecuteSql(t, dEnv, root, "CREATE INDEX abc_idx ON child_idx (parent_value);")
_, err = ExecuteSql(dEnv, root, "CREATE INDEX abc_idx ON child_idx (parent_value);")
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX abc_unq ON child_unq (parent_value);")
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX abc_unq ON child_unq (parent_value);")
require.NoError(t, err)
_, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX abc_non_unq ON child_non_unq (parent_value);")
_, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX abc_non_unq ON child_non_unq (parent_value);")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "duplicate unique key given")
}
@@ -1138,31 +1156,31 @@ INSERT INTO child_non_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5'
require.Equal(t, "parent_value", fkChildNonUnq.TableIndex)
// insert tests against index
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child VALUES ('6', 5)")
root, err = ExecuteSql(dEnv, root, "INSERT INTO child VALUES ('6', 5)")
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_idx VALUES ('6', 5)")
root, err = ExecuteSql(dEnv, root, "INSERT INTO child_idx VALUES ('6', 5)")
require.NoError(t, err)
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_unq VALUES ('6', 5)")
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_unq VALUES ('6', 5)")
if assert.Error(t, err) {
assert.True(t, sql.ErrUniqueKeyViolation.Is(err.(sql.WrappedInsertError).Cause))
}
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_non_unq VALUES ('6', 5)")
root, err = ExecuteSql(dEnv, root, "INSERT INTO child_non_unq VALUES ('6', 5)")
require.NoError(t, err)
// insert tests against foreign key
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child VALUES ('9', 9)")
_, err = ExecuteSql(dEnv, root, "INSERT INTO child VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_idx VALUES ('9', 9)")
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_idx VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_unq VALUES ('9', 9)")
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_unq VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_non_unq VALUES ('9', 9)")
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_non_unq VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
@@ -1174,7 +1192,7 @@ func TestCreateIndexUnique(t *testing.T) {
if err != nil {
panic(err)
}
root, err = ExecuteSql(t, dEnv, root, `
root, err = ExecuteSql(dEnv, root, `
CREATE TABLE pass_unique (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -1189,9 +1207,9 @@ INSERT INTO pass_unique VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
INSERT INTO fail_unique VALUES (1, 1, 1), (2, 2, 2), (3, 2, 3);
`)
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON pass_unique(v1)")
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON pass_unique(v1)")
assert.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON fail_unique(v1)")
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON fail_unique(v1)")
if assert.Error(t, err) {
assert.Contains(t, strings.ToLower(err.Error()), "unique")
}
@@ -1200,7 +1218,7 @@ INSERT INTO fail_unique VALUES (1, 1, 1), (2, 2, 2), (3, 2, 3);
func assertFails(t *testing.T, dEnv *env.DoltEnv, query, expectedErr string) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
_, err := ExecuteSql(t, dEnv, root, query)
_, err := ExecuteSql(dEnv, root, query)
require.Error(t, err, query)
assert.Contains(t, err.Error(), expectedErr)
}
@@ -1208,6 +1226,6 @@ func assertFails(t *testing.T, dEnv *env.DoltEnv, query, expectedErr string) {
func assertSucceeds(t *testing.T, dEnv *env.DoltEnv, query string) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
_, err := ExecuteSql(t, dEnv, root, query)
_, err := ExecuteSql(dEnv, root, query)
assert.NoError(t, err, query)
}
+3 -2
View File
@@ -227,12 +227,13 @@ func testDeleteQuery(t *testing.T, test DeleteTest) {
t.Skip("Skipping tests until " + singleDeleteQueryTest)
}
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(t, context.Background(), dEnv, root, test.DeleteQuery)
if len(test.ExpectedErr) > 0 {
@@ -511,6 +511,10 @@ func valueAsSqlString(ti typeinfo.TypeInfo, value types.Value) (string, error) {
}
func interfaceValueAsSqlString(ti typeinfo.TypeInfo, value interface{}) (string, error) {
if value == nil {
return "NULL", nil
}
str, err := sqlutil.SqlColToStr(ti.ToSqlType(), value)
if err != nil {
return "", err
@@ -88,27 +88,58 @@ func TestRenameTableStmt(t *testing.T) {
assert.Equal(t, expectedRenameTableSql, stmt)
}
// newRow builds a test row for the people schema from the given field values.
// A nil title maps to a nil types.Value (NULL); isMarried is encoded as an
// Int 0/1. Panics if row.New rejects the tagged values, which keeps test
// fixture construction terse.
func newRow(sch schema.Schema, id uuid.UUID, name string, age uint, isMarried bool, title *string) row.Row {
// title == nil leaves titleVal as the zero types.Value, i.e. NULL.
var titleVal types.Value
if title != nil {
titleVal = types.String(*title)
}
// Booleans are stored as Int 0/1 in this schema.
married := types.Int(0)
if isMarried {
married = types.Int(1)
}
taggedVals := row.TaggedValues{
dtestutils.IdTag: types.String(id.String()),
dtestutils.NameTag: types.String(name),
dtestutils.AgeTag: types.Uint(age),
dtestutils.IsMarriedTag: married,
dtestutils.TitleTag: titleVal,
}
r, err := row.New(types.Format_Default, sch, taggedVals)
if err != nil {
panic(err)
}
return r
}
func TestRowAsInsertStmt(t *testing.T) {
id := uuid.MustParse("00000000-0000-0000-0000-000000000000")
tableName := "people"
sch, err := dtestutils.Schema()
require.NoError(t, err)
tests := []test{
{
name: "simple row",
row: dtestutils.NewTypedRow(id, "some guy", 100, false, strPointer("normie")),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, "some guy", 100, false, strPointer("normie")),
sch: sch,
expectedOutput: "INSERT INTO `people` (`id`,`name`,`age`,`is_married`,`title`) VALUES ('00000000-0000-0000-0000-000000000000','some guy',100,0,'normie');",
},
{
name: "embedded quotes",
row: dtestutils.NewTypedRow(id, `It's "Mister Perfect" to you`, 100, false, strPointer("normie")),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, `It's "Mister Perfect" to you`, 100, false, strPointer("normie")),
sch: sch,
expectedOutput: "INSERT INTO `people` (`id`,`name`,`age`,`is_married`,`title`) VALUES ('00000000-0000-0000-0000-000000000000','It\\'s \\\"Mister Perfect\\\" to you',100,0,'normie');",
},
{
name: "null values",
row: dtestutils.NewTypedRow(id, "some guy", 100, false, nil),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, "some guy", 100, false, nil),
sch: sch,
expectedOutput: "INSERT INTO `people` (`id`,`name`,`age`,`is_married`,`title`) VALUES ('00000000-0000-0000-0000-000000000000','some guy',100,0,NULL);",
},
}
@@ -163,32 +194,35 @@ func TestRowAsUpdateStmt(t *testing.T) {
id := uuid.MustParse("00000000-0000-0000-0000-000000000000")
tableName := "people"
sch, err := dtestutils.Schema()
require.NoError(t, err)
tests := []updateTest{
{
name: "simple row",
row: dtestutils.NewTypedRow(id, "some guy", 100, false, strPointer("normie")),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, "some guy", 100, false, strPointer("normie")),
sch: sch,
expectedOutput: "UPDATE `people` SET `name`='some guy',`age`=100,`is_married`=0,`title`='normie' WHERE (`id`='00000000-0000-0000-0000-000000000000');",
collDiff: set.NewStrSet([]string{"name", "age", "is_married", "title"}),
},
{
name: "embedded quotes",
row: dtestutils.NewTypedRow(id, `It's "Mister Perfect" to you`, 100, false, strPointer("normie")),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, `It's "Mister Perfect" to you`, 100, false, strPointer("normie")),
sch: sch,
expectedOutput: "UPDATE `people` SET `name`='It\\'s \\\"Mister Perfect\\\" to you',`age`=100,`is_married`=0,`title`='normie' WHERE (`id`='00000000-0000-0000-0000-000000000000');",
collDiff: set.NewStrSet([]string{"name", "age", "is_married", "title"}),
},
{
name: "null values",
row: dtestutils.NewTypedRow(id, "some guy", 100, false, nil),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, "some guy", 100, false, nil),
sch: sch,
expectedOutput: "UPDATE `people` SET `name`='some guy',`age`=100,`is_married`=0,`title`=NULL WHERE (`id`='00000000-0000-0000-0000-000000000000');",
collDiff: set.NewStrSet([]string{"name", "age", "is_married", "title"}),
},
{
name: "partial update",
row: dtestutils.NewTypedRow(id, "some guy", 100, false, nil),
sch: dtestutils.TypedSchema,
row: newRow(sch, id, "some guy", 100, false, nil),
sch: sch,
expectedOutput: "UPDATE `people` SET `name`='some guy' WHERE (`id`='00000000-0000-0000-0000-000000000000');",
collDiff: set.NewStrSet([]string{"name"}),
},
+2 -2
View File
@@ -430,13 +430,13 @@ func testInsertQuery(t *testing.T, test InsertTest) {
t.Skip("Skipping test broken on SQL engine")
}
dEnv := CreateEmptyTestDatabase(t)
dEnv, err := CreateEmptyTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(t, context.Background(), dEnv, root, test.InsertQuery)
if len(test.ExpectedErr) > 0 {
@@ -94,7 +94,8 @@ func TestExecutePersist(t *testing.T) {
// Tests the given query on a freshly created dataset, asserting that the result has the given schema and rows. If
// expectedErr is set, asserts instead that the execution returns an error that matches.
func testPersistQuery(t *testing.T, test PersistTest) {
dEnv := CreateEmptyTestDatabase(t)
dEnv, err := CreateEmptyTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
@@ -102,7 +103,6 @@ func testPersistQuery(t *testing.T, test PersistTest) {
sql.InitSystemVariables()
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(t, context.Background(), dEnv, root, test.PersistQuery)
if len(test.ExpectedErr) > 0 {
@@ -300,13 +300,13 @@ func testReplaceQuery(t *testing.T, test ReplaceTest) {
t.Skip("Skipping tests until " + singleReplaceQueryTest)
}
dEnv := CreateEmptyTestDatabase(t)
dEnv, err := CreateEmptyTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(t, context.Background(), dEnv, root, test.ReplaceQuery)
if len(test.ExpectedErr) > 0 {
+290 -18
View File
@@ -24,8 +24,10 @@ import (
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
@@ -544,29 +546,34 @@ func BasicSelectTests() []SelectTest {
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select *, binary + in where type mismatch",
Query: "select * from people where first_name + 1 = 41",
ExpectedErr: "Type mismatch evaluating expression 'first_name + 1'",
Name: "select *, binary + in where type mismatch",
Query: "select * from people where first_name + 1 = 41",
ExpectedRows: ToSqlRows(PeopleTestSchema),
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select *, binary - in where type mismatch",
Query: "select * from people where first_name - 1 = 39",
ExpectedErr: "Type mismatch evaluating expression 'first_name - 1'",
Name: "select *, binary - in where type mismatch",
Query: "select * from people where first_name - 1 = 39",
ExpectedRows: ToSqlRows(PeopleTestSchema),
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select *, binary / in where type mismatch",
Query: "select * from people where first_name / 2 = 20",
ExpectedErr: "Type mismatch evaluating expression 'first_name / 2'",
Name: "select *, binary / in where type mismatch",
Query: "select * from people where first_name / 2 = 20",
ExpectedRows: ToSqlRows(PeopleTestSchema),
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select *, binary * in where type mismatch",
Query: "select * from people where first_name * 2 = 80",
ExpectedErr: "Type mismatch evaluating expression 'first_name * 2'",
Name: "select *, binary * in where type mismatch",
Query: "select * from people where first_name * 2 = 80",
ExpectedRows: ToSqlRows(PeopleTestSchema),
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select *, binary % in where type mismatch",
Query: "select * from people where first_name % 4 = 0",
ExpectedErr: "Type mismatch evaluating expression 'first_name % 4'",
Name: "select *, binary % in where type mismatch",
Query: "select * from people where first_name % 4 = 0",
ExpectedRows: ToSqlRows(PeopleTestSchema, AllPeopleRows...), // invalid value is considered as 0
ExpectedSchema: CompressSchema(PeopleTestSchema),
},
{
Name: "select * with where, order by",
@@ -1355,7 +1362,9 @@ func testSelectQuery(t *testing.T, test SelectTest) {
cleanup := installTestCommitClock()
defer cleanup()
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
@@ -1397,13 +1406,161 @@ func testSelectQuery(t *testing.T, test SelectTest) {
assertSchemasEqual(t, sqlSchema, sch)
}
// TableWithHistoryName is the table that the history/diff tests below create
// and mutate across several commits.
const TableWithHistoryName = "test_table"

// Successive schema states of the history table, in commit order: the initial
// three columns, then an addr column at tag 3, an int age at tag 4 (on a side
// branch), and finally age re-added as a uint at tag 5.
var InitialHistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr)
var AddAddrAt3HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, addrColTag3TypeStr)
var AddAgeAt4HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, ageColTag4TypeInt)
var ReaddAgeAt5HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, addrColTag3TypeStr, ageColTag5TypeUint)
// TableUpdate defines a set of modifications that should be made to a table
// as part of a single HistoryNode commit.
type TableUpdate struct {
// NewSch is an updated schema for this table. It overwrites the existing value. If not provided, the existing
// schema will not change.
NewSch schema.Schema
// NewRowData, if provided, overwrites the entirety of the row data in the table.
NewRowData *types.Map
// RowUpdates are new values for rows that should be set in the map. They can be updates or inserts.
RowUpdates []row.Row
}
// HistoryNode represents a commit to be made when building a test commit graph.
type HistoryNode struct {
// Branch is the branch that the commit should be on.
Branch string
// CommitMsg is the commit message that should be applied.
CommitMsg string
// Updates are the changes that should be made to the tables' states before committing.
Updates map[string]TableUpdate
// Children are the child commits of this commit.
Children []HistoryNode
}
// mustRowData converts a slice of row.TaggedValues into a noms types.Map containing that data,
// keyed/valued per the given schema. Any failure aborts the test via require.
func mustRowData(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, colVals []row.TaggedValues) *types.Map {
m, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
me := m.Edit()
for _, taggedVals := range colVals {
r, err := row.New(types.Format_Default, sch, taggedVals)
require.NoError(t, err)
me = me.Set(r.NomsMapKey(sch), r.NomsMapValue(sch))
}
m, err = me.Map(ctx)
require.NoError(t, err)
return &m
}
// CreateHistory returns the commit graph used by the history/diff tests:
// a "seed" commit with three users, which branches into (a) an "add-age"
// branch that adds an int age column at tag 4, and (b) the default branch,
// which adds an addr column at tag 3 and then re-adds age as a uint at tag 5.
func CreateHistory(ctx context.Context, dEnv *env.DoltEnv, t *testing.T) []HistoryNode {
vrw := dEnv.DoltDB.ValueReadWriter()

return []HistoryNode{
{
Branch: "seed",
CommitMsg: "Seeding with initial user data",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: InitialHistSch,
NewRowData: mustRowData(t, ctx, vrw, InitialHistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son")},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks")},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn")},
}),
},
},
Children: []HistoryNode{
{
Branch: "add-age",
CommitMsg: "Adding int age to users with tag 3",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: AddAgeAt4HistSch,
NewRowData: mustRowData(t, ctx, vrw, AddAgeAt4HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 4: types.Int(35)},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 4: types.Int(38)},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 4: types.Int(37)},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave"), 4: types.Int(37)},
}),
},
},
Children: nil,
},
{
Branch: env.DefaultInitBranch,
CommitMsg: "Adding string address to users with tag 3",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: AddAddrAt3HistSch,
NewRowData: mustRowData(t, ctx, vrw, AddAddrAt3HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 3: types.String("123 Fake St")},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 3: types.String("456 Bull Ln")},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 3: types.String("789 Not Real Ct")},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave")},
{0: types.Int(4), 1: types.String("Matt"), 2: types.String("Jesuele")},
}),
},
},
Children: []HistoryNode{
{
Branch: env.DefaultInitBranch,
CommitMsg: "Re-add age as a uint with tag 4",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: ReaddAgeAt5HistSch,
NewRowData: mustRowData(t, ctx, vrw, ReaddAgeAt5HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 3: types.String("123 Fake St"), 5: types.Uint(35)},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 3: types.String("456 Bull Ln"), 5: types.Uint(38)},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 3: types.String("789 Not Real Ct"), 5: types.Uint(37)},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave"), 3: types.String("-1 Imaginary Wy"), 5: types.Uint(37)},
{0: types.Int(4), 1: types.String("Matt"), 2: types.String("Jesuele")},
{0: types.Int(5), 1: types.String("Daylon"), 2: types.String("Wilkins")},
}),
},
},
Children: nil,
},
},
},
},
},
}
}
// Columns used to assemble the history-table test schemas above. Note the two
// "age" columns deliberately share a name but use different tags/kinds to
// model a drop-and-re-add across commits.
// NOTE(review): idColTag0TypeUUID is declared with types.IntKind despite the
// "UUID" in its name — confirm whether the name is stale.
var idColTag0TypeUUID = schema.NewColumn("id", 0, types.IntKind, true)
var firstColTag1TypeStr = schema.NewColumn("first_name", 1, types.StringKind, false)
var lastColTag2TypeStr = schema.NewColumn("last_name", 2, types.StringKind, false)
var addrColTag3TypeStr = schema.NewColumn("addr", 3, types.StringKind, false)
var ageColTag4TypeInt = schema.NewColumn("age", 4, types.IntKind, false)
var ageColTag5TypeUint = schema.NewColumn("age", 5, types.UintKind, false)
// DiffSchema is the schema expected from diff queries in these tests:
// paired to_*/from_* columns for each user column plus a diff_type column.
var DiffSchema = dtestutils.MustSchema(
schema.NewColumn("to_id", 0, types.IntKind, false),
schema.NewColumn("to_first_name", 1, types.StringKind, false),
schema.NewColumn("to_last_name", 2, types.StringKind, false),
schema.NewColumn("to_addr", 3, types.StringKind, false),
schema.NewColumn("from_id", 7, types.IntKind, false),
schema.NewColumn("from_first_name", 8, types.StringKind, false),
schema.NewColumn("from_last_name", 9, types.StringKind, false),
schema.NewColumn("from_addr", 10, types.StringKind, false),
schema.NewColumn("diff_type", 14, types.StringKind, false),
)
func testSelectDiffQuery(t *testing.T, test SelectTest) {
validateTest(t, test)
ctx := context.Background()
cleanup := installTestCommitClock()
defer cleanup()
dEnv := dtestutils.CreateTestEnv()
InitializeWithHistory(t, ctx, dEnv, CreateHistory(ctx, dEnv, t)...)
initializeWithHistory(t, ctx, dEnv, CreateHistory(ctx, dEnv, t)...)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
@@ -1426,7 +1583,7 @@ func testSelectDiffQuery(t *testing.T, test SelectTest) {
root, err = dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
root = UpdateTables(t, ctx, root, CreateWorkingRootUpdate())
root = updateTables(t, ctx, root, createWorkingRootUpdate())
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
@@ -1451,6 +1608,121 @@ func testSelectDiffQuery(t *testing.T, test SelectTest) {
assertSchemasEqual(t, sqlSchema, sch)
}
// TODO: this shouldn't be here
// createWorkingRootUpdate returns the update applied to the working root in
// the diff tests: a single new row (id 6, "Katie McCulloch") for the history
// table, built against the ReaddAgeAt5HistSch schema.
func createWorkingRootUpdate() map[string]TableUpdate {
vals := row.TaggedValues{
0: types.Int(6), 1: types.String("Katie"), 2: types.String("McCulloch"),
}
r := mustRow(row.New(types.Format_Default, ReaddAgeAt5HistSch, vals))
updates := make(map[string]TableUpdate, 1)
updates[TableWithHistoryName] = TableUpdate{
RowUpdates: []row.Row{r},
}
return updates
}
// updateTables applies each TableUpdate to the given root, replacing schema
// and/or row data as requested, and returns the resulting root. Failures
// abort the test via require.
// NOTE(review): if the table does not exist (ok == false) and NewSch is nil,
// tbl.GetSchema is called on a nil table — confirm callers always supply
// NewSch for new tables.
func updateTables(t *testing.T, ctx context.Context, root *doltdb.RootValue, tblUpdates map[string]TableUpdate) *doltdb.RootValue {
for tblName, updates := range tblUpdates {
tbl, ok, err := root.GetTable(ctx, tblName)
require.NoError(t, err)

// Prefer the explicitly supplied schema; otherwise keep the table's current one.
var sch schema.Schema
if updates.NewSch != nil {
sch = updates.NewSch
} else {
sch, err = tbl.GetSchema(ctx)
require.NoError(t, err)
}

// Start from replacement row data if given, else the existing rows
// (or an empty map for a brand-new table).
var rowData types.Map
if updates.NewRowData == nil {
if ok {
rowData, err = tbl.GetNomsRowData(ctx)
require.NoError(t, err)
} else {
rowData, err = types.NewMap(ctx, root.VRW())
require.NoError(t, err)
}
} else {
rowData = *updates.NewRowData
}

// Apply individual row upserts on top of the base row data.
if updates.RowUpdates != nil {
me := rowData.Edit()
for _, r := range updates.RowUpdates {
me = me.Set(r.NomsMapKey(sch), r.NomsMapValue(sch))
}
rowData, err = me.Map(ctx)
require.NoError(t, err)
}

// Carry over existing index data when the table already exists.
// NOTE(review): the require.NoError below re-checks the prior err and
// looks redundant — confirm it isn't masking a dropped error check.
var indexData durable.IndexSet
require.NoError(t, err)
if tbl != nil {
indexData, err = tbl.GetIndexSet(ctx)
require.NoError(t, err)
}

// Rebuild the table with the final schema/rows/indexes and put it back
// into the root.
tbl, err = doltdb.NewNomsTable(ctx, root.VRW(), root.NodeStore(), sch, rowData, indexData, nil)
require.NoError(t, err)

root, err = root.PutTable(ctx, tblName, tbl)
require.NoError(t, err)
}

return root
}
// initializeWithHistory walks the provided historyNodes and creates the
// intended commit graph, rooting each top-level node at the current head of
// the default init branch.
func initializeWithHistory(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, historyNodes ...HistoryNode) {
for _, node := range historyNodes {
// Resolve the default branch head to serve as the parent commit.
cs, err := doltdb.NewCommitSpec(env.DefaultInitBranch)
require.NoError(t, err)

cm, err := dEnv.DoltDB.Resolve(ctx, cs, nil)
require.NoError(t, err)

processNode(t, ctx, dEnv, node, cm)
}
}
// processNode commits one HistoryNode: it creates the node's branch at the
// parent commit if needed, applies the node's table updates to that branch's
// root, writes a commit with a fixed test author, and then recurses into the
// node's children with the new commit as their parent.
func processNode(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, node HistoryNode, parent *doltdb.Commit) {
branchRef := ref.NewBranchRef(node.Branch)
ok, err := dEnv.DoltDB.HasRef(ctx, branchRef)
require.NoError(t, err)

// Create the branch at the parent commit if it doesn't exist yet.
if !ok {
err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent)
require.NoError(t, err)
}

cs, err := doltdb.NewCommitSpec(branchRef.String())
require.NoError(t, err)

cm, err := dEnv.DoltDB.Resolve(ctx, cs, nil)
require.NoError(t, err)

root, err := cm.GetRootValue(ctx)
require.NoError(t, err)

// Apply this node's table updates and persist the new root value.
root = updateTables(t, ctx, root, node.Updates)
r, h, err := dEnv.DoltDB.WriteRootValue(ctx, root)
require.NoError(t, err)
root = r

meta, err := datas.NewCommitMeta("Ash Ketchum", "ash@poke.mon", node.CommitMsg)
require.NoError(t, err)

cm, err = dEnv.DoltDB.Commit(ctx, h, branchRef, meta)
require.NoError(t, err)

// Recurse: children are committed with this commit as their parent.
for _, child := range node.Children {
processNode(t, ctx, dEnv, child, cm)
}
}
func validateTest(t *testing.T, test SelectTest) {
if (test.ExpectedRows == nil) != (test.ExpectedSchema == nil && test.ExpectedSqlSchema == nil) {
require.Fail(t, "Incorrect test setup: schema and rows must both be provided if one is")
+3 -2
View File
@@ -397,12 +397,13 @@ func testUpdateQuery(t *testing.T, test UpdateTest) {
t.Skip("Skipping tests until " + singleUpdateQueryTest)
}
dEnv := CreateTestDatabase(t)
dEnv, err := CreateTestDatabase()
require.NoError(t, err)
if test.AdditionalSetup != nil {
test.AdditionalSetup(t, dEnv)
}
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(t, context.Background(), dEnv, root, test.UpdateQuery)
if len(test.ExpectedErr) > 0 {
@@ -247,7 +247,6 @@ func SqlColToStr(sqlType sql.Type, col interface{}) (string, error) {
return "", err
}
return res.ToString(), nil
}
}
@@ -38,7 +38,7 @@ func setupEditorFkTest(t *testing.T) (*env.DoltEnv, *doltdb.RootValue) {
if err != nil {
panic(err)
}
initialRoot, err := ExecuteSql(t, dEnv, root, `
initialRoot, err := ExecuteSql(dEnv, root, `
CREATE TABLE one (
pk BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -154,7 +154,7 @@ func TestTableEditorForeignKeyCascade(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
testRoot, err := ExecuteSql(dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE CASCADE ON UPDATE CASCADE;
ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASCADE ON UPDATE CASCADE;
`)
@@ -203,7 +203,7 @@ func TestTableEditorForeignKeySetNull(t *testing.T) {
t.Run(test.sqlStatement, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
testRoot, err := ExecuteSql(dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE SET NULL ON UPDATE SET NULL;`)
require.NoError(t, err)
@@ -285,7 +285,7 @@ func TestTableEditorForeignKeyRestrict(t *testing.T) {
t.Run(test.setup+test.trigger, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(t, dEnv, initialRoot, fmt.Sprintf(`
testRoot, err := ExecuteSql(dEnv, initialRoot, fmt.Sprintf(`
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) %s;
INSERT INTO one VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
INSERT INTO two VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);`, referenceOption))
@@ -356,7 +356,7 @@ func TestTableEditorForeignKeyViolations(t *testing.T) {
t.Run(test.setup+test.trigger, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
testRoot, err := ExecuteSql(dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE CASCADE ON UPDATE CASCADE;
ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASCADE ON UPDATE CASCADE;
`)
@@ -740,7 +740,7 @@ func setupEditorKeylessFkTest(t *testing.T) (*env.DoltEnv, *doltdb.RootValue) {
if err != nil {
panic(err)
}
initialRoot, err := ExecuteSql(t, dEnv, root, `
initialRoot, err := ExecuteSql(dEnv, root, `
CREATE TABLE one (
pk BIGINT,
v1 BIGINT,
@@ -858,7 +858,7 @@ func TestTableEditorKeylessFKCascade(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
dEnv, initialRoot := setupEditorKeylessFkTest(t)
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
testRoot, err := ExecuteSql(dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE CASCADE ON UPDATE CASCADE;
ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASCADE ON UPDATE CASCADE;
`)
@@ -36,7 +36,7 @@ func setupEditorIndexTest(t *testing.T) (*env.DoltEnv, *doltdb.RootValue) {
root, err := index_dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
index_initialRoot, err := ExecuteSql(t, index_dEnv, root, `
index_initialRoot, err := ExecuteSql(index_dEnv, root, `
CREATE TABLE onepk (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
+31 -22
View File
@@ -940,7 +940,7 @@ func (t *DoltTable) GetReferencedForeignKeys(ctx *sql.Context) ([]sql.ForeignKey
}
// CreateIndexForForeignKey implements sql.ForeignKeyTable
func (t DoltTable) CreateIndexForForeignKey(ctx *sql.Context, indexName string, using sql.IndexUsing, constraint sql.IndexConstraint, columns []sql.IndexColumn) error {
func (t DoltTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.IndexDef) error {
return fmt.Errorf("no foreign key operations on a read-only table")
}
@@ -1271,11 +1271,12 @@ func (t *AlterableDoltTable) RewriteInserter(
newSchema sql.PrimaryKeySchema,
oldColumn *sql.Column,
newColumn *sql.Column,
idxCols []sql.IndexColumn,
) (sql.RowInserter, error) {
if err := branch_control.CheckAccess(ctx, branch_control.Permissions_Write); err != nil {
return nil, err
}
err := validateSchemaChange(t.Name(), oldSchema, newSchema, oldColumn, newColumn)
err := validateSchemaChange(t.Name(), oldSchema, newSchema, oldColumn, newColumn, idxCols)
if err != nil {
return nil, err
}
@@ -1496,7 +1497,15 @@ func validateSchemaChange(
newSchema sql.PrimaryKeySchema,
oldColumn *sql.Column,
newColumn *sql.Column,
idxCols []sql.IndexColumn,
) error {
for _, idxCol := range idxCols {
col := newSchema.Schema[newSchema.Schema.IndexOfColName(idxCol.Name)]
if idxCol.Length > 0 && sql.IsText(col.Type) {
return sql.ErrUnsupportedIndexPrefix.New(col.Name)
}
}
if newColumn != nil {
newCol, err := sqlutil.ToDoltCol(schema.SystemTableReservedMin, newColumn)
if err != nil {
@@ -1749,22 +1758,15 @@ func (t *AlterableDoltTable) getFirstAutoIncrementValue(
}
// CreateIndex implements sql.IndexAlterableTable
func (t *AlterableDoltTable) CreateIndex(
ctx *sql.Context,
indexName string,
using sql.IndexUsing,
constraint sql.IndexConstraint,
indexColumns []sql.IndexColumn,
comment string,
) error {
func (t *AlterableDoltTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) error {
if err := branch_control.CheckAccess(ctx, branch_control.Permissions_Write); err != nil {
return err
}
if constraint != sql.IndexConstraint_None && constraint != sql.IndexConstraint_Unique {
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
columns := make([]string, len(indexColumns))
for i, indexCol := range indexColumns {
columns := make([]string, len(idx.Columns))
for i, indexCol := range idx.Columns {
columns[i] = indexCol.Name
}
@@ -1773,14 +1775,21 @@ func (t *AlterableDoltTable) CreateIndex(
return err
}
for _, idxCol := range idx.Columns {
col := t.DoltTable.sqlSch.Schema[t.DoltTable.sqlSch.IndexOfColName(idxCol.Name)]
if idxCol.Length > 0 && sql.IsText(col.Type) {
return sql.ErrUnsupportedIndexPrefix.New(col.Name)
}
}
ret, err := creation.CreateIndex(
ctx,
table,
indexName,
idx.Name,
columns,
constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Unique,
true,
comment,
idx.Comment,
t.opts,
)
if err != nil {
@@ -2228,12 +2237,12 @@ func (t *AlterableDoltTable) UpdateForeignKey(ctx *sql.Context, fkName string, s
}
// CreateIndexForForeignKey implements sql.ForeignKeyTable
func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, indexName string, using sql.IndexUsing, constraint sql.IndexConstraint, indexColumns []sql.IndexColumn) error {
if constraint != sql.IndexConstraint_None && constraint != sql.IndexConstraint_Unique {
func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.IndexDef) error {
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
columns := make([]string, len(indexColumns))
for i, indexCol := range indexColumns {
columns := make([]string, len(idx.Columns))
for i, indexCol := range idx.Columns {
columns[i] = indexCol.Name
}
@@ -2245,9 +2254,9 @@ func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, indexNam
ret, err := creation.CreateIndex(
ctx,
table,
indexName,
idx.Name,
columns,
constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Unique,
false,
"",
t.opts,
+8 -8
View File
@@ -258,23 +258,23 @@ func (t *TempTable) IndexedAccess(idx sql.Index) sql.IndexedTable {
return t
}
func (t *TempTable) CreateIndex(ctx *sql.Context, indexName string, using sql.IndexUsing, constraint sql.IndexConstraint, columns []sql.IndexColumn, comment string) error {
if constraint != sql.IndexConstraint_None && constraint != sql.IndexConstraint_Unique {
func (t *TempTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) error {
if idx.Constraint != sql.IndexConstraint_None && idx.Constraint != sql.IndexConstraint_Unique {
return fmt.Errorf("only the following types of index constraints are supported: none, unique")
}
cols := make([]string, len(columns))
for i, c := range columns {
cols := make([]string, len(idx.Columns))
for i, c := range idx.Columns {
cols[i] = c.Name
}
ret, err := creation.CreateIndex(
ctx,
t.table,
indexName,
idx.Name,
cols,
constraint == sql.IndexConstraint_Unique,
idx.Constraint == sql.IndexConstraint_Unique,
true,
comment,
idx.Comment,
t.opts,
)
if err != nil {
@@ -331,7 +331,7 @@ func (t *TempTable) GetReferencedForeignKeys(ctx *sql.Context) ([]sql.ForeignKey
return nil, nil
}
func (t *TempTable) CreateIndexForForeignKey(ctx *sql.Context, indexName string, using sql.IndexUsing, constraint sql.IndexConstraint, columns []sql.IndexColumn) error {
func (t *TempTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.IndexDef) error {
return sql.ErrTemporaryTablesForeignKeySupport.New()
}
-238
View File
@@ -18,22 +18,15 @@ import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/dolthub/go-mysql-server/sql"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
@@ -334,234 +327,3 @@ func GetAllRows(root *doltdb.RootValue, tableName string) ([]sql.Row, error) {
return SqlRowsFromDurableIndex(rowIdx, sch)
}
var idColTag0TypeUUID = schema.NewColumn("id", 0, types.IntKind, true)
var firstColTag1TypeStr = schema.NewColumn("first_name", 1, types.StringKind, false)
var lastColTag2TypeStr = schema.NewColumn("last_name", 2, types.StringKind, false)
var addrColTag3TypeStr = schema.NewColumn("addr", 3, types.StringKind, false)
var ageColTag4TypeInt = schema.NewColumn("age", 4, types.IntKind, false)
var ageColTag5TypeUint = schema.NewColumn("age", 5, types.UintKind, false)
var DiffSchema = dtestutils.MustSchema(
schema.NewColumn("to_id", 0, types.IntKind, false),
schema.NewColumn("to_first_name", 1, types.StringKind, false),
schema.NewColumn("to_last_name", 2, types.StringKind, false),
schema.NewColumn("to_addr", 3, types.StringKind, false),
schema.NewColumn("from_id", 7, types.IntKind, false),
schema.NewColumn("from_first_name", 8, types.StringKind, false),
schema.NewColumn("from_last_name", 9, types.StringKind, false),
schema.NewColumn("from_addr", 10, types.StringKind, false),
schema.NewColumn("diff_type", 14, types.StringKind, false),
)
const TableWithHistoryName = "test_table"
var InitialHistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr)
var AddAddrAt3HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, addrColTag3TypeStr)
var AddAgeAt4HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, ageColTag4TypeInt)
var ReaddAgeAt5HistSch = dtestutils.MustSchema(idColTag0TypeUUID, firstColTag1TypeStr, lastColTag2TypeStr, addrColTag3TypeStr, ageColTag5TypeUint)
// TableUpdate defines a list of modifications that should be made to a table
type TableUpdate struct {
// NewSch is an updated schema for this table. It overwrites the existing value. If not provided the existing value
// will not change
NewSch schema.Schema
// NewRowData if provided overwrites the entirety of the row data in the table.
NewRowData *types.Map
// RowUpdates are new values for rows that should be set in the map. They can be updates or inserts.
RowUpdates []row.Row
}
// HistoryNode represents a commit to be made
type HistoryNode struct {
// Branch the branch that the commit should be on
Branch string
// CommitMessag is the commit message that should be applied
CommitMsg string
// Updates are the changes that should be made to the table's states before committing
Updates map[string]TableUpdate
// Children are the child commits of this commit
Children []HistoryNode
}
func CreateHistory(ctx context.Context, dEnv *env.DoltEnv, t *testing.T) []HistoryNode {
vrw := dEnv.DoltDB.ValueReadWriter()
return []HistoryNode{
{
Branch: "seed",
CommitMsg: "Seeding with initial user data",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: InitialHistSch,
NewRowData: dtestutils.MustRowData(t, ctx, vrw, InitialHistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son")},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks")},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn")},
}),
},
},
Children: []HistoryNode{
{
Branch: "add-age",
CommitMsg: "Adding int age to users with tag 3",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: AddAgeAt4HistSch,
NewRowData: dtestutils.MustRowData(t, ctx, vrw, AddAgeAt4HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 4: types.Int(35)},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 4: types.Int(38)},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 4: types.Int(37)},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave"), 4: types.Int(37)},
}),
},
},
Children: nil,
},
{
Branch: env.DefaultInitBranch,
CommitMsg: "Adding string address to users with tag 3",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: AddAddrAt3HistSch,
NewRowData: dtestutils.MustRowData(t, ctx, vrw, AddAddrAt3HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 3: types.String("123 Fake St")},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 3: types.String("456 Bull Ln")},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 3: types.String("789 Not Real Ct")},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave")},
{0: types.Int(4), 1: types.String("Matt"), 2: types.String("Jesuele")},
}),
},
},
Children: []HistoryNode{
{
Branch: env.DefaultInitBranch,
CommitMsg: "Re-add age as a uint with tag 4",
Updates: map[string]TableUpdate{
TableWithHistoryName: {
NewSch: ReaddAgeAt5HistSch,
NewRowData: dtestutils.MustRowData(t, ctx, vrw, ReaddAgeAt5HistSch, []row.TaggedValues{
{0: types.Int(0), 1: types.String("Aaron"), 2: types.String("Son"), 3: types.String("123 Fake St"), 5: types.Uint(35)},
{0: types.Int(1), 1: types.String("Brian"), 2: types.String("Hendriks"), 3: types.String("456 Bull Ln"), 5: types.Uint(38)},
{0: types.Int(2), 1: types.String("Tim"), 2: types.String("Sehn"), 3: types.String("789 Not Real Ct"), 5: types.Uint(37)},
{0: types.Int(3), 1: types.String("Zach"), 2: types.String("Musgrave"), 3: types.String("-1 Imaginary Wy"), 5: types.Uint(37)},
{0: types.Int(4), 1: types.String("Matt"), 2: types.String("Jesuele")},
{0: types.Int(5), 1: types.String("Daylon"), 2: types.String("Wilkins")},
}),
},
},
Children: nil,
},
},
},
},
},
}
}
func UpdateTables(t *testing.T, ctx context.Context, root *doltdb.RootValue, tblUpdates map[string]TableUpdate) *doltdb.RootValue {
for tblName, updates := range tblUpdates {
tbl, ok, err := root.GetTable(ctx, tblName)
require.NoError(t, err)
var sch schema.Schema
if updates.NewSch != nil {
sch = updates.NewSch
} else {
sch, err = tbl.GetSchema(ctx)
require.NoError(t, err)
}
var rowData types.Map
if updates.NewRowData == nil {
if ok {
rowData, err = tbl.GetNomsRowData(ctx)
require.NoError(t, err)
} else {
rowData, err = types.NewMap(ctx, root.VRW())
require.NoError(t, err)
}
} else {
rowData = *updates.NewRowData
}
if updates.RowUpdates != nil {
me := rowData.Edit()
for _, r := range updates.RowUpdates {
me = me.Set(r.NomsMapKey(sch), r.NomsMapValue(sch))
}
rowData, err = me.Map(ctx)
require.NoError(t, err)
}
var indexData durable.IndexSet
require.NoError(t, err)
if tbl != nil {
indexData, err = tbl.GetIndexSet(ctx)
require.NoError(t, err)
}
tbl, err = doltdb.NewNomsTable(ctx, root.VRW(), root.NodeStore(), sch, rowData, indexData, nil)
require.NoError(t, err)
root, err = root.PutTable(ctx, tblName, tbl)
require.NoError(t, err)
}
return root
}
// InitializeWithHistory will go through the provided historyNodes and create the intended commit graph
func InitializeWithHistory(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, historyNodes ...HistoryNode) {
for _, node := range historyNodes {
cs, err := doltdb.NewCommitSpec(env.DefaultInitBranch)
require.NoError(t, err)
cm, err := dEnv.DoltDB.Resolve(ctx, cs, nil)
require.NoError(t, err)
processNode(t, ctx, dEnv, node, cm)
}
}
func processNode(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, node HistoryNode, parent *doltdb.Commit) {
branchRef := ref.NewBranchRef(node.Branch)
ok, err := dEnv.DoltDB.HasRef(ctx, branchRef)
require.NoError(t, err)
if !ok {
err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent)
require.NoError(t, err)
}
cs, err := doltdb.NewCommitSpec(branchRef.String())
require.NoError(t, err)
cm, err := dEnv.DoltDB.Resolve(ctx, cs, nil)
require.NoError(t, err)
root, err := cm.GetRootValue(ctx)
require.NoError(t, err)
root = UpdateTables(t, ctx, root, node.Updates)
r, h, err := dEnv.DoltDB.WriteRootValue(ctx, root)
require.NoError(t, err)
root = r
meta, err := datas.NewCommitMeta("Ash Ketchum", "ash@poke.mon", node.CommitMsg)
require.NoError(t, err)
cm, err = dEnv.DoltDB.Commit(ctx, h, branchRef, meta)
require.NoError(t, err)
for _, child := range node.Children {
processNode(t, ctx, dEnv, child, cm)
}
}
+163 -49
View File
@@ -19,19 +19,15 @@ import (
"errors"
"fmt"
"io"
"os"
"strings"
"testing"
sqle "github.com/dolthub/go-mysql-server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/vt/sqlparser"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -40,20 +36,26 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
config2 "github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
// ExecuteSql executes all the SQL non-select statements given in the string against the root value given and returns
// the updated root, or an error. Statements in the input string are split by `;\n`
func ExecuteSql(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValue, statements string) (*doltdb.RootValue, error) {
func ExecuteSql(dEnv *env.DoltEnv, root *doltdb.RootValue, statements string) (*doltdb.RootValue, error) {
tmpDir, err := dEnv.TempTableFilesDir()
require.NoError(t, err)
if err != nil {
return nil, err
}
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
db, err := NewDatabase(context.Background(), "dolt", dEnv.DbData(), opts)
require.NoError(t, err)
if err != nil {
return nil, err
}
engine, ctx, err := NewTestEngine(t, dEnv, context.Background(), db, root)
engine, ctx, err := NewTestEngine(dEnv, context.Background(), db, root)
dsess.DSessFromSess(ctx.Session).EnableBatchedMode()
err = ctx.Session.SetSessionVariable(ctx, sql.AutoCommitSessionVar, false)
if err != nil {
@@ -132,14 +134,22 @@ func NewTestSQLCtxWithProvider(ctx context.Context, pro dsess.DoltDatabaseProvid
}
// NewTestEngine creates a new default engine, and a *sql.Context and initializes indexes and schema fragments.
func NewTestEngine(t *testing.T, dEnv *env.DoltEnv, ctx context.Context, db Database, root *doltdb.RootValue) (*sqle.Engine, *sql.Context, error) {
func NewTestEngine(dEnv *env.DoltEnv, ctx context.Context, db Database, root *doltdb.RootValue) (*sqle.Engine, *sql.Context, error) {
b := env.GetDefaultInitBranch(dEnv.Config)
pro, err := NewDoltDatabaseProviderWithDatabase(b, dEnv.FS, db, dEnv.FS)
require.NoError(t, err)
if err != nil {
return nil, nil, err
}
engine := sqle.NewDefault(pro)
sqlCtx := NewTestSQLCtxWithProvider(ctx, pro)
err = dsess.DSessFromSess(sqlCtx.Session).AddDB(sqlCtx, getDbState(t, db, dEnv))
dbState, err := getDbState(db, dEnv)
if err != nil {
return nil, nil, err
}
err = dsess.DSessFromSess(sqlCtx.Session).AddDB(sqlCtx, dbState)
if err != nil {
return nil, nil, err
}
@@ -153,24 +163,19 @@ func NewTestEngine(t *testing.T, dEnv *env.DoltEnv, ctx context.Context, db Data
return engine, sqlCtx, nil
}
// SkipByDefaultInCI skips the currently executing test as long as the CI env var is set
// (GitHub Actions sets this automatically) and the DOLT_TEST_RUN_NON_RACE_TESTS env var
// is not set. This is useful for filtering out tests that cause race detection to fail.
func SkipByDefaultInCI(t *testing.T) {
if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
t.Skip()
}
}
func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialDbState {
func getDbState(db sql.Database, dEnv *env.DoltEnv) (dsess.InitialDbState, error) {
ctx := context.Background()
head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
require.NoError(t, err)
if err != nil {
return dsess.InitialDbState{}, err
}
ws, err := dEnv.WorkingSet(ctx)
require.NoError(t, err)
if err != nil {
return dsess.InitialDbState{}, err
}
return dsess.InitialDbState{
Db: db,
@@ -178,12 +183,11 @@ func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialD
WorkingSet: ws,
DbData: dEnv.DbData(),
Remotes: dEnv.RepoState.Remotes,
}
}, nil
}
// ExecuteSelect executes the select statement given and returns the resulting rows, or an error if one is encountered.
func ExecuteSelect(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]sql.Row, error) {
func ExecuteSelect(dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]sql.Row, error) {
dbData := env.DbData{
Ddb: dEnv.DoltDB,
Rsw: dEnv.RepoStateWriter(),
@@ -191,12 +195,17 @@ func ExecuteSelect(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValue, quer
}
tmpDir, err := dEnv.TempTableFilesDir()
require.NoError(t, err)
if err != nil {
return nil, err
}
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
db, err := NewDatabase(context.Background(), "dolt", dbData, opts)
require.NoError(t, err)
if err != nil {
return nil, err
}
engine, ctx, err := NewTestEngine(t, dEnv, context.Background(), db, root)
engine, ctx, err := NewTestEngine(dEnv, context.Background(), db, root)
if err != nil {
return nil, err
}
@@ -358,7 +367,7 @@ func drainIter(ctx *sql.Context, iter sql.RowIter) error {
return iter.Close(ctx)
}
func CreateEnvWithSeedData(t *testing.T) *env.DoltEnv {
func CreateEnvWithSeedData() (*env.DoltEnv, error) {
const seedData = `
CREATE TABLE people (
id varchar(36) primary key,
@@ -374,29 +383,125 @@ func CreateEnvWithSeedData(t *testing.T) *env.DoltEnv {
('00000000-0000-0000-0000-000000000002', 'Rob Robertson', 21, 0, '');`
ctx := context.Background()
dEnv := dtestutils.CreateTestEnv()
dEnv := CreateTestEnv()
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, seedData)
require.NoError(t, err)
if err != nil {
return nil, err
}
root, err = ExecuteSql(dEnv, root, seedData)
if err != nil {
return nil, err
}
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
return dEnv
if err != nil {
return nil, err
}
return dEnv, nil
}
// CreateEmptyTestDatabase creates a test database without any data in it.
func CreateEmptyTestDatabase(t *testing.T) *env.DoltEnv {
dEnv := dtestutils.CreateTestEnv()
dtestutils.CreateEmptyTestTable(t, dEnv, PeopleTableName, PeopleTestSchema)
dtestutils.CreateEmptyTestTable(t, dEnv, EpisodesTableName, EpisodesTestSchema)
dtestutils.CreateEmptyTestTable(t, dEnv, AppearancesTableName, AppearancesTestSchema)
func CreateEmptyTestDatabase() (*env.DoltEnv, error) {
dEnv := CreateTestEnv()
err := CreateEmptyTestTable(dEnv, PeopleTableName, PeopleTestSchema)
if err != nil {
return nil, err
}
err = CreateEmptyTestTable(dEnv, EpisodesTableName, EpisodesTestSchema)
if err != nil {
return nil, err
}
err = CreateEmptyTestTable(dEnv, AppearancesTableName, AppearancesTestSchema)
if err != nil {
return nil, err
}
return dEnv, nil
}
const (
TestHomeDirPrefix = "/user/dolt/"
WorkingDirPrefix = "/user/dolt/datasets/"
)
// CreateTestEnv creates a new DoltEnv suitable for testing. The CreateTestEnvWithName
// function should generally be preferred over this method, especially when working
// with tests using multiple databases within a MultiRepoEnv.
func CreateTestEnv() *env.DoltEnv {
return CreateTestEnvWithName("test")
}
// CreateTestEnvWithName creates a new DoltEnv suitable for testing and uses
// the specified name to distinguish it from other test envs. This function
// should generally be preferred over CreateTestEnv, especially when working with
// tests using multiple databases within a MultiRepoEnv.
func CreateTestEnvWithName(envName string) *env.DoltEnv {
const name = "billy bob"
const email = "bigbillieb@fake.horse"
initialDirs := []string{TestHomeDirPrefix + envName, WorkingDirPrefix + envName}
homeDirFunc := func() (string, error) { return TestHomeDirPrefix + envName, nil }
fs := filesys.NewInMemFS(initialDirs, nil, WorkingDirPrefix+envName)
dEnv := env.Load(context.Background(), homeDirFunc, fs, doltdb.InMemDoltDB+envName, "test")
cfg, _ := dEnv.Config.GetConfig(env.GlobalConfig)
cfg.SetStrings(map[string]string{
env.UserNameKey: name,
env.UserEmailKey: email,
})
err := dEnv.InitRepo(context.Background(), types.Format_Default, name, email, env.DefaultInitBranch)
if err != nil {
panic("Failed to initialize environment:" + err.Error())
}
return dEnv
}
// CreateTestDatabase creates a test database with the test data set in it.
func CreateTestDatabase(t *testing.T) *env.DoltEnv {
// CreateEmptyTestTable creates a new test table with the name, schema, and rows given.
func CreateEmptyTestTable(dEnv *env.DoltEnv, tableName string, sch schema.Schema) error {
ctx := context.Background()
dEnv := CreateEmptyTestDatabase(t)
root, err := dEnv.WorkingRoot(ctx)
if err != nil {
return err
}
vrw := dEnv.DoltDB.ValueReadWriter()
ns := dEnv.DoltDB.NodeStore()
rows, err := durable.NewEmptyIndex(ctx, vrw, ns, sch)
if err != nil {
return err
}
indexSet, err := durable.NewIndexSetWithEmptyIndexes(ctx, vrw, ns, sch)
if err != nil {
return err
}
tbl, err := doltdb.NewTable(ctx, vrw, ns, sch, rows, indexSet, nil)
if err != nil {
return err
}
newRoot, err := root.PutTable(ctx, tableName, tbl)
if err != nil {
return err
}
return dEnv.UpdateWorkingRoot(ctx, newRoot)
}
// CreateTestDatabase creates a test database with the test data set in it.
func CreateTestDatabase() (*env.DoltEnv, error) {
ctx := context.Background()
dEnv, err := CreateEmptyTestDatabase()
if err != nil {
return nil, err
}
const simpsonsRowData = `
INSERT INTO people VALUES
@@ -424,12 +529,21 @@ func CreateTestDatabase(t *testing.T) *env.DoltEnv {
(5, 3, "I'm making this all up");`
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, simpsonsRowData)
require.NoError(t, err)
if err != nil {
return nil, err
}
root, err = ExecuteSql(dEnv, root, simpsonsRowData)
if err != nil {
return nil, err
}
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
return dEnv
if err != nil {
return nil, err
}
return dEnv, nil
}
func SqlRowsFromDurableIndex(idx durable.Index, sch schema.Schema) ([]sql.Row, error) {
+4 -4
View File
@@ -33,13 +33,13 @@ func TestViews(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = ExecuteSql(t, dEnv, root, "create table test (a int primary key)")
root, err = ExecuteSql(dEnv, root, "create table test (a int primary key)")
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "insert into test values (1), (2), (3)")
root, err = ExecuteSql(dEnv, root, "insert into test values (1), (2), (3)")
require.NoError(t, err)
root, err = ExecuteSql(t, dEnv, root, "create view plus1 as select a + 1 from test")
root, err = ExecuteSql(dEnv, root, "create view plus1 as select a + 1 from test")
require.NoError(t, err)
expectedRows := []sql.Row{
@@ -51,6 +51,6 @@ func TestViews(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, expectedRows, rows)
root, err = ExecuteSql(t, dEnv, root, "drop view plus1")
root, err = ExecuteSql(dEnv, root, "drop view plus1")
require.NoError(t, err)
}
@@ -161,9 +161,13 @@ func TestTableEditor(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
expectedErr = nil
dEnv := sqle.CreateTestDatabase(t)
dEnv, err := sqle.CreateTestDatabase()
require.NoError(t, err)
ctx := sqle.NewTestSQLCtx(context.Background())
root, _ := dEnv.WorkingRoot(context.Background())
root, err := dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
tmpDir, err := dEnv.TempTableFilesDir()
require.NoError(t, err)
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: tmpDir}
@@ -196,7 +200,7 @@ func TestTableEditor(t *testing.T) {
require.NoError(t, dEnv.UpdateWorkingRoot(context.Background(), root))
actualRows, err := sqle.ExecuteSelect(t, dEnv, root, test.selectQuery)
actualRows, err := sqle.ExecuteSelect(dEnv, root, test.selectQuery)
require.NoError(t, err)
assert.Equal(t, test.expectedRows, actualRows)
@@ -53,6 +53,9 @@ func TestEndToEnd(t *testing.T) {
" CONSTRAINT `test-check` CHECK ((`age` < 123))\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"
sch, err := dtestutils.Schema()
require.NoError(t, err)
type test struct {
name string
rows []sql.Row
@@ -67,7 +70,7 @@ func TestEndToEnd(t *testing.T) {
{"00000000-0000-0000-0000-000000000000", "some guy", 100, 0, "normie"},
{"00000000-0000-0000-0000-000000000000", "guy personson", 0, 1, "officially a person"},
},
sch: dtestutils.TypedSchema,
sch: sch,
expectedOutput: dropCreateStatement + "\n" +
"INSERT INTO `people` (`id`,`name`,`age`,`is_married`,`title`) " +
`VALUES ('00000000-0000-0000-0000-000000000000','some guy',100,0,'normie');` + "\n" +
@@ -76,7 +79,7 @@ func TestEndToEnd(t *testing.T) {
},
{
name: "no rows",
sch: dtestutils.TypedSchema,
sch: sch,
expectedOutput: dropCreateStatement + "\n",
},
}
+11 -3
View File
@@ -22,9 +22,10 @@ import (
"github.com/stretchr/testify/require"
)
var forceOpt = &Option{"force", "f", "", OptionalFlag, "force desc", nil}
var messageOpt = &Option{"message", "m", "msg", OptionalValue, "msg desc", nil}
var fileTypeOpt = &Option{"file-type", "", "", OptionalValue, "file type", nil}
var forceOpt = &Option{"force", "f", "", OptionalFlag, "force desc", nil, false}
var messageOpt = &Option{"message", "m", "msg", OptionalValue, "msg desc", nil, false}
var fileTypeOpt = &Option{"file-type", "", "", OptionalValue, "file type", nil, false}
var notOpt = &Option{"not", "", "", OptionalValue, "not desc", nil, true}
func TestParsing(t *testing.T) {
tests := []struct {
@@ -158,6 +159,13 @@ func TestParsing(t *testing.T) {
expectedOpts: map[string]string{"message": "f"},
expectedArgs: []string{"value"},
},
{
name: "--not string list value",
options: []*Option{forceOpt, messageOpt, notOpt},
args: []string{"-mf", "value", "--not", "main", "branch"},
expectedOpts: map[string]string{"message": "f", "not": "main,branch"},
expectedArgs: []string{"value"},
},
{
name: "-fm value",
options: []*Option{forceOpt, messageOpt},
+2
View File
@@ -63,4 +63,6 @@ type Option struct {
Desc string
// Function to validate an Option after parsing, returning any error.
Validator ValidationFunc
// Allows more than one arg to an Option.
AllowMultipleOptions bool
}
+35 -8
View File
@@ -89,7 +89,7 @@ func (ap *ArgParser) SupportOption(opt *Option) {
// SupportsFlag adds support for a new flag (argument with no value). See SupportOpt for details on params.
func (ap *ArgParser) SupportsFlag(name, abbrev, desc string) *ArgParser {
opt := &Option{name, abbrev, "", OptionalFlag, desc, nil}
opt := &Option{name, abbrev, "", OptionalFlag, desc, nil, false}
ap.SupportOption(opt)
return ap
@@ -97,7 +97,15 @@ func (ap *ArgParser) SupportsFlag(name, abbrev, desc string) *ArgParser {
// SupportsString adds support for a new string argument with the description given. See SupportOpt for details on params.
func (ap *ArgParser) SupportsString(name, abbrev, valDesc, desc string) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, nil}
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, nil, false}
ap.SupportOption(opt)
return ap
}
// SupportsStringList adds support for a new string list argument with the description given. See SupportOpt for details on params.
func (ap *ArgParser) SupportsStringList(name, abbrev, valDesc, desc string) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, nil, true}
ap.SupportOption(opt)
return ap
@@ -105,7 +113,7 @@ func (ap *ArgParser) SupportsString(name, abbrev, valDesc, desc string) *ArgPars
// SupportsOptionalString adds support for a new string argument with the description given and optional empty value.
func (ap *ArgParser) SupportsOptionalString(name, abbrev, valDesc, desc string) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalEmptyValue, desc, nil}
opt := &Option{name, abbrev, valDesc, OptionalEmptyValue, desc, nil, false}
ap.SupportOption(opt)
return ap
@@ -113,7 +121,7 @@ func (ap *ArgParser) SupportsOptionalString(name, abbrev, valDesc, desc string)
// SupportsValidatedString adds support for a new string argument with the description given and defined validation function.
func (ap *ArgParser) SupportsValidatedString(name, abbrev, valDesc, desc string, validator ValidationFunc) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, validator}
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, validator, false}
ap.SupportOption(opt)
return ap
@@ -121,7 +129,7 @@ func (ap *ArgParser) SupportsValidatedString(name, abbrev, valDesc, desc string,
// SupportsUint adds support for a new uint argument with the description given. See SupportOpt for details on params.
func (ap *ArgParser) SupportsUint(name, abbrev, valDesc, desc string) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, isUintStr}
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, isUintStr, false}
ap.SupportOption(opt)
return ap
@@ -129,7 +137,7 @@ func (ap *ArgParser) SupportsUint(name, abbrev, valDesc, desc string) *ArgParser
// SupportsInt adds support for a new int argument with the description given. See SupportOpt for details on params.
func (ap *ArgParser) SupportsInt(name, abbrev, valDesc, desc string) *ArgParser {
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, isIntStr}
opt := &Option{name, abbrev, valDesc, OptionalValue, desc, isIntStr, false}
ap.SupportOption(opt)
return ap
@@ -280,9 +288,14 @@ func (ap *ArgParser) Parse(args []string) (*ArgParseResults, error) {
return nil, errors.New("error: no value for option `" + opt.Name + "'")
}
} else {
valueStr = args[i]
if opt.AllowMultipleOptions {
list := getListValues(args[i:])
valueStr = strings.Join(list, ",")
i += len(list) - 1
} else {
valueStr = args[i]
}
}
value = &valueStr
}
@@ -303,3 +316,17 @@ func (ap *ArgParser) Parse(args []string) (*ArgParseResults, error) {
return &ArgParseResults{results, list, ap}, nil
}
// getListValues collects consecutive non-option arguments from the front of
// args, stopping at the first argument that introduces another option. It is
// used to gather the values of an Option with AllowMultipleOptions set
// (e.g. `--not main branch` yields ["main", "branch"]).
func getListValues(args []string) []string {
	var values []string
	for _, arg := range args {
		// Stop when the next option (or the bare "--" terminator, which
		// also begins with '-') is found. The len guard prevents a panic
		// on an empty-string argument; an empty string is treated as an
		// ordinary value.
		if len(arg) > 0 && arg[0] == '-' {
			return values
		}
		values = append(values, arg)
	}
	return values
}
+6
View File
@@ -17,6 +17,7 @@ package argparser
import (
"math"
"strconv"
"strings"
"github.com/dolthub/dolt/go/libraries/utils/set"
)
@@ -96,6 +97,11 @@ func (res *ArgParseResults) GetValue(name string) (string, bool) {
return val, ok
}
// GetValueList returns the comma-separated value stored for the named option
// split into its elements (the representation used for options parsed with
// AllowMultipleOptions), along with whether the option was supplied. When the
// option is absent it returns (nil, false) rather than a one-element slice
// containing the empty string, which strings.Split would otherwise produce.
func (res *ArgParseResults) GetValueList(name string) ([]string, bool) {
	val, ok := res.options[name]
	if !ok {
		return nil, false
	}
	return strings.Split(val, ","), true
}
func (res *ArgParseResults) GetValues(names ...string) map[string]string {
vals := make(map[string]string)
@@ -1 +0,0 @@
Creates files with a jwks and a jwt that can be validating using the jwks. Used in bats.
+25 -15
View File
@@ -24,8 +24,6 @@ import (
jose "gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/json"
"github.com/pquerna/cachecontrol"
)
type cachedJWKS struct {
@@ -39,12 +37,32 @@ func newCachedJWKS() *cachedJWKS {
}
type fetchedJWKS struct {
URL string
cache *cachedJWKS
URL string
HTTPTransport *http.Transport
cache *cachedJWKS
}
func newJWKS(provider JWTProvider) *fetchedJWKS {
return &fetchedJWKS{URL: provider.URL, cache: newCachedJWKS()}
// newJWKS creates a fetchedJWKS for the provider's JWKS location URL.
// It returns any error from constructing the underlying fetcher.
func newJWKS(provider JWTProvider) (*fetchedJWKS, error) {
	return newFetchedJWKS(provider.URL)
}
// newFetchedJWKS constructs a fetchedJWKS for the given JWKS location url
// with an empty cache and an HTTP transport that additionally understands
// file:// URLs rooted at the current working directory, so tests can serve
// a JWKS from local disk. Returns an error only if the working directory
// cannot be determined.
func newFetchedJWKS(url string) (*fetchedJWKS, error) {
	ret := &fetchedJWKS{
		URL:   url,
		cache: newCachedJWKS(),
	}
	pwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	// Allows use of file:// for jwks location url for tests
	tr := &http.Transport{}
	tr.RegisterProtocol("file", http.NewFileTransport(http.Dir(pwd)))
	ret.HTTPTransport = tr
	return ret, nil
}
func (f *fetchedJWKS) needsRefresh() bool {
@@ -55,14 +73,7 @@ func (f *fetchedJWKS) GetJWKS() (*jose.JSONWebKeySet, error) {
f.cache.mutex.Lock()
defer f.cache.mutex.Unlock()
if f.needsRefresh() {
tr := &http.Transport{}
pwd, err := os.Getwd()
if err != nil {
return nil, err
}
// Allows use of file:// for jwks location url for tests
tr.RegisterProtocol("file", http.NewFileTransport(http.Dir(pwd)))
client := &http.Client{Transport: tr}
client := &http.Client{Transport: f.HTTPTransport}
request, err := http.NewRequest("GET", f.URL, nil)
if err != nil {
@@ -87,7 +98,6 @@ func (f *fetchedJWKS) GetJWKS() (*jose.JSONWebKeySet, error) {
return nil, err
}
f.cache.value = &jwks
_, _, err = cachecontrol.CachableResponse(request, response, cachecontrol.Options{})
}
}
return f.cache.value, nil
+6 -2
View File
@@ -29,9 +29,13 @@ type fetchingJWTValidator struct {
expected jwt.Expected
}
func NewJWTValidator(provider JWTProvider) JWTValidator {
func NewJWTValidator(provider JWTProvider) (JWTValidator, error) {
expected := jwt.Expected{Issuer: provider.Issuer, Audience: jwt.Audience{provider.Audience}}
return &fetchingJWTValidator{jwks: newJWKS(provider), expected: expected}
jwks, err := newJWKS(provider)
if err != nil {
return nil, err
}
return &fetchingJWTValidator{jwks: jwks, expected: expected}, nil
}
func (v *fetchingJWTValidator) ValidateJWT(unparsed string, reqTime time.Time) (*Claims, error) {
-113
View File
@@ -1,113 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
package chunks
import (
"bytes"
"encoding/binary"
"io"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/hash"
)
/*
Chunk Serialization:
Chunk 0
Chunk 1
..
Chunk N
Chunk:
Hash // 20-byte hash
Len // 4-byte int
Data // len(Data) == Len
*/
// Serialize writes a single Chunk to writer in the wire format:
// 20-byte hash, 4-byte big-endian length, then the raw data. Any I/O
// error or short write panics (via the d package) rather than returning
// an error.
func Serialize(chunk Chunk, writer io.Writer) {
	// A chunk with nil data was never properly constructed.
	d.PanicIfFalse(chunk.data != nil)
	h := chunk.Hash()
	n, err := io.Copy(writer, bytes.NewReader(h[:]))
	d.Chk.NoError(err)
	d.PanicIfFalse(int64(hash.ByteLen) == n)
	// Because of chunking at higher levels, no chunk should ever be more
	// than 4GB, so a uint32 length field is sufficient.
	chunkSize := uint32(len(chunk.Data()))
	err = binary.Write(writer, binary.BigEndian, chunkSize)
	d.Chk.NoError(err)
	n, err = io.Copy(writer, bytes.NewReader(chunk.Data()))
	d.Chk.NoError(err)
	d.PanicIfFalse(uint32(n) == chunkSize)
}
// Deserialize reads off of |reader| until EOF, sending chunks to
// chunkChan in the order they are read. Objects sent over chunkChan are
// *Chunk. A clean EOF is treated as normal stream exhaustion and is not
// reported; any other read error is returned.
func Deserialize(reader io.Reader, chunkChan chan<- *Chunk) (err error) {
	for {
		var c Chunk
		c, err = deserializeChunk(reader)
		if err != nil {
			break
		}
		// An empty chunk in the stream indicates corruption upstream.
		d.Chk.NotEqual(EmptyChunk.Hash(), c.Hash())
		chunkChan <- &c
	}
	if err == io.EOF {
		// EOF between chunks just means the stream ended; not an error.
		err = nil
	}
	return
}
// DeserializeData deserializes a single chunk from a byte array that
// holds it in the serialized wire format (hash, length, data).
func DeserializeData(data []byte) (Chunk, error) {
	reader := bytes.NewReader(data)
	return deserializeChunk(reader)
}
// deserializeChunk reads one serialized chunk (hash, big-endian uint32
// length, data) from reader, verifies that the stored hash matches the
// hash recomputed from the data, and returns the reconstructed Chunk.
// On any read error (including io.EOF at a chunk boundary, or
// io.ErrUnexpectedEOF on a truncated chunk) it returns EmptyChunk and
// the error. A hash mismatch panics, as it indicates corruption.
func deserializeChunk(reader io.Reader) (Chunk, error) {
	h := hash.Hash{}
	n, err := io.ReadFull(reader, h[:])
	if err != nil {
		return EmptyChunk, err
	}
	d.PanicIfFalse(int(hash.ByteLen) == n)
	chunkSize := uint32(0)
	if err = binary.Read(reader, binary.BigEndian, &chunkSize); err != nil {
		return EmptyChunk, err
	}
	data := make([]byte, int(chunkSize))
	if n, err = io.ReadFull(reader, data); err != nil {
		return EmptyChunk, err
	}
	d.PanicIfFalse(int(chunkSize) == n)
	c := NewChunk(data)
	// Integrity check: the hash carried in the stream must equal the
	// hash of the payload we just read.
	if h != c.Hash() {
		d.Panic("%s != %s", h, c.Hash().String())
	}
	return c, nil
}
-62
View File
@@ -1,62 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0
package chunks
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
)
// TestSerializeRoundTrip verifies that chunks written with Serialize are
// read back by Deserialize in the same order, with matching hashes.
func TestSerializeRoundTrip(t *testing.T) {
	assert := assert.New(t)
	inputs := [][]byte{[]byte("abc"), []byte("def")}
	chnx := make([]Chunk, len(inputs))
	for i, data := range inputs {
		chnx[i] = NewChunk(data)
	}
	buf := &bytes.Buffer{}
	Serialize(chnx[0], buf)
	Serialize(chnx[1], buf)
	chunkChan := make(chan *Chunk)
	// Deserialize in a goroutine; closing chunkChan ends the range loop
	// below once the whole stream has been consumed.
	go func() {
		defer close(chunkChan)
		err := Deserialize(bytes.NewReader(buf.Bytes()), chunkChan)
		assert.NoError(err)
	}()
	// Pop the expectation list as each chunk arrives, so ordering is
	// checked as well as content.
	for c := range chunkChan {
		assert.Equal(chnx[0].Hash(), c.Hash())
		chnx = chnx[1:]
	}
	// Every serialized chunk must have been received.
	assert.Len(chnx, 0)
}
// TestBadSerialization verifies Deserialize reports an error for a
// stream too short to even contain a chunk hash.
func TestBadSerialization(t *testing.T) {
	bad := []byte{0, 1} // Not enough bytes to read first length
	ch := make(chan *Chunk)
	defer close(ch)
	assert.Error(t, Deserialize(bytes.NewReader(bad), ch))
}
@@ -24,6 +24,7 @@ package chunks
import (
"context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/dolthub/dolt/go/store/constants"
@@ -136,3 +137,16 @@ func (suite *ChunkStoreTestSuite) TestChunkStoreCommitUnchangedRoot() {
// Now, reading c from store2 via the API should work...
assertInputInStore(input, h, store2, suite.Assert())
}
// assertInputInStore asserts that the chunk stored under h in s exists
// (is non-empty) and that its data round-trips back to input.
func assertInputInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
	chunk, err := s.Get(context.Background(), h)
	assert.NoError(err)
	assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", h.String())
	assert.Equal(input, string(chunk.Data()))
}
// assertInputNotInStore asserts that no chunk is stored under h in s;
// Get on an absent hash yields the empty chunk rather than an error.
// The input parameter mirrors assertInputInStore's signature and is not
// otherwise used here.
func assertInputNotInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
	chunk, err := s.Get(context.Background(), h)
	assert.NoError(err)
	assert.True(chunk.IsEmpty(), "Shouldn't get non-empty chunk for %s: %v", h.String(), chunk)
}
-15
View File
@@ -25,25 +25,10 @@ import (
"context"
"sync/atomic"
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/hash"
)
func assertInputInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
chunk, err := s.Get(context.Background(), h)
assert.NoError(err)
assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", h.String())
assert.Equal(input, string(chunk.Data()))
}
func assertInputNotInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
chunk, err := s.Get(context.Background(), h)
assert.NoError(err)
assert.True(chunk.IsEmpty(), "Shouldn't get non-empty chunk for %s: %v", h.String(), chunk)
}
type TestStorage struct {
MemoryStorage
}

Some files were not shown because too many files have changed in this diff Show More