mirror of https://github.com/dolthub/dolt.git (synced 2026-05-13 03:10:03 -05:00)

Merge branch 'main' into fulghum/schema-merge
@@ -3,8 +3,6 @@ name: Check Formatting, Committers and Generated Code
 on:
   pull_request:
     branches: [ main ]
-# pull_request_target:
-#   types: [opened, edited, reopened]
 
 concurrency:
   group: ci-check-repo-${{ github.event.pull_request.number || github.ref }}
@@ -107,7 +105,7 @@ jobs:
         ref: ${{ github.event.pull_request.head.ref || github.ref }}
         repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
         submodules: true
-        token: ${{ secrets.GITHUB_TOKEN }}
+        token: ${{ secrets.REPO_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
     - name: Run go mod tidy
       run: go mod tidy
       working-directory: ./go
Generated  +417
@@ -2072,6 +2072,215 @@ Library.
 = LICENSE bcd6f24ec7cb31e4eac53a4e067489b1ddd360b968ceb45faf5645ec =
 ================================================================================
 
+================================================================================
+= github.com/dolthub/go-icu-regex licensed under: =
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+= LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 =
+================================================================================
+
 ================================================================================
 = github.com/dolthub/go-mysql-server licensed under: =
 
@@ -6892,6 +7101,214 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 = LICENSE 8324b31a3793e08aae6a3c5bad20c4f41d089fd801d4d24c21aa6ea2 =
 ================================================================================
 
+================================================================================
+= github.com/tetratelabs/wazero licensed under: =
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2020-2021 wazero authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+= LICENSE 06e378fe3dca2626ad24b9814356eb2d02a762c13234b0d13cf821e0 =
+================================================================================
+
 ================================================================================
 = github.com/tidwall/gjson licensed under: =
 
@@ -138,7 +138,10 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
 		return HandleVErrAndExitCode(errhand.BuildDError("error: failed to read refs from db").AddCause(err).Build(), nil)
 	}
 
-	currentBranch := dEnv.RepoStateReader().CWBHeadRef()
+	currentBranch, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return HandleVErrAndExitCode(errhand.BuildDError("error: failed to read refs from db").AddCause(err).Build(), nil)
+	}
 	sort.Slice(branches, func(i, j int) bool {
 		return branches[i].String() < branches[j].String()
 	})
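The hunk above is the first instance of a refactor that repeats across this commit: RepoStateReader.CWBHeadRef() previously returned the current working branch's head ref directly, and now returns an error alongside it, so every call site gains an explicit check. A minimal, self-contained sketch of the new calling convention — the DoltRef and RepoStateReader stand-ins below are illustrative, not the real dolt types, which live under go/libraries/doltcore/env:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Illustrative stand-in for a branch reference.
    type DoltRef struct{ path string }

    func (r DoltRef) GetPath() string { return r.path }

    type RepoStateReader interface {
    	// Old shape: CWBHeadRef() DoltRef. New shape: the lookup can fail,
    	// so callers must handle the error instead of assuming a valid ref.
    	CWBHeadRef() (DoltRef, error)
    }

    type stateReader struct{ head *DoltRef }

    func (s stateReader) CWBHeadRef() (DoltRef, error) {
    	if s.head == nil {
    		return DoltRef{}, errors.New("repo state: no current working branch")
    	}
    	return *s.head, nil
    }

    func main() {
    	var rsr RepoStateReader = stateReader{head: &DoltRef{path: "main"}}
    	headRef, err := rsr.CWBHeadRef()
    	if err != nil {
    		fmt.Println("error:", err)
    		return
    	}
    	fmt.Println("On branch", headRef.GetPath())
    }

Each call site in the hunks that follow maps the error into its own return shape (exit code, VerboseError, or plain error), rather than sharing one handler.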
@@ -172,7 +175,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
 	}
 
 	if verbose {
-		cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
+		cm, err := dEnv.DoltDB.Resolve(ctx, cs, currentBranch)
 
 		if err == nil {
 			h, err := cm.HashOf()
@@ -195,7 +198,11 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
 }
 
 func printCurrentBranch(dEnv *env.DoltEnv) int {
-	cli.Println(dEnv.RepoStateReader().CWBHeadRef().GetPath())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil)
+	}
+	cli.Println(headRef.GetPath())
 	return 0
 }
@@ -245,7 +252,7 @@ func moveBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR
 	force := apr.Contains(cli.ForceFlag)
 	src := apr.Arg(0)
 	dest := apr.Arg(1)
-	err := actions.RenameBranch(ctx, dEnv.DbData(), src, apr.Arg(1), dEnv, force)
+	err := actions.RenameBranch(ctx, dEnv.DbData(), src, apr.Arg(1), dEnv, force, nil)
 
 	var verr errhand.VerboseError
 	if err != nil {
@@ -306,7 +313,7 @@ func deleteBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPa
 	err := actions.DeleteBranch(ctx, dEnv.DbData(), brName, actions.DeleteOptions{
 		Force:  force,
 		Remote: apr.Contains(cli.RemoteParam),
-	}, dEnv)
+	}, dEnv, nil)
 
 	if err != nil {
 		var verr errhand.VerboseError
@@ -379,7 +386,7 @@ func createBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars
 		}
 	}
 
-	err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, apr.Contains(cli.ForceFlag))
+	err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, apr.Contains(cli.ForceFlag), nil)
 	if err != nil {
 		return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usage)
 	}
@@ -119,7 +119,10 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin
 	if err != nil {
 		return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt)
 	}
-	headRef := dEnv.RepoStateReader().CWBHeadRef()
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil)
+	}
 	ws, err := dEnv.WorkingSet(ctx)
 	if err != nil {
 		HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt)
@@ -177,9 +180,14 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.Ar
 		return verr
 	}
 
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return errhand.BuildDError(err.Error()).Build()
+	}
+
 	// the new branch is checked out at this point
 	if setTrackUpstream {
-		verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, dEnv.RepoStateReader().CWBHeadRef())
+		verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef)
 		if verr != nil {
 			return verr
 		}
@@ -195,7 +203,7 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.Ar
 	if !remoteOk {
 		return nil
 	}
-	verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, dEnv.RepoStateReader().CWBHeadRef())
+	verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef)
 	if verr != nil {
 		return verr
 	}
@@ -230,7 +238,11 @@ func checkoutRemoteBranchOrSuggestNew(ctx context.Context, dEnv *env.DoltEnv, na
 		if verr != nil {
 			return verr
 		}
-		return SetRemoteUpstreamForBranchRef(dEnv, remoteRefs[0].GetRemote(), remoteRefs[0].GetBranch(), dEnv.RepoStateReader().CWBHeadRef())
+		headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+		if err != nil {
+			return errhand.BuildDError(err.Error()).Build()
+		}
+		return SetRemoteUpstreamForBranchRef(dEnv, remoteRefs[0].GetRemote(), remoteRefs[0].GetBranch(), headRef)
 	} else {
 		// TODO : add hint of using `dolt checkout --track <remote>/<branch>` when --track flag is supported
 		return errhand.BuildDError("'%s' matched multiple (%v) remote tracking branches", name, len(remoteRefs)).Build()
@@ -238,7 +250,7 @@ func checkoutRemoteBranchOrSuggestNew(ctx context.Context, dEnv *env.DoltEnv, na
 }
 
 func checkoutNewBranchFromStartPt(ctx context.Context, dEnv *env.DoltEnv, newBranch, startPt string) errhand.VerboseError {
-	err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, false)
+	err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, false, nil)
 	if err != nil {
 		return errhand.BuildDError(err.Error()).Build()
 	}
@@ -183,7 +183,11 @@ func getCherryPickedRootValue(ctx context.Context, dEnv *env.DoltEnv, workingRoo
 	if err != nil {
 		return nil, "", err
 	}
-	cherryCm, err := dEnv.DoltDB.Resolve(ctx, cherrySpec, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return nil, "", err
+	}
+	cherryCm, err := dEnv.DoltDB.Resolve(ctx, cherrySpec, headRef)
 	if err != nil {
 		return nil, "", err
 	}
@@ -221,14 +221,19 @@ func performCommit(ctx context.Context, commandStr string, args []string, dEnv *
 		return handleCommitErr(ctx, dEnv, err, usage)
 	}
 
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return handleCommitErr(ctx, dEnv, err, usage)
+	}
 	_, err = dEnv.DoltDB.CommitWithWorkingSet(
 		ctx,
-		dEnv.RepoStateReader().CWBHeadRef(),
+		headRef,
 		ws.Ref(),
 		pendingCommit,
 		ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(),
 		prevHash,
 		dEnv.NewWorkingSetMeta(fmt.Sprintf("Updated by %s %s", commandStr, strings.Join(args, " "))),
+		nil,
 	)
 	if err != nil {
 		if apr.Contains(cli.AmendFlag) {
@@ -380,7 +385,10 @@ func buildInitalCommitMsg(ctx context.Context, dEnv *env.DoltEnv, suggestedMsg s
 		return "", err
 	}
 
-	currBranch := dEnv.RepoStateReader().CWBHeadRef()
+	currBranch, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return "", err
+	}
 	initialCommitMessage := fmt.Sprintf("%s\n# Please enter the commit message for your changes. Lines starting"+
 		"\n# with '#' will be ignored, and an empty message aborts the commit."+
 		"\n# On branch %s\n#\n", suggestedMsg, currBranch)
@@ -186,7 +186,12 @@ func getNerf(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseResu
 		return nil, err
 	}
 
-	cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return nil, err
+	}
+
+	cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef)
 	if err != nil {
 		return nil, err
 	}
@@ -123,7 +123,11 @@ func (cmd LogCmd) logWithLoggerFunc(ctx context.Context, commandStr string, args
 		return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
 	}
 	if len(opts.commitSpecs) == 0 {
-		opts.commitSpecs = append(opts.commitSpecs, dEnv.RepoStateReader().CWBHeadSpec())
+		headRef, err := dEnv.RepoStateReader().CWBHeadSpec()
+		if err != nil {
+			return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+		}
+		opts.commitSpecs = append(opts.commitSpecs, headRef)
 	}
 	if len(opts.tableName) > 0 {
 		return handleErrAndExit(logTableCommits(ctx, dEnv, opts))
@@ -245,7 +249,10 @@ func (opts *logOpts) parseRefsAndTable(ctx context.Context, apr *argparser.ArgPa
 
 			opts.excludingCommitSpecs = append(opts.excludingCommitSpecs, notCs)
 		} else {
-			argIsRef := actions.IsValidRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader())
+			argIsRef, err := actions.IsValidRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader())
+			if err != nil {
+				return nil
+			}
 			// <ref>
 			if argIsRef && !seenRefs[arg] {
 				cs, err := getCommitSpec(arg)
@@ -327,8 +334,12 @@ func getHashToRefs(ctx context.Context, dEnv *env.DoltEnv, decorationLevel strin
 func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
 	hashes := make([]hash.Hash, len(opts.commitSpecs))
 
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return handleErrAndExit(err)
+	}
 	for i, cs := range opts.commitSpecs {
-		commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
+		commit, err := dEnv.DoltDB.Resolve(ctx, cs, headRef)
 		if err != nil {
 			cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
 			return 1
@@ -360,7 +371,7 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
 	excludingHashes := make([]hash.Hash, len(opts.excludingCommitSpecs))
 
 	for i, excludingSpec := range opts.excludingCommitSpecs {
-		excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, dEnv.RepoStateReader().CWBHeadRef())
+		excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, headRef)
 		if err != nil {
 			cli.PrintErrln(color.HiRedString("Fatal error: cannot get excluding commit for current branch."))
 			return 1
@@ -383,7 +394,6 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int {
 		return 1
 	}
 
-	headRef := dEnv.RepoStateReader().CWBHeadRef()
 	cwbHash, err := dEnv.DoltDB.GetHashForRefStr(ctx, headRef.String())
 
 	if err != nil {
@@ -441,8 +451,13 @@ func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (
 func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) error {
 	hashes := make([]hash.Hash, len(opts.commitSpecs))
 
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
+
 	for i, cs := range opts.commitSpecs {
-		commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
+		commit, err := dEnv.DoltDB.Resolve(ctx, cs, headRef)
 		if err != nil {
 			return err
 		}
@@ -121,7 +121,12 @@ func getRootForCommitSpecStr(ctx context.Context, csStr string, dEnv *env.DoltEn
 		return "", nil, bdr.AddCause(err).Build()
 	}
 
-	cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return "", nil, errhand.VerboseErrorFromError(err)
+	}
+
+	cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef)
 
 	if err != nil {
 		return "", nil, errhand.BuildDError(`Unable to resolve "%s"`, csStr).AddCause(err).Build()
@@ -159,7 +159,12 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
 		return handleCommitErr(ctx, dEnv, err, usage)
 	}
 
-	suggestedMsg := fmt.Sprintf("Merge branch '%s' into %s", commitSpecStr, dEnv.RepoStateReader().CWBHeadRef().GetPath())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return handleCommitErr(ctx, dEnv, err, usage)
+	}
+
+	suggestedMsg := fmt.Sprintf("Merge branch '%s' into %s", commitSpecStr, headRef.GetPath())
 	msg := ""
 	if m, ok := apr.GetValue(cli.MessageArg); ok {
 		msg = m
@@ -529,15 +534,21 @@ func executeNoFFMergeAndCommit(ctx context.Context, dEnv *env.DoltEnv, spec *mer
 		Email: spec.Email,
 	})
 
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return tblToStats, err
+	}
+
 	wsHash, err := ws.HashOf()
 	_, err = dEnv.DoltDB.CommitWithWorkingSet(
 		ctx,
-		dEnv.RepoStateReader().CWBHeadRef(),
+		headRef,
 		ws.Ref(),
 		pendingCommit,
 		ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(),
 		wsHash,
 		dEnv.NewWorkingSetMeta(msg),
+		nil,
 	)
 
 	if err != nil {
@@ -119,7 +119,12 @@ func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit,
 		return nil, errhand.BuildDError("'%s' is not a valid commit", cSpecStr).Build()
 	}
 
-	cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return nil, errhand.VerboseErrorFromError(err)
+	}
+
+	cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, headRef)
 	if err != nil {
 		if errors.Is(err, doltdb.ErrInvalidAncestorSpec) {
 			return nil, errhand.BuildDError("'%s' could not resolve ancestor spec", cSpecStr).Build()
@@ -202,7 +202,12 @@ func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec)
 		return err
 	}
 
-	suggestedMsg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, dEnv.RepoStateReader().CWBHeadRef().GetPath())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
+
+	suggestedMsg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, headRef.GetPath())
 	tblStats, err := performMerge(ctx, dEnv, mergeSpec, suggestedMsg)
 	printSuccessStats(tblStats)
 	if err != nil {
@@ -96,16 +96,20 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d
 	var verr errhand.VerboseError
 	switch err {
 	case env.ErrNoUpstreamForBranch:
-		currentBranch := dEnv.RepoStateReader().CWBHeadRef()
-		remoteName := "<remote>"
-		if defRemote, verr := env.GetDefaultRemote(dEnv.RepoStateReader()); verr == nil {
-			remoteName = defRemote.Name
-		}
-		verr = errhand.BuildDError("fatal: The current branch " + currentBranch.GetPath() + " has no upstream branch.\n" +
-			"To push the current branch and set the remote as upstream, use\n" +
-			"\tdolt push --set-upstream " + remoteName + " " + currentBranch.GetPath() + "\n" +
-			"To have this happen automatically for branches without a tracking\n" +
-			"upstream, see 'push.autoSetupRemote' in 'dolt config --help'.").Build()
+		currentBranch, err := dEnv.RepoStateReader().CWBHeadRef()
+		if err != nil {
+			verr = errhand.BuildDError("fatal: The current branch could not be identified").AddCause(err).Build()
+		} else {
+			remoteName := "<remote>"
+			if defRemote, verr := env.GetDefaultRemote(dEnv.RepoStateReader()); verr == nil {
+				remoteName = defRemote.Name
+			}
+			verr = errhand.BuildDError("fatal: The current branch " + currentBranch.GetPath() + " has no upstream branch.\n" +
+				"To push the current branch and set the remote as upstream, use\n" +
+				"\tdolt push --set-upstream " + remoteName + " " + currentBranch.GetPath() + "\n" +
+				"To have this happen automatically for branches without a tracking\n" +
+				"upstream, see 'push.autoSetupRemote' in 'dolt config --help'.").Build()
+		}
 
 	case env.ErrInvalidSetUpstreamArgs:
 		verr = errhand.BuildDError("error: --set-upstream requires <remote> and <refspec> params.").SetPrintUsage().Build()
@@ -103,7 +103,11 @@ func (cmd ResetCmd) Exec(ctx context.Context, commandStr string, args []string,
 	} else {
 		if apr.NArg() == 1 {
 			ref := apr.Arg(0)
-			if actions.IsValidRef(ctx, ref, dEnv.DoltDB, dEnv.RepoStateReader()) {
+			isValidRef, err := actions.IsValidRef(ctx, ref, dEnv.DoltDB, dEnv.RepoStateReader())
+			if err != nil {
+				return handleErrAndExit(err)
+			}
+			if isValidRef {
 				return handleResetSoftToRef(ctx, dEnv, ref, usage)
 			}
 		}
@@ -145,7 +149,10 @@ func handleResetHard(ctx context.Context, apr *argparser.ArgParseResults, usage
 		arg = apr.Arg(0)
 	}
 
-	headRef := dEnv.RepoStateReader().CWBHeadRef()
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+	}
 	ws, err := dEnv.WorkingSet(ctx)
 	if err != nil {
 		return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
@@ -159,7 +159,11 @@ func parseShowArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
 
 func showObjects(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts) error {
 	if len(opts.specRefs) == 0 {
-		return showCommitSpec(ctx, dEnv, opts, dEnv.RepoStateReader().CWBHeadSpec())
+		headRef, err := dEnv.RepoStateReader().CWBHeadSpec()
+		if err != nil {
+			return err
+		}
+		return showCommitSpec(ctx, dEnv, opts, headRef)
 	}
 
 	for _, specRef := range opts.specRefs {
@@ -243,7 +247,12 @@ func showSpecRef(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, specRef
 
 func showCommitSpec(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, commitSpec *doltdb.CommitSpec) error {
 
-	commit, err := dEnv.DoltDB.Resolve(ctx, commitSpec, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
+
+	commit, err := dEnv.DoltDB.Resolve(ctx, commitSpec, headRef)
 	if err != nil {
 		return err
 	}
@@ -283,7 +292,10 @@ func showCommit(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, comm *do
 		return err
 	}
 
-	headRef := dEnv.RepoStateReader().CWBHeadRef()
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
 	cwbHash, err := dEnv.DoltDB.GetHashForRefStr(ctx, headRef.String())
 	if err != nil {
 		return err
@@ -176,16 +176,16 @@ func (cmd SqlCmd) RequiresRepo() bool {
 func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
 	ap := cmd.ArgParser()
 	help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, sqlDocs, ap))
-	apr, verr := cmd.handleLegacyArguments(ap, commandStr, args)
-	if verr != nil {
-		if verr == argparser.ErrHelp {
+	apr, err := cmd.handleLegacyArguments(ap, commandStr, args)
+	if err != nil {
+		if err == argparser.ErrHelp {
 			help()
 			return 0
 		}
-		return HandleVErrAndExitCode(verr, usage)
+		return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
 	}
 
-	err := validateSqlArgs(apr)
+	err = validateSqlArgs(apr)
 	if err != nil {
 		return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
 	}
@@ -286,7 +286,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
 }
 
 // handleLegacyArguments is a temporary function to parse args, and print a error and explanation when the old form is provided.
-func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, errhand.VerboseError) {
+func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, error) {
 
 	apr, err := ap.Parse(args)
@@ -312,12 +312,12 @@ func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr stri
 
 	if newErr != nil {
 		// Neither form of the arguments works. Print the usage and the error of the first parse.
-		return nil, errhand.VerboseErrorFromError(err)
+		return nil, err
 	}
 
 	// The legacy form worked, so print an error and exit.
 	err = fmt.Errorf("SQL arguments have changed. Move --data-dir, --doltcfg-dir to before the sql sub command.")
-	return nil, errhand.VerboseErrorFromError(err)
+	return nil, err
 }
 
 	return apr, nil
@@ -16,27 +16,51 @@ package sqlserver
 
 import (
 	"fmt"
+	"sync"
+	"time"
 
-	"github.com/dolthub/dolt/go/libraries/utils/version"
-
 	"github.com/dolthub/go-mysql-server/server"
 	"github.com/prometheus/client_golang/prometheus"
 
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/clusterdb"
+	"github.com/dolthub/dolt/go/libraries/utils/version"
 )
 
+const (
+	clusterUpdateInterval = time.Second * 5
+
+	dbLabel     = "database"
+	roleLabel   = "role"
+	remoteLabel = "remote"
+)
+
 var _ server.ServerEventListener = (*metricsListener)(nil)
 
 type metricsListener struct {
 	labels prometheus.Labels
 
 	cntConnections         prometheus.Counter
 	cntDisconnects         prometheus.Counter
 	gaugeConcurrentConn    prometheus.Gauge
 	gaugeConcurrentQueries prometheus.Gauge
 	histQueryDur           prometheus.Histogram
 	gaugeVersion           prometheus.Gauge
 
+	// replication metrics
+	isReplicaGauges      *prometheus.GaugeVec
+	replicationLagGauges *prometheus.GaugeVec
+
+	// used in updating cluster metrics
+	clusterStatus  clusterdb.ClusterStatusProvider
+	mu             *sync.Mutex
+	done           bool
+	clusterSeenDbs map[string]struct{}
 }
 
-func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsListener, error) {
+func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStatus clusterdb.ClusterStatusProvider) (*metricsListener, error) {
 	ml := &metricsListener{
 		labels: labels,
 		cntConnections: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: "dss_connects",
 			Help: "Count of server connects",
@@ -68,6 +92,19 @@ func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsLi
 			Help:        "The version of dolt currently running on the machine",
 			ConstLabels: labels,
 		}),
+		replicationLagGauges: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name:        "dss_replication_lag",
+			Help:        "The reported replication lag of this server when it is a primary to the given standby.",
+			ConstLabels: labels,
+		}, []string{dbLabel, remoteLabel}),
+		isReplicaGauges: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Name:        "dss_is_replica",
+			Help:        "one if the server is currently in this role, zero otherwise",
+			ConstLabels: labels,
+		}, []string{dbLabel}),
+		clusterStatus:  clusterStatus,
+		mu:             &sync.Mutex{},
+		clusterSeenDbs: make(map[string]struct{}),
 	}
 
 	u32Version, err := version.Encode(versionStr)
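For context on the hunk above: a Prometheus GaugeVec maintains one time series per unique label combination, so dss_is_replica and dss_replication_lag each fan out per database (and per remote). A minimal, self-contained sketch of that bookkeeping, including the DeletePartialMatch cleanup this diff uses later for deleted databases — the "mydb" value is illustrative, the metric name and label come from the diff:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	// Same shape as dss_is_replica above: one gauge series per database label.
    	isReplica := prometheus.NewGaugeVec(prometheus.GaugeOpts{
    		Name: "dss_is_replica",
    		Help: "one if the server is currently in this role, zero otherwise",
    	}, []string{"database"})
    	prometheus.MustRegister(isReplica)

    	// Creates (or fetches) the series for this database and sets its value.
    	isReplica.WithLabelValues("mydb").Set(1.0)

    	// When a database disappears, drop every series carrying its label.
    	deleted := isReplica.DeletePartialMatch(prometheus.Labels{"database": "mydb"})
    	fmt.Println("series removed:", deleted)
    }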
@@ -87,11 +124,63 @@ func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsLi
 	prometheus.MustRegister(ml.gaugeConcurrentConn)
 	prometheus.MustRegister(ml.gaugeConcurrentQueries)
 	prometheus.MustRegister(ml.histQueryDur)
+	prometheus.MustRegister(ml.replicationLagGauges)
+	prometheus.MustRegister(ml.isReplicaGauges)
 
+	go func() {
+		for ml.updateReplMetrics() {
+			time.Sleep(clusterUpdateInterval)
+		}
+	}()
+
 	ml.gaugeVersion.Set(f64Version)
 	return ml, nil
 }
 
+func (ml *metricsListener) updateReplMetrics() bool {
+	ml.mu.Lock()
+	defer ml.mu.Unlock()
+
+	if ml.done {
+		return false
+	}
+
+	perDbStatus := ml.clusterStatus.GetClusterStatus()
+	if perDbStatus == nil {
+		return true
+	}
+
+	dbNames := make(map[string]struct{})
+	for _, status := range perDbStatus {
+		dbName := status.Database
+		dbNames[dbName] = struct{}{}
+
+		if status.Role == string(cluster.RolePrimary) {
+			ml.isReplicaGauges.WithLabelValues(status.Database).Set(0.0)
+
+			if status.ReplicationLag == nil {
+				ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(-1.0)
+			} else {
+				ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(float64(status.ReplicationLag.Milliseconds()))
+			}
+		} else {
+			ml.isReplicaGauges.WithLabelValues(status.Database).Set(1.0)
+			ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(-1.0)
+		}
+	}
+
+	// deregister metrics for deleted databases
+	for db := range ml.clusterSeenDbs {
+		if _, ok := dbNames[db]; !ok {
+			ml.isReplicaGauges.DeletePartialMatch(prometheus.Labels{"database": db})
+			ml.replicationLagGauges.DeletePartialMatch(prometheus.Labels{"database": db})
+		}
+	}
+	ml.clusterSeenDbs = dbNames
+
+	return true
+}
+
 func (ml *metricsListener) ClientConnected() {
 	ml.gaugeConcurrentConn.Add(1.0)
 	ml.cntConnections.Add(1.0)
@@ -118,4 +207,16 @@ func (ml *metricsListener) Close() {
 	prometheus.Unregister(ml.gaugeConcurrentConn)
 	prometheus.Unregister(ml.gaugeConcurrentQueries)
 	prometheus.Unregister(ml.histQueryDur)
+
+	ml.closeReplicationMetrics()
 }
+
+func (ml *metricsListener) closeReplicationMetrics() {
+	ml.mu.Lock()
+	defer ml.mu.Unlock()
+
+	prometheus.Unregister(ml.replicationLagGauges)
+	prometheus.Unregister(ml.isReplicaGauges)
+
+	ml.done = true
+}
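The two hunks above cooperate through the mutex-guarded done flag: the goroutine spawned in newMetricsListener keeps polling as long as updateReplMetrics returns true, and closeReplicationMetrics flips done under the same lock, so shutdown never races with an in-flight update. A stripped-down, runnable sketch of that handshake, with illustrative names standing in for the listener:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type poller struct {
    	mu   sync.Mutex
    	done bool
    }

    // update does one unit of work, or reports false once close() has run.
    func (p *poller) update() bool {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	if p.done {
    		return false // ends the polling loop below
    	}
    	fmt.Println("refreshing metrics")
    	return true
    }

    func (p *poller) close() {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	p.done = true
    }

    func main() {
    	p := &poller{}
    	go func() {
    		for p.update() {
    			time.Sleep(100 * time.Millisecond) // stands in for clusterUpdateInterval
    		}
    	}()
    	time.Sleep(250 * time.Millisecond)
    	p.close()
    	time.Sleep(150 * time.Millisecond) // give the loop a tick to observe done
    }

Because the flag is only read and written under the mutex, the loop exits deterministically after close() rather than relying on an unsynchronized boolean.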
@@ -27,6 +27,7 @@ import (
 
 	"github.com/dolthub/go-mysql-server/server"
 	"github.com/dolthub/go-mysql-server/sql"
+	"github.com/dolthub/go-mysql-server/sql/types"
 	"github.com/dolthub/vitess/go/mysql"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
@@ -40,6 +41,7 @@ import (
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster"
 	_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
+	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
 	"github.com/dolthub/dolt/go/libraries/doltcore/sqlserver"
 )
@@ -87,6 +89,32 @@ func Serve(
 	}
 	logrus.SetFormatter(LogFormat{})
 
+	sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{
+		{
+			Name:              dsess.DoltLogLevel,
+			Scope:             sql.SystemVariableScope_Global,
+			Dynamic:           true,
+			SetVarHintApplies: false,
+			Type: types.NewSystemEnumType(dsess.DoltLogLevel,
+				logrus.PanicLevel.String(),
+				logrus.FatalLevel.String(),
+				logrus.ErrorLevel.String(),
+				logrus.WarnLevel.String(),
+				logrus.InfoLevel.String(),
+				logrus.DebugLevel.String(),
+				logrus.TraceLevel.String(),
+			),
+			Default: logrus.GetLevel().String(),
+			NotifyChanged: func(scope sql.SystemVariableScope, v sql.SystemVarValue) {
+				if level, err := logrus.ParseLevel(v.Val.(string)); err == nil {
+					logrus.SetLevel(level)
+				} else {
+					logrus.Warnf("could not parse requested log level %s as a log level. dolt_log_level variable value and logging behavior will diverge.", v.Val.(string))
+				}
+			},
+		},
+	})
+
 	var mrEnv *env.MultiRepoEnv
 	var err error
 	fs := dEnv.FS
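The block above registers dolt_log_level as a dynamic global system variable whose NotifyChanged hook re-applies the logrus level at runtime — so a connected client can change server verbosity with something like SET GLOBAL dolt_log_level = 'debug'. The core of that hook, extracted into a runnable sketch (only the logrus calls are taken from the diff; the wrapper function and messages are illustrative):

    package main

    import (
    	"github.com/sirupsen/logrus"
    )

    // applyLogLevel mirrors the NotifyChanged callback: parse the requested
    // level and apply it process-wide, warning rather than failing on bad input.
    func applyLogLevel(requested string) {
    	if level, err := logrus.ParseLevel(requested); err == nil {
    		logrus.SetLevel(level)
    	} else {
    		logrus.Warnf("could not parse requested log level %s as a log level", requested)
    	}
    }

    func main() {
    	applyLogLevel("debug")
    	logrus.Debug("debug logging is now visible")
    }

Since the variable's type is an enum built from the logrus level names, the SQL layer should reject values that ParseLevel cannot handle; the warning branch is a defensive fallback.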
@@ -178,7 +206,7 @@ func Serve(
 	labels := serverConfig.MetricsLabels()
 
 	var listener *metricsListener
-	listener, startError = newMetricsListener(labels, version)
+	listener, startError = newMetricsListener(labels, version, clusterController)
 	if startError != nil {
 		cli.Println(startError)
 		return
@@ -133,7 +133,11 @@ func applyStashAtIdx(ctx context.Context, dEnv *env.DoltEnv, curWorkingRoot *dol
 	if err != nil {
 		return false, err
 	}
-	parentCommit, err := dEnv.DoltDB.Resolve(ctx, headCommitSpec, dEnv.RepoStateReader().CWBHeadRef())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return false, err
+	}
+	parentCommit, err := dEnv.DoltDB.Resolve(ctx, headCommitSpec, headRef)
 	if err != nil {
 		return false, err
 	}
@@ -91,13 +91,15 @@ func (cmd StashCmd) EventType() eventsapi.ClientEventType {
 
 // Exec executes the command
 func (cmd StashCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
-	ap := cmd.ArgParser()
-	help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap))
-	apr := cli.ParseArgsOrDie(ap, args, help)
-
 	if !dEnv.DoltDB.Format().UsesFlatbuffers() {
 		cli.PrintErrln(ErrStashNotSupportedForOldFormat.Error())
 		return 1
 	}
+	ap := cmd.ArgParser()
+	help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap))
+	apr := cli.ParseArgsOrDie(ap, args, help)
 
 	if dEnv.IsLocked() {
 		return commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
 	}
@@ -222,7 +224,10 @@ func stashChanges(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars
 		}
 	}
 
-	curHeadRef := dEnv.RepoStateReader().CWBHeadRef()
+	curHeadRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
 	curBranchName := curHeadRef.String()
 	commitSpec, err := doltdb.NewCommitSpec(curBranchName)
 	if err != nil {
@@ -95,9 +95,14 @@ func (cmd StatusCmd) Exec(ctx context.Context, commandStr string, args []string,
 }
 
 func PrintStatus(ctx context.Context, dEnv *env.DoltEnv, stagedTbls, notStagedTbls []diff.TableDelta, showIgnoredTables bool, as merge.ArtifactStatus) error {
-	cli.Printf(branchHeader, dEnv.RepoStateReader().CWBHeadRef().GetPath())
+	headRef, err := dEnv.RepoStateReader().CWBHeadRef()
+	if err != nil {
+		return err
+	}
 
-	err := printRemoteRefTrackingInfo(ctx, dEnv)
+	cli.Printf(branchHeader, headRef.GetPath())
+
+	err = printRemoteRefTrackingInfo(ctx, dEnv)
 	if err != nil {
 		return err
 	}
@@ -141,7 +146,10 @@ func handleStatusVErr(err error) int {
|
||||
func printRemoteRefTrackingInfo(ctx context.Context, dEnv *env.DoltEnv) error {
|
||||
ddb := dEnv.DoltDB
|
||||
rsr := dEnv.RepoStateReader()
|
||||
headRef := rsr.CWBHeadRef()
|
||||
headRef, err := rsr.CWBHeadRef()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
branches, err := rsr.GetBranches()
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
+10
-1
@@ -62,7 +62,7 @@ import (
)

const (
    Version = "1.0.0"
    Version = "1.1.0"
)

var dumpDocsCommand = &commands.DumpDocsCmd{}

@@ -427,6 +427,15 @@ func runMain() int {
    globalArgs, args, initCliContext, printUsage, err := splitArgsOnSubCommand(args)
    if printUsage {
        doltCommand.PrintUsage("dolt")
        _, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser))

        specialMsg := `
Dolt subcommands are in transition to using the flags listed below as global flags.
The sql subcommand is currently the only command that uses these flags. All other commands will ignore them.
`
        cli.Println(specialMsg)
        usage()

        return 0
    }
    if err != nil {
@@ -59,7 +59,7 @@ require (
    github.com/cespare/xxhash v1.1.0
    github.com/creasty/defaults v1.6.0
    github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
    github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553
    github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370
    github.com/dolthub/swiss v0.1.0
    github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
    github.com/jmoiron/sqlx v1.3.4

@@ -95,6 +95,7 @@ require (
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.2 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac // indirect
    github.com/dolthub/jsonpath v0.0.1 // indirect
    github.com/dolthub/maphash v0.0.0-20221220182448-74e1e1ea1577 // indirect
    github.com/go-fonts/liberation v0.2.0 // indirect

@@ -125,6 +126,7 @@ require (
    github.com/prometheus/common v0.37.0 // indirect
    github.com/prometheus/procfs v0.8.0 // indirect
    github.com/rs/xid v1.4.0 // indirect
    github.com/tetratelabs/wazero v1.0.3 // indirect
    github.com/tidwall/gjson v1.14.4 // indirect
    github.com/tidwall/match v1.1.1 // indirect
    github.com/tidwall/pretty v1.2.1 // indirect
@@ -166,8 +166,12 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553 h1:dLgqnwh32cJPrjV3dja/hWluXwcrG1QrIKd29Vc5tfw=
github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4=
github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac h1:/bsG4AyV5MesUPw7LSkxHKMsP9f+LSLrsMbBxLP6+Mk=
github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac/go.mod h1:xLKpPutKiF9FxxcLG3gf/JA95YZQNAqBegkDRe1AZF4=
github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854 h1:2AMV4KSxCp6sHA3eWlKX/93HEVNacfqFJhSQw39VRh4=
github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I=
github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370 h1:YGNpsPKq7u5cAow/5Sjb3ncu5Qh6SwCp5jBBMhtCnRs=
github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0=

@@ -623,6 +627,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/tealeg/xlsx v1.0.5 h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE=
github.com/tealeg/xlsx v1.0.5/go.mod h1:btRS8dz54TDnvKNosuAqxrM1QgN1udgk9O34bDCnORM=
github.com/tetratelabs/wazero v1.0.3 h1:IWmaxc/5vKg71DE+c0SLjjLFAA3u3tD/Zegpgif2Wpo=
github.com/tetratelabs/wazero v1.0.3/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ=
github.com/thepudds/swisstable v0.0.0-20221011152303-9c77dc657777 h1:5u+6YWU2faS+Sr/x8j9yalMpSDUkatNOZWXV3wMUCGQ=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
@@ -219,7 +219,11 @@ func MaybeResolveRoot(ctx context.Context, rsr env.RepoStateReader, doltDB *dolt
        return nil, false
    }

    cm, err := doltDB.Resolve(ctx, cs, rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err != nil {
        return nil, false
    }
    cm, err := doltDB.Resolve(ctx, cs, headRef)
    if err != nil {
        return nil, false
    }
@@ -398,7 +398,29 @@ func (td TableDelta) CurName() string {
}

func (td TableDelta) HasFKChanges() bool {
    return !fkSlicesAreEqual(td.FromFks, td.ToFks)
    if len(td.FromFks) != len(td.ToFks) {
        return true
    }

    sort.Slice(td.FromFks, func(i, j int) bool {
        return td.FromFks[i].Name < td.FromFks[j].Name
    })
    sort.Slice(td.ToFks, func(i, j int) bool {
        return td.ToFks[i].Name < td.ToFks[j].Name
    })

    fromSchemaMap := td.FromFksParentSch
    fromSchemaMap[td.FromName] = td.FromSch
    toSchemaMap := td.ToFksParentSch
    toSchemaMap[td.ToName] = td.ToSch

    for i := range td.FromFks {
        if !td.FromFks[i].Equals(td.ToFks[i], fromSchemaMap, toSchemaMap) {
            return true
        }
    }

    return false
}

// GetSchemas returns the table's schema at the fromRoot and toRoot, or schema.Empty if the table did not exist.

@@ -538,26 +560,6 @@ func (td TableDelta) GetRowData(ctx context.Context) (from, to durable.Index, er
    return from, to, nil
}

func fkSlicesAreEqual(from, to []doltdb.ForeignKey) bool {
    if len(from) != len(to) {
        return false
    }

    sort.Slice(from, func(i, j int) bool {
        return from[i].Name < from[j].Name
    })
    sort.Slice(to, func(i, j int) bool {
        return to[i].Name < to[j].Name
    })

    for i := range from {
        if !from[i].DeepEquals(to[i]) {
            return false
        }
    }
    return true
}

// SqlSchemaDiff returns a slice of DDL statements that will transform the schema in the from delta to the schema in
// the to delta.
func SqlSchemaDiff(ctx context.Context, td TableDelta, toSchemas map[string]schema.Schema) ([]string, error) {
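The rewritten HasFKChanges keeps the strategy of the removed fkSlicesAreEqual helper: sort both foreign-key slices by constraint name so that a single index-aligned pass decides set equality, but swap DeepEquals for the schema-aware Equals. A minimal, generic sketch of that sort-then-compare idiom (toy types, not the dolt API); note that, as in the original, sorting mutates the input slices in place:

package main

import (
    "fmt"
    "sort"
)

type fk struct{ Name, Def string }

// sameFKs reports whether two foreign-key sets are equal, ignoring order.
// Sorting by a unique key (the constraint name) reduces set comparison to a
// single pairwise pass.
func sameFKs(a, b []fk) bool {
    if len(a) != len(b) {
        return false
    }
    sort.Slice(a, func(i, j int) bool { return a[i].Name < a[j].Name })
    sort.Slice(b, func(i, j int) bool { return b[i].Name < b[j].Name })
    for i := range a {
        if a[i] != b[i] {
            return false
        }
    }
    return true
}

func main() {
    x := []fk{{"fk_b", "b->p"}, {"fk_a", "a->p"}}
    y := []fk{{"fk_a", "a->p"}, {"fk_b", "b->p"}}
    fmt.Println(sameFKs(x, y)) // true: order does not matter
}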
@@ -49,8 +49,8 @@ func NewPushOnWriteHook(destDB *DoltDB, tmpDir string) *PushOnWriteHook {
}

// Execute implements CommitHook, replicates head updates to the destDb field
func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error {
    return pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir)
func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) {
    return nil, pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir)
}

func pushDataset(ctx context.Context, destDB, srcDB datas.Database, ds datas.Dataset, tmpDir string) error {

@@ -135,16 +135,16 @@ func (*AsyncPushOnWriteHook) ExecuteForWorkingSets() bool {
}

// Execute implements CommitHook, replicates head updates to the destDb field
func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error {
func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) {
    addr, _ := ds.MaybeHeadAddr()

    select {
    case ah.ch <- PushArg{ds: ds, db: db, hash: addr}:
    case <-ctx.Done():
        ah.ch <- PushArg{ds: ds, db: db, hash: addr}
        return ctx.Err()
        return nil, ctx.Err()
    }
    return nil
    return nil, nil
}

// HandleError implements CommitHook

@@ -174,12 +174,12 @@ func NewLogHook(msg []byte) *LogHook {
}

// Execute implements CommitHook, writes message to log channel
func (lh *LogHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error {
func (lh *LogHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) {
    if lh.out != nil {
        _, err := lh.out.Write(lh.msg)
        return err
        return nil, err
    }
    return nil
    return nil, nil
}

// HandleError implements CommitHook
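Under the new signature, Execute returns an optional wait callback in addition to an error: synchronous hooks return a nil callback, while hooks that replicate asynchronously can hand back a function the caller may block on later. A minimal sketch of a hook written against that shape, with toy types standing in for datas.Dataset (so everything but the (waitFunc, error) contract is an assumption):

package main

import (
    "context"
    "fmt"
)

type dataset struct{ name string }

// asyncHook launches replication in the background and returns a wait
// callback, mirroring the new Execute contract: (waitFunc, error).
func asyncHook(ctx context.Context, ds dataset) (func(context.Context) error, error) {
    done := make(chan error, 1)
    go func() {
        // Replication work would happen here.
        done <- nil
    }()
    wait := func(ctx context.Context) error {
        select {
        case err := <-done:
            return err
        case <-ctx.Done():
            return ctx.Err() // must return if the context is canceled
        }
    }
    return wait, nil
}

func main() {
    wait, err := asyncHook(context.Background(), dataset{"refs/heads/main"})
    if err != nil {
        fmt.Println("hook failed to start:", err)
        return
    }
    if wait != nil {
        fmt.Println("replication finished:", wait(context.Background()))
    }
}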
@@ -136,7 +136,7 @@ func TestPushOnWriteHook(t *testing.T) {
    ds, err := ddb.db.GetDataset(ctx, "refs/heads/main")
    require.NoError(t, err)

    err = hook.Execute(ctx, ds, ddb.db)
    _, err = hook.Execute(ctx, ds, ddb.db)
    require.NoError(t, err)

    cs, _ = NewCommitSpec(defaultBranch)

@@ -269,7 +269,7 @@ func TestAsyncPushOnWrite(t *testing.T) {
        require.NoError(t, err)
        ds, err := ddb.db.GetDataset(ctx, "refs/heads/main")
        require.NoError(t, err)
        err = hook.Execute(ctx, ds, ddb.db)
        _, err = hook.Execute(ctx, ds, ddb.db)
        require.NoError(t, err)
    }
})
@@ -1079,7 +1079,7 @@ func (ddb *DoltDB) GetRefsOfTypeByNomsRoot(ctx context.Context, refTypeFilter ma

// NewBranchAtCommit creates a new branch with HEAD at the commit given. Branch names must pass IsValidUserBranchName.
// Silently overwrites any existing branch with the same name given, if one exists.
func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit) error {
func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit, replicationStatus *ReplicationStatusController) error {
    if !IsValidBranchRef(branchRef) {
        panic(fmt.Sprintf("invalid branch name %s, use IsValidUserBranchName check", branchRef.String()))
    }

@@ -1124,7 +1124,7 @@ func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef,
    }

    ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot)
    return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta())
    return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta(), replicationStatus)
}

// CopyWorkingSet copies a WorkingSetRef from one ref to another. If `force` is

@@ -1155,15 +1155,15 @@ func (ddb *DoltDB) CopyWorkingSet(ctx context.Context, fromWSRef ref.WorkingSetR
        }
    }

    return ddb.UpdateWorkingSet(ctx, toWSRef, ws, currWsHash, TodoWorkingSetMeta())
    return ddb.UpdateWorkingSet(ctx, toWSRef, ws, currWsHash, TodoWorkingSetMeta(), nil)
}

// DeleteBranch deletes the branch given, returning an error if it doesn't exist.
func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef) error {
    return ddb.deleteRef(ctx, branch)
func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef, replicationStatus *ReplicationStatusController) error {
    return ddb.deleteRef(ctx, branch, replicationStatus)
}

func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef) error {
func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef, replicationStatus *ReplicationStatusController) error {
    ds, err := ddb.db.GetDataset(ctx, dref.String())

    if err != nil {

@@ -1184,7 +1184,7 @@ func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef) error {
        }
    }

    _, err = ddb.db.Delete(ctx, ds)
    _, err = ddb.db.withReplicationStatusController(replicationStatus).Delete(ctx, ds)
    return err
}

@@ -1216,6 +1216,20 @@ func (ddb *DoltDB) NewTagAtCommit(ctx context.Context, tagRef ref.DoltRef, c *Co
    return err
}

type ReplicationStatusController struct {
    // A slice of funcs which can be called to wait for the replication
    // associated with a commithook to complete. Must return if the
    // associated Context is canceled.
    Wait []func(ctx context.Context) error

    // There is an entry here for each function in Wait. If a Wait fails,
    // you can notify the corresponding function in this slice. This might
    // control resiliency behaviors like adaptive retry and timeouts,
    // circuit breakers, etc. and might feed into exposed replication
    // metrics.
    NotifyWaitFailed []func()
}

// UpdateWorkingSet updates the working set with the ref given to the root value given
// |prevHash| is the hash of the expected WorkingSet struct stored in the ref, not the hash of the RootValue there.
func (ddb *DoltDB) UpdateWorkingSet(
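A sketch of how a caller might consume a ReplicationStatusController after a write: drain each registered wait callback under a deadline and invoke the paired NotifyWaitFailed entry on timeout or error. This is illustrative usage, not code from this commit; the timeout policy and the local mirror of the struct are assumptions made so the sketch compiles on its own:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// Local mirror of the ReplicationStatusController shape defined above.
type replicationStatus struct {
    Wait             []func(ctx context.Context) error
    NotifyWaitFailed []func()
}

// waitForReplication drains Wait with a per-callback deadline and invokes the
// paired NotifyWaitFailed entry whenever a wait times out or errors.
func waitForReplication(ctx context.Context, rsc *replicationStatus, timeout time.Duration) {
    for i, wait := range rsc.Wait {
        wctx, cancel := context.WithTimeout(ctx, timeout)
        if err := wait(wctx); err != nil {
            // Feed the failure back so the hook can adapt (retry policy,
            // circuit breaker, metrics, etc.).
            rsc.NotifyWaitFailed[i]()
        }
        cancel()
    }
}

func main() {
    rsc := &replicationStatus{
        Wait: []func(context.Context) error{
            func(context.Context) error { return nil },
            func(context.Context) error { return errors.New("replica unreachable") },
        },
        NotifyWaitFailed: []func(){
            func() { fmt.Println("wait 0 failed") },
            func() { fmt.Println("wait 1 failed") },
        },
    }
    waitForReplication(context.Background(), rsc, time.Second)
}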
@@ -1224,6 +1238,7 @@ func (ddb *DoltDB) UpdateWorkingSet(
    workingSet *WorkingSet,
    prevHash hash.Hash,
    meta *datas.WorkingSetMeta,
    replicationStatus *ReplicationStatusController,
) error {
    ds, err := ddb.db.GetDataset(ctx, workingSetRef.String())
    if err != nil {

@@ -1235,7 +1250,7 @@ func (ddb *DoltDB) UpdateWorkingSet(
        return err
    }

    _, err = ddb.db.UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{
    _, err = ddb.db.withReplicationStatusController(replicationStatus).UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{
        Meta:        meta,
        WorkingRoot: workingRootRef,
        StagedRoot:  stagedRef,

@@ -1255,6 +1270,7 @@ func (ddb *DoltDB) CommitWithWorkingSet(
    commit *PendingCommit, workingSet *WorkingSet,
    prevHash hash.Hash,
    meta *datas.WorkingSetMeta,
    replicationStatus *ReplicationStatusController,
) (*Commit, error) {
    wsDs, err := ddb.db.GetDataset(ctx, workingSetRef.String())
    if err != nil {

@@ -1271,12 +1287,13 @@ func (ddb *DoltDB) CommitWithWorkingSet(
        return nil, err
    }

    commitDataset, _, err := ddb.db.CommitWithWorkingSet(ctx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{
        Meta:        meta,
        WorkingRoot: workingRootRef,
        StagedRoot:  stagedRef,
        MergeState:  mergeState,
    }, prevHash, commit.CommitOptions)
    commitDataset, _, err := ddb.db.withReplicationStatusController(replicationStatus).
        CommitWithWorkingSet(ctx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{
            Meta:        meta,
            WorkingRoot: workingRootRef,
            StagedRoot:  stagedRef,
            MergeState:  mergeState,
        }, prevHash, commit.CommitOptions)

    if err != nil {
        return nil, err

@@ -1310,7 +1327,7 @@ func (ddb *DoltDB) DeleteWorkingSet(ctx context.Context, workingSetRef ref.Worki
}

func (ddb *DoltDB) DeleteTag(ctx context.Context, tag ref.DoltRef) error {
    err := ddb.deleteRef(ctx, tag)
    err := ddb.deleteRef(ctx, tag, nil)

    if err == ErrBranchNotFound {
        return ErrTagNotFound
@@ -1337,7 +1354,7 @@ func (ddb *DoltDB) NewWorkspaceAtCommit(ctx context.Context, workRef ref.DoltRef
}

func (ddb *DoltDB) DeleteWorkspace(ctx context.Context, workRef ref.DoltRef) error {
    err := ddb.deleteRef(ctx, workRef)
    err := ddb.deleteRef(ctx, workRef, nil)

    if err == ErrBranchNotFound {
        return ErrWorkspaceNotFound

@@ -1652,7 +1669,7 @@ func (ddb *DoltDB) RemoveStashAtIdx(ctx context.Context, idx int) error {
// RemoveAllStashes removes the stash list Dataset from the database,
// which is equivalent to removing Stash entries from the stash list.
func (ddb *DoltDB) RemoveAllStashes(ctx context.Context) error {
    err := ddb.deleteRef(ctx, ref.NewStashRef())
    err := ddb.deleteRef(ctx, ref.NewStashRef(), nil)
    if err == ErrBranchNotFound {
        return nil
    }
@@ -137,7 +137,11 @@ func (i prollyArtifactIndex) ConflictCount(ctx context.Context) (uint64, error)
}

func (i prollyArtifactIndex) ConstraintViolationCount(ctx context.Context) (uint64, error) {
    return i.index.CountOfTypes(ctx, prolly.ArtifactTypeForeignKeyViol, prolly.ArtifactTypeUniqueKeyViol, prolly.ArtifactTypeChkConsViol)
    return i.index.CountOfTypes(ctx,
        prolly.ArtifactTypeForeignKeyViol,
        prolly.ArtifactTypeUniqueKeyViol,
        prolly.ArtifactTypeChkConsViol,
        prolly.ArtifactTypeNullViol)
}

func (i prollyArtifactIndex) ClearConflicts(ctx context.Context) (ArtifactIndex, error) {
@@ -29,7 +29,6 @@ var ErrInvalidBranchOrHash = errors.New("string is not a valid branch or hash")
var ErrInvalidHash = errors.New("string is not a valid hash")

var ErrFoundHashNotACommit = errors.New("the value retrieved for this hash is not a commit")

var ErrHashNotFound = errors.New("could not find a value for this hash")
var ErrBranchNotFound = errors.New("branch not found")
var ErrTagNotFound = errors.New("tag not found")

@@ -49,6 +48,8 @@ var ErrIsBehind = errors.New("cannot reverse from b to a. b is a is behind a alr
var ErrUnresolvedConflictsOrViolations = errors.New("merge has unresolved conflicts or constraint violations")
var ErrMergeActive = errors.New("merging is not possible because you have not committed an active merge")

var ErrOperationNotSupportedInDetachedHead = errors.New("this operation is not supported while in a detached head state")

type ErrClientOutOfDate struct {
    RepoVer   FeatureVersion
    ClientVer FeatureVersion
@@ -124,7 +124,86 @@ func (fk ForeignKey) EqualDefs(other ForeignKey) bool {
        fk.OnDelete == other.OnDelete
}

// DeepEquals compares all attributes of a foreign key to another, including name and table names.
// Equals compares this ForeignKey to |other| and returns true if they are equal. Foreign keys can either be in
// a "resolved" state, where the referenced columns in the parent and child tables are identified by column tags,
// or in an "unresolved" state where the reference columns in the parent and child are still identified by strings.
// If one foreign key is resolved and one is unresolved, the logic for comparing them requires resolving the string
// column names to column tags, which is why |fkSchemasByName| and |otherSchemasByName| are passed in. Each of these
// is a map of table schemas for |fk| and |other|, where the child table and every parent table referenced in the
// foreign key is present in the map.
func (fk ForeignKey) Equals(other ForeignKey, fkSchemasByName, otherSchemasByName map[string]schema.Schema) bool {
    // If both FKs are resolved or unresolved, we can just deeply compare them
    if fk.IsResolved() == other.IsResolved() {
        return fk.DeepEquals(other)
    }

    // Otherwise, one FK is resolved and one is not, so we need to work a little harder
    // to calculate equality since their referenced columns are represented differently.
    // First check the attributes that don't change when an FK is resolved or unresolved.
    if fk.Name != other.Name ||
        fk.TableName != other.TableName ||
        fk.ReferencedTableName != other.ReferencedTableName ||
        fk.TableIndex != other.TableIndex ||
        fk.ReferencedTableIndex != other.ReferencedTableIndex ||
        fk.OnUpdate != other.OnUpdate ||
        fk.OnDelete != other.OnDelete {
        return false
    }

    // Sort out which FK is resolved and which is not
    var resolvedFK, unresolvedFK ForeignKey
    var resolvedSchemasByName map[string]schema.Schema
    if fk.IsResolved() {
        resolvedFK, unresolvedFK, resolvedSchemasByName = fk, other, fkSchemasByName
    } else {
        resolvedFK, unresolvedFK, resolvedSchemasByName = other, fk, otherSchemasByName
    }

    // Check the columns on the child table
    if len(resolvedFK.TableColumns) != len(unresolvedFK.UnresolvedFKDetails.TableColumns) {
        return false
    }
    for i, tag := range resolvedFK.TableColumns {
        unresolvedColName := unresolvedFK.UnresolvedFKDetails.TableColumns[i]
        resolvedSch, ok := resolvedSchemasByName[resolvedFK.TableName]
        if !ok {
            return false
        }
        resolvedCol, ok := resolvedSch.GetAllCols().GetByTag(tag)
        if !ok {
            return false
        }
        if resolvedCol.Name != unresolvedColName {
            return false
        }
    }

    // Check the columns on the parent table
    if len(resolvedFK.ReferencedTableColumns) != len(unresolvedFK.UnresolvedFKDetails.ReferencedTableColumns) {
        return false
    }
    for i, tag := range resolvedFK.ReferencedTableColumns {
        unresolvedColName := unresolvedFK.UnresolvedFKDetails.ReferencedTableColumns[i]
        resolvedSch, ok := resolvedSchemasByName[unresolvedFK.ReferencedTableName]
        if !ok {
            return false
        }
        resolvedCol, ok := resolvedSch.GetAllCols().GetByTag(tag)
        if !ok {
            return false
        }
        if resolvedCol.Name != unresolvedColName {
            return false
        }
    }

    return true
}

// DeepEquals compares all attributes of a foreign key to another, including name and
// table names. Note that if one foreign key is resolved and the other is NOT resolved,
// then this function will not calculate equality correctly. When comparing a resolved
// FK with an unresolved FK, the ForeignKey.Equals() function should be used instead.
func (fk ForeignKey) DeepEquals(other ForeignKey) bool {
    if !fk.EqualDefs(other) {
        return false
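The core of the resolved-vs-unresolved comparison above is mapping each column tag on the resolved side back to a column name via the schema map, then comparing name-by-name against the unresolved side. A toy model of that step, with illustrative types rather than dolt's API:

package main

import "fmt"

// schema maps a column tag to its column name.
type schema map[uint64]string

type resolvedFK struct {
    table string
    cols  []uint64 // columns identified by tag
}

type unresolvedFK struct {
    table string
    cols  []string // columns identified by name
}

// equalFK resolves each tag to its column name via the schema for the
// resolved side, then compares name-by-name against the unresolved key.
func equalFK(r resolvedFK, u unresolvedFK, schemas map[string]schema) bool {
    if r.table != u.table || len(r.cols) != len(u.cols) {
        return false
    }
    sch, ok := schemas[r.table]
    if !ok {
        return false
    }
    for i, tag := range r.cols {
        name, ok := sch[tag]
        if !ok || name != u.cols[i] {
            return false
        }
    }
    return true
}

func main() {
    schemas := map[string]schema{"child": {7: "parent_id"}}
    r := resolvedFK{table: "child", cols: []uint64{7}}
    u := unresolvedFK{table: "child", cols: []string{"parent_id"}}
    fmt.Println(equalFK(r, u, schemas)) // true
}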
@@ -17,6 +17,7 @@ package doltdb
import (
    "context"
    "io"
    "sync"

    "github.com/dolthub/dolt/go/store/datas"
    "github.com/dolthub/dolt/go/store/hash"

@@ -26,12 +27,13 @@ import (
type hooksDatabase struct {
    datas.Database
    postCommitHooks []CommitHook
    rsc             *ReplicationStatusController
}

// CommitHook is an abstraction for executing arbitrary commands after atomic database commits
type CommitHook interface {
    // Execute is an arbitrary read-only function whose arguments are a new Dataset commit into a specific Database
    Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error
    Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error)
    // HandleError is a bridge function to handle Execute errors
    HandleError(ctx context.Context, err error) error
    // SetLogger lets clients specify an output stream for HandleError

@@ -40,8 +42,16 @@ type CommitHook interface {
    ExecuteForWorkingSets() bool
}

// If a commit hook supports this interface, it can be notified if waiting for
// replication in the callback returned by |Execute| failed to complete in time
// or returned an error.
type NotifyWaitFailedCommitHook interface {
    NotifyWaitFailed()
}

func (db hooksDatabase) SetCommitHooks(ctx context.Context, postHooks []CommitHook) hooksDatabase {
    db.postCommitHooks = postHooks
    db.postCommitHooks = make([]CommitHook, len(postHooks))
    copy(db.postCommitHooks, postHooks)
    return db
}
@@ -52,19 +62,60 @@ func (db hooksDatabase) SetCommitHookLogger(ctx context.Context, wr io.Writer) h
    return db
}

func (db hooksDatabase) withReplicationStatusController(rsc *ReplicationStatusController) hooksDatabase {
    db.rsc = rsc
    return db
}

func (db hooksDatabase) PostCommitHooks() []CommitHook {
    return db.postCommitHooks
    toret := make([]CommitHook, len(db.postCommitHooks))
    copy(toret, db.postCommitHooks)
    return toret
}

func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset, onlyWS bool) {
    var err error
    for _, hook := range db.postCommitHooks {
    var wg sync.WaitGroup
    rsc := db.rsc
    var ioff int
    if rsc != nil {
        ioff = len(rsc.Wait)
        rsc.Wait = append(rsc.Wait, make([]func(context.Context) error, len(db.postCommitHooks))...)
        rsc.NotifyWaitFailed = append(rsc.NotifyWaitFailed, make([]func(), len(db.postCommitHooks))...)
    }
    for il, hook := range db.postCommitHooks {
        if !onlyWS || hook.ExecuteForWorkingSets() {
            err = hook.Execute(ctx, ds, db)
            if err != nil {
                hook.HandleError(ctx, err)
            i := il
            hook := hook
            wg.Add(1)
            go func() {
                defer wg.Done()
                f, err := hook.Execute(ctx, ds, db)
                if err != nil {
                    hook.HandleError(ctx, err)
                }
                if rsc != nil {
                    rsc.Wait[i+ioff] = f
                    if nf, ok := hook.(NotifyWaitFailedCommitHook); ok {
                        rsc.NotifyWaitFailed[i+ioff] = nf.NotifyWaitFailed
                    } else {
                        rsc.NotifyWaitFailed[i+ioff] = func() {}
                    }
                }
            }()
        }
    }
    wg.Wait()
    if rsc != nil {
        j := ioff
        for i := ioff; i < len(rsc.Wait); i++ {
            if rsc.Wait[i] != nil {
                rsc.Wait[j] = rsc.Wait[i]
                rsc.NotifyWaitFailed[j] = rsc.NotifyWaitFailed[i]
                j++
            }
        }
        rsc.Wait = rsc.Wait[:j]
        rsc.NotifyWaitFailed = rsc.NotifyWaitFailed[:j]
    }
}
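The rewritten ExecuteCommitHooks fans the hooks out to goroutines and then compacts the Wait slice, dropping entries for hooks that returned no callback. The new defensive copies in SetCommitHooks and PostCommitHooks matter in this concurrent setting: storing the caller's slice directly would share a backing array that the caller could mutate while hooks run. A minimal sketch of the aliasing hazard the copy avoids (toy types, not the dolt code):

package main

import "fmt"

type hook func() string

type db struct{ hooks []hook }

// setHooksShared stores the caller's slice directly: the caller can still
// mutate the backing array after handing it over.
func (d *db) setHooksShared(hs []hook) { d.hooks = hs }

// setHooksSnapshot copies the slice, as SetCommitHooks now does.
func (d *db) setHooksSnapshot(hs []hook) {
    d.hooks = make([]hook, len(hs))
    copy(d.hooks, hs)
}

func main() {
    hs := []hook{func() string { return "push" }}

    var shared, snapshotted db
    shared.setHooksShared(hs)
    snapshotted.setHooksSnapshot(hs)

    // Caller mutates its slice after registration.
    hs[0] = func() string { return "mutated" }

    fmt.Println(shared.hooks[0]())      // "mutated": shared backing array
    fmt.Println(snapshotted.hooks[0]()) // "push": snapshot is isolated
}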
@@ -355,7 +355,7 @@ func (t *Table) GetConstraintViolationsSchema(ctx context.Context) (schema.Schem
    }

    typeType, err := typeinfo.FromSqlType(
        gmstypes.MustCreateEnumType([]string{"foreign key", "unique index", "check constraint"}, sql.Collation_Default))
        gmstypes.MustCreateEnumType([]string{"foreign key", "unique index", "check constraint", "not null"}, sql.Collation_Default))
    if err != nil {
        return nil, err
    }
@@ -156,7 +156,7 @@ func (mr *MultiRepoTestSetup) NewRemote(remoteName string) {

func (mr *MultiRepoTestSetup) NewBranch(dbName, branchName string) {
    dEnv := mr.envs[dbName]
    err := actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), branchName, "head", false)
    err := actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), branchName, "head", false, nil)
    if err != nil {
        mr.Errhand(err)
    }

@@ -260,14 +260,20 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit
        panic("pending commit error: " + err.Error())
    }

    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        panic("couldn't get working set: " + err.Error())
    }

    commit, err := dEnv.DoltDB.CommitWithWorkingSet(
        ctx,
        dEnv.RepoStateReader().CWBHeadRef(),
        headRef,
        ws.Ref(),
        pendingCommit,
        ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(),
        prevHash,
        doltdb.TodoWorkingSetMeta(),
        nil,
    )
    if err != nil {
        panic("couldn't commit: " + err.Error())
+42
-20
@@ -31,16 +31,22 @@ var ErrCOBranchDelete = errors.New("attempted to delete checked out branch")
var ErrUnmergedBranch = errors.New("branch is not fully merged")
var ErrWorkingSetsOnBothBranches = errors.New("checkout would overwrite uncommitted changes on target branch")

func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch string, remoteDbPro env.RemoteDbProvider, force bool) error {
func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch string, remoteDbPro env.RemoteDbProvider, force bool, rsc *doltdb.ReplicationStatusController) error {
    oldRef := ref.NewBranchRef(oldBranch)
    newRef := ref.NewBranchRef(newBranch)

    err := CopyBranchOnDB(ctx, dbData.Ddb, oldBranch, newBranch, force)
    // TODO: This function smears the branch updates across multiple commits of the datas.Database.

    err := CopyBranchOnDB(ctx, dbData.Ddb, oldBranch, newBranch, force, rsc)
    if err != nil {
        return err
    }

    if ref.Equals(dbData.Rsr.CWBHeadRef(), oldRef) {
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return err
    }
    if ref.Equals(headRef, oldRef) {
        err = dbData.Rsw.SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: newRef})
        if err != nil {
            return err

@@ -66,14 +72,14 @@ func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch s
        }
    }

    return DeleteBranch(ctx, dbData, oldBranch, DeleteOptions{Force: true}, remoteDbPro)
    return DeleteBranch(ctx, dbData, oldBranch, DeleteOptions{Force: true}, remoteDbPro, rsc)
}

func CopyBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error {
    return CopyBranchOnDB(ctx, dEnv.DoltDB, oldBranch, newBranch, force)
    return CopyBranchOnDB(ctx, dEnv.DoltDB, oldBranch, newBranch, force, nil)
}

func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranch string, force bool) error {
func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranch string, force bool, rsc *doltdb.ReplicationStatusController) error {
    oldRef := ref.NewBranchRef(oldBranch)
    newRef := ref.NewBranchRef(newBranch)

@@ -104,7 +110,7 @@ func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranc
        return err
    }

    return ddb.NewBranchAtCommit(ctx, newRef, cm)
    return ddb.NewBranchAtCommit(ctx, newRef, cm, rsc)
}

type DeleteOptions struct {

@@ -112,7 +118,7 @@ type DeleteOptions struct {
    Remote bool
}

func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts DeleteOptions, remoteDbPro env.RemoteDbProvider) error {
func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts DeleteOptions, remoteDbPro env.RemoteDbProvider, rsc *doltdb.ReplicationStatusController) error {
    var branchRef ref.DoltRef
    if opts.Remote {
        var err error

@@ -122,15 +128,19 @@ func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts De
        }
    } else {
        branchRef = ref.NewBranchRef(brName)
        if ref.Equals(dbData.Rsr.CWBHeadRef(), branchRef) {
        headRef, err := dbData.Rsr.CWBHeadRef()
        if err != nil {
            return err
        }
        if ref.Equals(headRef, branchRef) {
            return ErrCOBranchDelete
        }
    }

    return DeleteBranchOnDB(ctx, dbData, branchRef, opts, remoteDbPro)
    return DeleteBranchOnDB(ctx, dbData, branchRef, opts, remoteDbPro, rsc)
}

func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.DoltRef, opts DeleteOptions, pro env.RemoteDbProvider) error {
func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.DoltRef, opts DeleteOptions, pro env.RemoteDbProvider, rsc *doltdb.ReplicationStatusController) error {
    ddb := dbdata.Ddb
    hasRef, err := ddb.HasRef(ctx, branchRef)

@@ -173,7 +183,7 @@ func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.Dolt
        }
    }

    return ddb.DeleteBranch(ctx, branchRef)
    return ddb.DeleteBranch(ctx, branchRef, rsc)
}

// validateBranchMergedIntoCurrentWorkingBranch returns an error if the given branch is not fully merged into the HEAD of the current branch.

@@ -193,7 +203,11 @@ func validateBranchMergedIntoCurrentWorkingBranch(ctx context.Context, dbdata en
        return err
    }

    cwbHead, err := dbdata.Ddb.Resolve(ctx, cwbCs, dbdata.Rsr.CWBHeadRef())
    headRef, err := dbdata.Rsr.CWBHeadRef()
    if err != nil {
        return err
    }
    cwbHead, err := dbdata.Ddb.Resolve(ctx, cwbCs, headRef)
    if err != nil {
        return err
    }

@@ -267,8 +281,8 @@ func validateBranchMergedIntoUpstream(ctx context.Context, dbdata env.DbData, br
    return nil
}

func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch, startPt string, force bool) error {
    err := createBranch(ctx, dbData, newBranch, startPt, force)
func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch, startPt string, force bool, rsc *doltdb.ReplicationStatusController) error {
    err := createBranch(ctx, dbData, newBranch, startPt, force, rsc)

    if err != nil {
        if err == ErrAlreadyExists {

@@ -289,7 +303,7 @@ func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch,
    return nil
}

func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, startingPoint string, force bool, headRef ref.DoltRef) error {
func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, startingPoint string, force bool, headRef ref.DoltRef, rsc *doltdb.ReplicationStatusController) error {
    branchRef := ref.NewBranchRef(newBranch)
    hasRef, err := ddb.HasRef(ctx, branchRef)
    if err != nil {

@@ -314,7 +328,7 @@ func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti
        return err
    }

    err = ddb.NewBranchAtCommit(ctx, branchRef, cm)
    err = ddb.NewBranchAtCommit(ctx, branchRef, cm, rsc)
    if err != nil {
        return err
    }

@@ -322,8 +336,12 @@ func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti
    return nil
}

func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool) error {
    return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, dbData.Rsr.CWBHeadRef())
func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool, rsc *doltdb.ReplicationStatusController) error {
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return err
    }
    return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, headRef, rsc)
}

var emptyHash = hash.Hash{}

@@ -341,7 +359,11 @@ func MaybeGetCommit(ctx context.Context, dEnv *env.DoltEnv, str string) (*doltdb
    cs, err := doltdb.NewCommitSpec(str)

    if err == nil {
        cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
        headRef, err := dEnv.RepoStateReader().CWBHeadRef()
        if err != nil {
            return nil, err
        }
        cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef)

        if errors.Is(err, doltdb.ErrBranchNotFound) {
            return nil, nil
+10
-2
@@ -159,7 +159,10 @@ func rootsForBranch(ctx context.Context, roots doltdb.Roots, branchRoot *doltdb.

func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force bool) error {
    branchRef := ref.NewBranchRef(brName)
    initialHeadRef := dEnv.RepoStateReader().CWBHeadRef()
    initialHeadRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }

    db := dEnv.DoltDB
    hasRef, err := db.HasRef(ctx, branchRef)

@@ -170,7 +173,11 @@ func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force
        return doltdb.ErrBranchNotFound
    }

    if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), branchRef) {
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    if ref.Equals(headRef, branchRef) {
        return doltdb.ErrAlreadyOnBranch
    }

@@ -331,6 +338,7 @@ func cleanOldWorkingSet(
        initialWs.WithWorkingRoot(newRoots.Working).WithStagedRoot(newRoots.Staged).ClearMerge(),
        h,
        dEnv.NewWorkingSetMeta("reset hard"),
        nil,
    )
    if err != nil {
        return err
+1
-1
@@ -230,7 +230,7 @@ func CloneRemote(ctx context.Context, srcDB *doltdb.DoltDB, remoteName, branch s
        }

        if brnch.GetPath() != branch {
            err := dEnv.DoltDB.DeleteBranch(ctx, brnch)
            err := dEnv.DoltDB.DeleteBranch(ctx, brnch, nil)
            if err != nil {
                return fmt.Errorf("%w: %s; %s", ErrFailedToDeleteBranch, brnch.String(), err.Error())
            }
@@ -86,7 +86,7 @@ func TestGetDotDotRevisions(t *testing.T) {

    // Create a feature branch.
    bref := ref.NewBranchRef("feature")
    err = dEnv.DoltDB.NewBranchAtCommit(context.Background(), bref, mainCommits[5])
    err = dEnv.DoltDB.NewBranchAtCommit(context.Background(), bref, mainCommits[5], nil)
    require.NoError(t, err)

    // Create 3 commits on feature branch.
+7
-3
@@ -168,7 +168,11 @@ func PushToRemoteBranch(ctx context.Context, rsr env.RepoStateReader, tempTableD
    }

    cs, _ := doltdb.NewCommitSpec(srcRef.GetPath())
    cm, err := localDB.Resolve(ctx, cs, rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err != nil {
        return err
    }
    cm, err := localDB.Resolve(ctx, cs, headRef)

    if err != nil {
        return fmt.Errorf("%w; refspec not found: '%s'; %s", ref.ErrInvalidRefSpec, srcRef.GetPath(), err.Error())

@@ -220,14 +224,14 @@ func DeleteRemoteBranch(ctx context.Context, targetRef ref.BranchRef, remoteRef
    }

    if hasRef {
        err = remoteDB.DeleteBranch(ctx, targetRef)
        err = remoteDB.DeleteBranch(ctx, targetRef, nil)
    }

    if err != nil {
        return err
    }

    err = localDB.DeleteBranch(ctx, remoteRef)
    err = localDB.DeleteBranch(ctx, remoteRef, nil)

    if err != nil {
        return err
+30
-10
@@ -39,7 +39,11 @@ func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, ro
        return nil, doltdb.Roots{}, err
    }

    newHead, err = ddb.Resolve(ctx, cs, rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err != nil {
        return nil, doltdb.Roots{}, err
    }
    newHead, err = ddb.Resolve(ctx, cs, headRef)
    if err != nil {
        return nil, doltdb.Roots{}, err
    }

@@ -164,7 +168,7 @@ func ResetHard(
        return err
    }

    err = dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, dEnv.NewWorkingSetMeta("reset hard"))
    err = dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, dEnv.NewWorkingSetMeta("reset hard"), nil)
    if err != nil {
        return err
    }

@@ -220,7 +224,11 @@ func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (do
        return doltdb.Roots{}, err
    }

    newHead, err := dbData.Ddb.Resolve(ctx, cs, dbData.Rsr.CWBHeadRef())
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return doltdb.Roots{}, err
    }
    newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef)
    if err != nil {
        return doltdb.Roots{}, err
    }

@@ -231,7 +239,7 @@ func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (do
    }

    // Update the head to this commit
    if err = dbData.Ddb.SetHeadToCommit(ctx, dbData.Rsr.CWBHeadRef(), newHead); err != nil {
    if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil {
        return doltdb.Roots{}, err
    }

@@ -265,19 +273,31 @@ func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb
}

// IsValidRef validates whether the input parameter is a valid cString
// TODO: this doesn't belong int his package
func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) bool {
// TODO: this doesn't belong in this package
func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) {
    // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef()
    // All other errors merely indicate an invalid ref spec.
    // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones.
    cs, err := doltdb.NewCommitSpec(cSpecStr)
    if err != nil {
        return false
        return false, nil
    }

    _, err = ddb.Resolve(ctx, cs, rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err == doltdb.ErrOperationNotSupportedInDetachedHead {
        // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed.
        // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work.
        headRef = nil
    } else if err != nil {
        return false, err
    }

    _, err = ddb.Resolve(ctx, cs, headRef)
    if err != nil {
        return false
        return false, nil
    }

    return true
    return true, nil
}

// CleanUntracked deletes untracked tables from the working root.
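IsValidRef now distinguishes one expected sentinel, ErrOperationNotSupportedInDetachedHead, from genuinely unexpected failures, in the spirit of the TODO about enumerating expected errors. A small sketch of that pattern using errors.Is with toy sentinels (the names are illustrative, not the dolt error values):

package main

import (
    "errors"
    "fmt"
)

var errDetachedHead = errors.New("detached head")

func headRef() (string, error) { return "", errDetachedHead }

// isValidRef treats the detached-head sentinel as "no head, keep going" and
// propagates anything else, instead of swallowing every error as "invalid".
func isValidRef(spec string) (bool, error) {
    head, err := headRef()
    if errors.Is(err, errDetachedHead) {
        head = "" // resolution below must tolerate an absent head
    } else if err != nil {
        return false, err
    }
    // ... resolve spec against head; failures here mean "not a valid ref".
    return spec != "" || head != "", nil
}

func main() {
    ok, err := isValidRef("main")
    fmt.Println(ok, err)
}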
+5
-1
@@ -32,7 +32,11 @@ type TagProps struct {
}

func CreateTag(ctx context.Context, dEnv *env.DoltEnv, tagName, startPoint string, props TagProps) error {
    return CreateTagOnDB(ctx, dEnv.DoltDB, tagName, startPoint, props, dEnv.RepoStateReader().CWBHeadRef())
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    return CreateTagOnDB(ctx, dEnv.DoltDB, tagName, startPoint, props, headRef)
}

func CreateTagOnDB(ctx context.Context, ddb *doltdb.DoltDB, tagName, startPoint string, props TagProps, headRef ref.DoltRef) error {
+10
-2
@@ -28,7 +28,11 @@ var ErrCOWorkspaceDelete = errors.New("attempted to delete checked out workspace
var ErrBranchNameExists = errors.New("workspace name must not be existing branch name")

func CreateWorkspace(ctx context.Context, dEnv *env.DoltEnv, name, startPoint string) error {
    return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, dEnv.RepoStateReader().CWBHeadRef())
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, headRef)
}

func CreateWorkspaceOnDB(ctx context.Context, ddb *doltdb.DoltDB, name, startPoint string, headRef ref.DoltRef) error {

@@ -86,7 +90,11 @@ func DeleteWorkspace(ctx context.Context, dEnv *env.DoltEnv, workspaceName strin
        }
    } else {
        dref = ref.NewWorkspaceRef(workspaceName)
        if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), dref) {
        headRef, err := dEnv.RepoStateReader().CWBHeadRef()
        if err != nil {
            return err
        }
        if ref.Equals(headRef, dref) {
            return ErrCOWorkspaceDelete
        }
    }
+18
-11
@@ -206,7 +206,10 @@ func (dEnv *DoltEnv) Valid() bool {
// initWorkingSetFromRepoState sets the working set for the env's head to mirror the contents of the repo state file.
// This is only necessary to migrate repos written before this method was introduced, and can be removed after 1.0
func (dEnv *DoltEnv) initWorkingSetFromRepoState(ctx context.Context) error {
    headRef := dEnv.RepoStateReader().CWBHeadRef()
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    wsRef, err := ref.WorkingSetRefForHead(headRef)
    if err != nil {
        return err

@@ -591,7 +594,11 @@ func (dEnv *DoltEnv) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error)
}

func WorkingSet(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (*doltdb.WorkingSet, error) {
    workingSetRef, err := ref.WorkingSetRefForHead(rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err != nil {
        return nil, err
    }
    workingSetRef, err := ref.WorkingSetRefForHead(headRef)
    if err != nil {
        return nil, err
    }

@@ -629,7 +636,7 @@ func (dEnv *DoltEnv) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.Root
        wsRef = ws.Ref()
    }

    return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, dEnv.workingSetMeta())
    return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, dEnv.workingSetMeta(), nil)
}

// UpdateWorkingSet updates the working set for the current working branch to the value given.

@@ -648,19 +655,19 @@ func (dEnv *DoltEnv) UpdateWorkingSet(ctx context.Context, ws *doltdb.WorkingSet
        }
    }

    return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws, h, dEnv.workingSetMeta())
    return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws, h, dEnv.workingSetMeta(), nil)
}

type repoStateReader struct {
    *DoltEnv
}

func (r *repoStateReader) CWBHeadRef() ref.DoltRef {
    return r.RepoState.CWBHeadRef()
func (r *repoStateReader) CWBHeadRef() (ref.DoltRef, error) {
    return r.RepoState.CWBHeadRef(), nil
}

func (r *repoStateReader) CWBHeadSpec() *doltdb.CommitSpec {
    return r.RepoState.CWBHeadSpec()
func (r *repoStateReader) CWBHeadSpec() (*doltdb.CommitSpec, error) {
    return r.RepoState.CWBHeadSpec(), nil
}

func (dEnv *DoltEnv) RepoStateReader() RepoStateReader {

@@ -758,7 +765,7 @@ func (dEnv *DoltEnv) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootV
        wsRef = ws.Ref()
    }

    return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, dEnv.workingSetMeta())
    return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, dEnv.workingSetMeta(), nil)
}

func (dEnv *DoltEnv) AbortMerge(ctx context.Context) error {

@@ -772,7 +779,7 @@ func (dEnv *DoltEnv) AbortMerge(ctx context.Context) error {
        return err
    }

    return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.AbortMerge(), h, dEnv.workingSetMeta())
    return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.AbortMerge(), h, dEnv.workingSetMeta(), nil)
}

func (dEnv *DoltEnv) workingSetMeta() *datas.WorkingSetMeta {

@@ -911,7 +918,7 @@ func (dEnv *DoltEnv) RemoveRemote(ctx context.Context, name string) error {
        rr := r.(ref.RemoteRef)

        if rr.GetRemote() == remote.Name {
            err = ddb.DeleteBranch(ctx, rr)
            err = ddb.DeleteBranch(ctx, rr, nil)

            if err != nil {
                return fmt.Errorf("%w; failed to delete remote tracking ref '%s'; %s", ErrFailedToDeleteRemote, rr.String(), err.Error())
Vendored
+27
-11
@@ -101,16 +101,20 @@ type MemoryRepoState struct {
var _ RepoStateReader = MemoryRepoState{}
var _ RepoStateWriter = MemoryRepoState{}

func (m MemoryRepoState) CWBHeadRef() ref.DoltRef {
    return m.Head
func (m MemoryRepoState) CWBHeadRef() (ref.DoltRef, error) {
    return m.Head, nil
}

func (m MemoryRepoState) CWBHeadSpec() *doltdb.CommitSpec {
    spec, err := doltdb.NewCommitSpec(m.CWBHeadRef().GetPath())
func (m MemoryRepoState) CWBHeadSpec() (*doltdb.CommitSpec, error) {
    headRef, err := m.CWBHeadRef()
    if err != nil {
        panic(err)
        return nil, err
    }
    return spec
    spec, err := doltdb.NewCommitSpec(headRef.GetPath())
    if err != nil {
        return nil, err
    }
    return spec, nil
}

func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) error {

@@ -120,7 +124,11 @@ func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.R
    ws, err := m.WorkingSet(ctx)
    if err == doltdb.ErrWorkingSetNotFound {
        // first time updating root
        wsRef, err = ref.WorkingSetRefForHead(m.CWBHeadRef())
        headRef, err := m.CWBHeadRef()
        if err != nil {
            return err
        }
        wsRef, err = ref.WorkingSetRefForHead(headRef)
        if err != nil {
            return err
        }

@@ -136,7 +144,7 @@ func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.R
        wsRef = ws.Ref()
    }

    return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, m.workingSetMeta())
    return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, m.workingSetMeta(), nil)
}

func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error {

@@ -146,7 +154,11 @@ func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.
    ws, err := m.WorkingSet(ctx)
    if err == doltdb.ErrWorkingSetNotFound {
        // first time updating root
        wsRef, err = ref.WorkingSetRefForHead(m.CWBHeadRef())
        headRef, err := m.CWBHeadRef()
        if err != nil {
            return err
        }
        wsRef, err = ref.WorkingSetRefForHead(headRef)
        if err != nil {
            return err
        }

@@ -162,11 +174,15 @@ func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.
        wsRef = ws.Ref()
    }

    return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, m.workingSetMeta())
    return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, m.workingSetMeta(), nil)
}

func (m MemoryRepoState) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error) {
    workingSetRef, err := ref.WorkingSetRefForHead(m.CWBHeadRef())
    headRef, err := m.CWBHeadRef()
    if err != nil {
        return nil, err
    }
    workingSetRef, err := ref.WorkingSetRefForHead(headRef)
    if err != nil {
        return nil, err
    }
Vendored
+8
-2
@@ -141,7 +141,10 @@ func NewPushOpts(ctx context.Context, apr *argparser.ArgParseResults, rsr RepoSt
    }

    remote, remoteOK := remotes[remoteName]
    currentBranch := rsr.CWBHeadRef()
    currentBranch, err := rsr.CWBHeadRef()
    if err != nil {
        return nil, err
    }
    branches, err := rsr.GetBranches()
    if err != nil {
        return nil, err

@@ -422,7 +425,10 @@ func NewPullSpec(_ context.Context, rsr RepoStateReader, remoteName, remoteRefNa

    var remoteRef ref.DoltRef
    if remoteRefName == "" {
        branch := rsr.CWBHeadRef()
        branch, err := rsr.CWBHeadRef()
        if err != nil {
            return nil, err
        }
        trackedBranches, err := rsr.GetBranches()
        if err != nil {
            return nil, err
+2
-2
@@ -27,8 +27,8 @@ import (

// TODO: change name to ClientStateReader, move out of env package
type RepoStateReader interface {
    CWBHeadRef() ref.DoltRef
    CWBHeadSpec() *doltdb.CommitSpec
    CWBHeadRef() (ref.DoltRef, error)
    CWBHeadSpec() (*doltdb.CommitSpec, error)
    GetRemotes() (map[string]Remote, error)
    GetBackups() (map[string]Remote, error)
    GetBranches() (map[string]BranchConfig, error)
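Changing an interface method from CWBHeadRef() ref.DoltRef to CWBHeadRef() (ref.DoltRef, error) forces every implementation and call site to move at once; implementations that cannot fail simply return a nil error, as repoStateReader and MemoryRepoState do above. A toy sketch of that adapter shape (illustrative types only):

package main

import "fmt"

type ref string

// reader is the widened interface: the accessor may now fail.
type reader interface {
    HeadRef() (ref, error)
}

// fileState is an implementation that can never fail; it satisfies the new
// interface by returning a nil error, mirroring repoStateReader.CWBHeadRef.
type fileState struct{ head ref }

func (f fileState) HeadRef() (ref, error) { return f.head, nil }

func main() {
    var r reader = fileState{head: "refs/heads/main"}
    head, err := r.HeadRef()
    fmt.Println(head, err)
}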
@@ -62,7 +62,12 @@ func NewMergeSpec(ctx context.Context, rsr env.RepoStateReader, ddb *doltdb.Dolt
        return nil, err
    }

    headCM, err := ddb.Resolve(context.TODO(), headCS, rsr.CWBHeadRef())
    headRef, err := rsr.CWBHeadRef()
    if err != nil {
        return nil, err
    }

    headCM, err := ddb.Resolve(context.TODO(), headCS, headRef)
    if err != nil {
        return nil, err
    }

@@ -72,7 +77,7 @@ func NewMergeSpec(ctx context.Context, rsr env.RepoStateReader, ddb *doltdb.Dolt
        return nil, err
    }

    mergeCM, err := ddb.Resolve(context.TODO(), mergeCS, rsr.CWBHeadRef())
    mergeCM, err := ddb.Resolve(context.TODO(), mergeCS, headRef)
    if err != nil {
        return nil, err
    }

@@ -159,7 +164,11 @@ func ExecuteFFMerge(
    }

    if !spec.Squash {
        err = dEnv.DoltDB.FastForward(ctx, dEnv.RepoStateReader().CWBHeadRef(), spec.MergeC)
        headRef, err := dEnv.RepoStateReader().CWBHeadRef()
        if err != nil {
            return err
        }
        err = dEnv.DoltDB.FastForward(ctx, headRef, spec.MergeC)

        if err != nil {
            return err
@@ -133,17 +133,17 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
    if err != nil {
        return nil, nil, err
    }
    leftRows := durable.ProllyMapFromIndex(lr)
    leftEditor := durable.ProllyMapFromIndex(lr).Mutate()

    ai, err := mergeTbl.GetArtifacts(ctx)
    if err != nil {
        return nil, nil, err
    }
    ae := durable.ProllyMapFromArtifactIndex(ai).Editor()
    artEditor := durable.ProllyMapFromArtifactIndex(ai).Editor()

    keyless := schema.IsKeyless(tm.leftSch)

    pri, err := newPrimaryMerger(leftRows, tm, valueMerger, finalSch)
    pri, err := newPrimaryMerger(leftEditor, tm, valueMerger, finalSch)
    if err != nil {
        return nil, nil, err
    }

@@ -151,13 +151,18 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
    if err != nil {
        return nil, nil, err
    }
    conflicts, err := newConflictMerger(ctx, tm, ae)
    conflicts, err := newConflictMerger(ctx, tm, artEditor)
    if err != nil {
        return nil, nil, err
    }

    // validator shares editor with conflict merge
    uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, ae)
    // validator shares an artifact editor with conflict merge
    uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, artEditor)
    if err != nil {
        return nil, nil, err
    }

    nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, artEditor, leftEditor, sec.leftMut)
    if err != nil {
        return nil, nil, err
    }

@@ -177,7 +182,16 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
        if err != nil {
            return nil, nil, err
        }
        s.DataConflicts += cnt
        s.ConstraintViolations += cnt

        cnt, err = nullChk.validateDiff(ctx, diff)
        if err != nil {
            return nil, nil, err
        }
        s.ConstraintViolations += cnt
        if cnt > 0 {
            continue
        }

        switch diff.Op {
        case tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict:
@@ -379,7 +393,7 @@ func newUniqValidator(ctx context.Context, sch schema.Schema, tm *TableMerger, v
|
||||
return uv, nil
|
||||
}
|
||||
|
||||
func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (conflicts int, err error) {
|
||||
func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (violations int, err error) {
|
||||
var value val.Tuple
|
||||
switch diff.Op {
|
||||
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
|
||||
@@ -399,7 +413,7 @@ func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff
|
||||
|
||||
for _, idx := range uv.indexes {
|
||||
err = idx.findCollisions(ctx, diff.Key, value, func(k, v val.Tuple) error {
|
||||
conflicts++
|
||||
violations++
|
||||
return uv.insertArtifact(ctx, k, v, idx.meta)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -491,6 +505,135 @@ func (idx uniqIndex) findCollisions(ctx context.Context, key, value val.Tuple, c
|
||||
return cb(key, value)
|
||||
}
|
||||
|
||||
// nullValidator enforces NOT NULL constraints on merge
|
||||
type nullValidator struct {
|
||||
table string
|
||||
// final is the merge result schema
|
||||
final schema.Schema
|
||||
// leftMap and rightMap map value tuples to |final|
|
||||
leftMap, rightMap val.OrdinalMapping
|
||||
// edits is the artifacts maps editor
|
||||
artEditor *prolly.ArtifactsEditor
|
||||
// leftEdits if the left-side row editor
|
||||
leftEditor *prolly.MutableMap
|
||||
// secEditors are the secondary index editors
|
||||
secEditors []MutableSecondaryIdx
|
||||
// theirRootish is the hash.Hash of the right-side revision
|
||||
theirRootish hash.Hash
|
||||
// ourRootish is the hash.Hash of the left-side revision
|
||||
ourRootish hash.Hash
|
||||
}
|
||||
|
||||
func newNullValidator(
|
||||
ctx context.Context,
|
||||
final schema.Schema,
|
||||
tm *TableMerger,
|
||||
vm *valueMerger,
|
||||
artEditor *prolly.ArtifactsEditor,
|
||||
leftEditor *prolly.MutableMap,
|
||||
secEditors []MutableSecondaryIdx,
|
||||
) (nullValidator, error) {
|
||||
theirRootish, err := tm.rightSrc.HashOf()
|
||||
if err != nil {
|
||||
return nullValidator{}, err
|
||||
}
|
||||
ourRootish, err := tm.rightSrc.HashOf()
|
||||
if err != nil {
|
||||
return nullValidator{}, err
|
||||
}
|
||||
return nullValidator{
|
||||
table: tm.name,
|
||||
final: final,
|
||||
leftMap: vm.leftMapping,
|
||||
rightMap: vm.rightMapping,
|
||||
artEditor: artEditor,
|
||||
leftEditor: leftEditor,
|
||||
secEditors: secEditors,
|
||||
theirRootish: theirRootish,
|
||||
ourRootish: ourRootish,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (count int, err error) {
|
||||
switch diff.Op {
|
||||
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
|
||||
var violations []string
|
||||
for to, from := range nv.rightMap {
|
||||
col := nv.final.GetNonPKCols().GetByIndex(to)
|
||||
if col.IsNullable() {
|
||||
continue
|
||||
}
|
||||
if from < 0 {
|
||||
// non-nullable column in |nv.final| does not exist
|
||||
// on the right side of the merge, check if it will
|
||||
// be populated with a default value
|
||||
if col.Default == "" {
|
||||
violations = append(violations, col.Name)
|
||||
}
|
||||
} else {
|
||||
if diff.Right.FieldIsNull(from) {
|
||||
violations = append(violations, col.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// for right-side NULL violations, we insert a constraint violation and
|
||||
// set |count| > 0 to signal to the caller that |diff| should not be applied
|
||||
if len(violations) > 0 {
|
||||
var meta prolly.ConstraintViolationMeta
|
||||
if meta, err = newNotNullViolationMeta(violations, diff.Right); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = nv.artEditor.ReplaceConstraintViolation(ctx, diff.Key, nv.theirRootish, prolly.ArtifactTypeNullViol, meta)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
count = len(violations)
|
||||
|
||||
case tree.DiffOpLeftAdd, tree.DiffOpLeftModify:
|
||||
var violations []string
|
||||
for to, from := range nv.leftMap {
|
||||
col := nv.final.GetNonPKCols().GetByIndex(to)
|
||||
if col.IsNullable() {
|
||||
continue
|
||||
}
|
||||
if from < 0 {
|
||||
// non-nullable column in |nv.final| does not exist
|
||||
// on the left side of the merge, check if it will
|
||||
// be populated with a default value
|
||||
if col.Default == "" {
|
||||
violations = append(violations, col.Name)
|
||||
}
|
||||
} else {
|
||||
if diff.Left.FieldIsNull(from) {
|
||||
violations = append(violations, col.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// for left-side NULL violations, we insert a constraint violation and
|
||||
// then must explicitly remove this row from all left-side indexes
|
||||
if len(violations) > 0 {
|
||||
var meta prolly.ConstraintViolationMeta
|
||||
if meta, err = newNotNullViolationMeta(violations, diff.Left); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = nv.artEditor.ReplaceConstraintViolation(ctx, diff.Key, nv.ourRootish, prolly.ArtifactTypeNullViol, meta)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err = nv.leftEditor.Delete(ctx, diff.Key); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, editor := range nv.secEditors {
|
||||
if err = editor.DeleteEntry(ctx, diff.Key, diff.Left); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
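The null validator above leans on the ordinal-mapping convention that a negative |from| value means the merged column has no counterpart on that side. A runnable toy sketch of that check, with invented slices standing in for the real schema and mapping types:

package main

import "fmt"

// mapping[to] = from maps a column in the merged schema to its ordinal on
// one side of the merge; a negative value means the column does not exist
// on that side. Names here are illustrative, not dolt's actual types.
func main() {
    // merged schema has 3 non-PK columns; the right side only has the first two
    rightMapping := []int{0, 1, -1}
    nullable := []bool{true, false, false}
    hasDefault := []bool{false, false, false}

    var violations []int
    for to, from := range rightMapping {
        if nullable[to] {
            continue
        }
        if from < 0 && !hasDefault[to] {
            // NOT NULL column missing on the right with no default: violation
            violations = append(violations, to)
        }
    }
    fmt.Println("violating column ordinals:", violations) // [2]
}
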
// conflictMerger processes primary key diffs
// with conflict types into artifact table writes.
type conflictMerger struct {
@@ -570,9 +713,9 @@ type primaryMerger struct {
    finalSch schema.Schema
}

func newPrimaryMerger(leftRows prolly.Map, tableMerger *TableMerger, valueMerger *valueMerger, finalSch schema.Schema) (*primaryMerger, error) {
func newPrimaryMerger(leftEditor *prolly.MutableMap, tableMerger *TableMerger, valueMerger *valueMerger, finalSch schema.Schema) (*primaryMerger, error) {
    return &primaryMerger{
        mut: leftRows.Mutate(),
        mut:         leftEditor,
        valueMerger: valueMerger,
        tableMerger: tableMerger,
        finalSch:    finalSch,
@@ -596,12 +739,12 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
            return fmt.Errorf("cannot merge keyless tables with reordered columns")
        }
    } else {
        tempTupleValue, err := remapTupleWithColumnDefaults(ctx, &diff.Right, sourceSch.GetValueDescriptor(),
        tempTupleValue, err := remapTupleWithColumnDefaults(ctx, diff.Right, sourceSch.GetValueDescriptor(),
            m.valueMerger.rightMapping, m.tableMerger, m.finalSch, m.valueMerger.syncPool)
        if err != nil {
            return err
        }
        newTupleValue = *tempTupleValue
        newTupleValue = tempTupleValue
    }
    return m.mut.Put(ctx, diff.Key, newTupleValue)
case tree.DiffOpRightDelete:
@@ -732,7 +875,7 @@ func remapTuple(tuple val.Tuple, desc val.TupleDesc, mapping val.OrdinalMapping)
// currently being merged and associated node store. |mergedSch| is the new schema of the table and is used to look up
// column default values to apply to any existing rows when a new column is added as part of a merge. |pool| is used
// to allocate memory for the new tuple. A pointer to the new tuple data is returned, along with any error encountered.
func remapTupleWithColumnDefaults(ctx *sql.Context, tuple *val.Tuple, tupleDesc val.TupleDesc, mapping val.OrdinalMapping, tm *TableMerger, mergedSch schema.Schema, pool pool.BuffPool) (*val.Tuple, error) {
func remapTupleWithColumnDefaults(ctx *sql.Context, tuple val.Tuple, tupleDesc val.TupleDesc, mapping val.OrdinalMapping, tm *TableMerger, mergedSch schema.Schema, pool pool.BuffPool) (val.Tuple, error) {
    tb := val.NewTupleBuilder(mergedSch.GetValueDescriptor())

    for to, from := range mapping {
@@ -761,19 +904,20 @@ func remapTupleWithColumnDefaults(ctx *sql.Context, tuple *val.Tuple, tupleDesc
            if err != nil {
                return nil, err
            }

            value, _, err = col.TypeInfo.ToSqlType().Convert(value)
            if err != nil {
                return nil, err
            }
            err = index.PutField(ctx, tm.ns, tb, to, value)
            if err != nil {
                return nil, err
            }
        }
    } else {
        tb.PutRaw(to, tupleDesc.GetField(from, *tuple))
        tb.PutRaw(to, tupleDesc.GetField(from, tuple))
    }
}

newTuple := tb.Build(pool)
return &newTuple, nil
return tb.Build(pool), nil
}

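The signature change from *val.Tuple to val.Tuple is cheap because a tuple is, to the best of my reading, a byte-slice alias in dolt's val package; returning the slice header by value shares the same backing array, so no pointer indirection is needed. A tiny illustration under that assumption:

package main

import "fmt"

// tuple stands in for val.Tuple (assumed to be a []byte alias); this sketch
// only shows that returning the slice by value loses nothing: slices are
// small headers that share the same backing array either way.
type tuple []byte

func build() tuple { return tuple{0x01, 0x02} }

func main() {
    t := build() // no *tuple needed, no extra allocation for the header
    fmt.Println(len(t))
}
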
func mergeTableArtifacts(ctx context.Context, tm *TableMerger, mergeTbl *doltdb.Table) (*doltdb.Table, error) {
@@ -912,12 +1056,12 @@ func migrateDataToMergedSchema(ctx *sql.Context, tm *TableMerger, vm *valueMerge
        return err
    }

    newValueTuple, err := remapTupleWithColumnDefaults(ctx, &valueTuple, valueDescriptor, vm.leftMapping, tm, mergedSch, vm.syncPool)
    newValueTuple, err := remapTupleWithColumnDefaults(ctx, valueTuple, valueDescriptor, vm.leftMapping, tm, mergedSch, vm.syncPool)
    if err != nil {
        return err
    }

    err = mut.Put(ctx, keyTuple, *newValueTuple)
    err = mut.Put(ctx, keyTuple, newValueTuple)
    if err != nil {
        return err
    }

@@ -301,7 +301,6 @@ func (rm *RootMerger) maybeShortCircuit(ctx context.Context, tm *TableMerger, op
func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bool, error) {
    existingVD := existingSch.GetValueDescriptor()
    targetVD := targetSch.GetValueDescriptor()

    _, valMapping, err := schema.MapSchemaBasedOnTagAndName(existingSch, targetSch)
    if err != nil {
        return false, err
@@ -318,11 +317,6 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo
        return false, nil
    }

    // If a not-null constraint was added, bail.
    if existingVD.Types[existingIndex].Nullable && !targetVD.Types[targetIndex].Nullable {
        return false, nil
    }

    // If the collation was changed, bail.
    // Different collations will affect the ordering of any secondary indexes using this column.
    existingStr, ok1 := existingSch.GetNonPKCols().GetByIndex(existingIndex).TypeInfo.ToSqlType().(sql.StringType)
@@ -338,18 +332,18 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo
        return false, err
    }

    for i, j := range valMapping {
        if i == j {
    for targetIndex, existingIndex := range valMapping {
        if targetIndex == existingIndex {
            continue
        }

        col := targetSch.GetNonPKCols().GetByIndex(targetIndex)
        // If we haven't bailed so far, then these fields were added at the end.
        // If they are not-null bail.
        if !targetVD.Types[i].Nullable {
        // If one of these fields is NOT NULL, without a default value, then fail.
        if !col.IsNullable() && col.Default == "" {
            return false, nil
        }
    }

    return true, nil
}


@@ -595,16 +595,19 @@ func mapColumns(ourCC, theirCC, ancCC *schema.ColCollection) (columnMappings, er
    })

    // Handle any remaining columns on the "their" side
    for _, theirCol := range theirTagsToCols {
        ancCol, foundAncByTag := ancCC.GetByTag(theirCol.Tag)
    _ = theirCC.Iter(func(tag uint64, theirCol schema.Column) (stop bool, err error) {
        if _, ok := theirTagsToCols[tag]; !ok {
            return // already added
        }

        ancCol, foundAncByTag := ancCC.GetByTag(tag)
        if !foundAncByTag {
            // Ditto for finding the ancestor column
            ancCol, _ = ancCC.GetByNameCaseInsensitive(theirCol.Name)
        }

        columnMappings = append(columnMappings, newColumnMapping(ancCol, schema.InvalidCol, theirCol))
    }

        return
    })
    return columnMappings, nil
}


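The rewrite above swaps a range over the theirTagsToCols map for theirCC.Iter, which plausibly exists to make the appended column order deterministic: Go map iteration order is unspecified, while a column collection iterates in its declared order. A toy demonstration of the difference:

package main

import "fmt"

func main() {
    byTag := map[uint64]string{3: "c", 1: "a", 2: "b"}
    ordered := []uint64{1, 2, 3} // the collection's declared order

    var stable []string
    for _, tag := range ordered {
        stable = append(stable, byTag[tag])
    }
    fmt.Println(stable) // always [a b c]; a range over byTag may vary per run
}
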
@@ -743,7 +743,7 @@ func buildLeftRightAncCommitsAndBranches(t *testing.T, ddb *doltdb.DoltDB, rootT
    commit, err := ddb.Commit(context.Background(), hash, ref.NewBranchRef(env.DefaultInitBranch), meta)
    require.NoError(t, err)

    err = ddb.NewBranchAtCommit(context.Background(), ref.NewBranchRef("to-merge"), initialCommit)
    err = ddb.NewBranchAtCommit(context.Background(), ref.NewBranchRef("to-merge"), initialCommit, nil)
    require.NoError(t, err)
    mergeCommit, err := ddb.Commit(context.Background(), mergeHash, ref.NewBranchRef("to-merge"), meta)
    require.NoError(t, err)

@@ -66,6 +66,9 @@ func TestSchemaMerge(t *testing.T) {
    t.Run("column default tests", func(t *testing.T) {
        testSchemaMerge(t, columnDefaultTests)
    })
    t.Run("nullability tests", func(t *testing.T) {
        testSchemaMerge(t, nullabilityTests)
    })
    t.Run("column type change tests", func(t *testing.T) {
        testSchemaMerge(t, typeChangeTests)
    })
@@ -286,6 +289,36 @@ var columnDefaultTests = []schemaMergeTest{
    },
}

var nullabilityTests = []schemaMergeTest{
    {
        name:     "add not null column to empty table",
        ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")),
        left:     tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")),
        right:    tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")),
        merged:   tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")),
        skipOldFmt:          true,
        skipFlipOnOldFormat: true,
    },
    {
        name:     "add not null constraint to existing column",
        ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)),
        left:     tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)),
        right:    tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1), row(2, 2)),
        merged:   tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1), row(2, 2)),
        skipOldFmt:          true,
        skipFlipOnOldFormat: true,
    },
    {
        name:     "add not null column to non-empty table",
        ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1)),
        left:     tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19)),
        right:    tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1), row(2)),
        merged:   tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19), row(2, 19)),
        skipOldFmt:          true,
        skipFlipOnOldFormat: true,
    },
}

var columnReorderingTests = []schemaMergeTest{}

var typeChangeTests = []schemaMergeTest{
@@ -513,7 +546,14 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
    for name, addr := range exp {
        a, ok := act[name]
        assert.True(t, ok)
        assert.Equal(t, addr, a)
        if !assert.Equal(t, addr, a) {
            expTbl, _, err := m.GetTable(ctx, name)
            require.NoError(t, err)
            t.Logf("expected rows: %s", expTbl.DebugString(ctx))
            actTbl, _, err := result.Root.GetTable(ctx, name)
            require.NoError(t, err)
            t.Logf("actual rows: %s", actTbl.DebugString(ctx))
        }
    }
}
})

@@ -57,6 +57,7 @@ const (
    CvType_ForeignKey CvType = iota + 1
    CvType_UniqueIndex
    CvType_CheckConstraint
    CvType_NotNull
)

type FKViolationReceiver interface {

@@ -16,6 +16,7 @@ package merge

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"

@@ -149,3 +150,33 @@ func ordinalMappingFromIndex(def schema.Index) (m val.OrdinalMapping) {
    }
    return
}

type NullViolationMeta struct {
    Columns []string `json:"Columns"`
}

func newNotNullViolationMeta(violations []string, value val.Tuple) (prolly.ConstraintViolationMeta, error) {
    info, err := json.Marshal(NullViolationMeta{Columns: violations})
    if err != nil {
        return prolly.ConstraintViolationMeta{}, err
    }
    return prolly.ConstraintViolationMeta{
        VInfo: info,
        Value: value,
    }, nil
}

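A small, self-contained sketch of the JSON payload newNotNullViolationMeta produces for VInfo, using a local mirror of the struct above:

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the NullViolationMeta shape to show the VInfo payload the
// artifact editor stores: a JSON object listing violating column names.
type nullViolationMeta struct {
    Columns []string `json:"Columns"`
}

func main() {
    info, err := json.Marshal(nullViolationMeta{Columns: []string{"a", "b"}})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(info)) // {"Columns":["a","b"]}

    var decoded nullViolationMeta
    if err := json.Unmarshal(info, &decoded); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Columns) // [a b]
}
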
func (m NullViolationMeta) Unmarshall(ctx *sql.Context) (val types.JSONDocument, err error) {
    return types.JSONDocument{Val: m}, nil
}

func (m NullViolationMeta) Compare(ctx *sql.Context, v types.JSONValue) (cmp int, err error) {
    ours := types.JSONDocument{Val: m}
    return ours.Compare(ctx, v)
}

func (m NullViolationMeta) ToString(ctx *sql.Context) (string, error) {
    return fmt.Sprintf("{Columns: [%s]}", strings.Join(m.Columns, ",")), nil
}

var _ types.JSONValue = FkCVMeta{}

@@ -182,7 +182,7 @@ func persistMigratedCommitMapping(ctx context.Context, ddb *doltdb.DoltDB, mappi
    }

    br := ref.NewBranchRef(MigratedCommitsBranch)
    err = ddb.NewBranchAtCommit(ctx, br, init)
    err = ddb.NewBranchAtCommit(ctx, br, init, nil)
    if err != nil {
        return err
    }
@@ -298,6 +298,6 @@ func commitRoot(
        Name:      meta.Name,
        Email:     meta.Email,
        Timestamp: uint64(time.Now().Unix()),
    })
    }, nil)
    return err
}

@@ -93,7 +93,7 @@ func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRe

    newWs := doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(wr).WithStagedRoot(sr)

    return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta())
    return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta(), nil)
}

func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog *progress) error {

@@ -113,7 +113,11 @@ func AllBranches(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn,

// CurrentBranch rewrites the history of the current branch using the |replay| function.
func CurrentBranch(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, nerf NeedsRebaseFn) error {
    return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, dEnv.RepoStateReader().CWBHeadRef())
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, headRef)
}

// AllBranchesByRoots rewrites the history of all branches in the repo using the |replay| function.
@@ -130,7 +134,11 @@ func AllBranchesByRoots(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRoo
// CurrentBranchByRoot rewrites the history of the current branch using the |replay| function.
func CurrentBranchByRoot(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRootFn, nerf NeedsRebaseFn) error {
    replayCommit := wrapReplayRootFn(replay)
    return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, dEnv.RepoStateReader().CWBHeadRef())
    headRef, err := dEnv.RepoStateReader().CWBHeadRef()
    if err != nil {
        return err
    }
    return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, headRef)
}

func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, nerf NeedsRebaseFn, refs ...ref.DoltRef) error {
@@ -152,7 +160,7 @@ func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, n
    for i, r := range refs {
        switch dRef := r.(type) {
        case ref.BranchRef:
            err = ddb.NewBranchAtCommit(ctx, dRef, newHeads[i])
            err = ddb.NewBranchAtCommit(ctx, dRef, newHeads[i], nil)

        case ref.TagRef:
            // rewrite tag with new commit

@@ -706,7 +706,7 @@ func closeWriteSession(ctx *sql.Context, engine *gms.Engine, databaseName string
        return err
    }

    return sqlDatabase.DbData().Ddb.UpdateWorkingSet(ctx, newWorkingSet.Ref(), newWorkingSet, hash, newWorkingSet.Meta())
    return sqlDatabase.DbData().Ddb.UpdateWorkingSet(ctx, newWorkingSet.Ref(), newWorkingSet, hash, newWorkingSet.Meta(), nil)
}

// getTableSchema returns a sql.Schema for the specified table in the specified database.

@@ -32,6 +32,7 @@ import (
)

var _ doltdb.CommitHook = (*commithook)(nil)
var _ doltdb.NotifyWaitFailedCommitHook = (*commithook)(nil)

type commithook struct {
    rootLgr *logrus.Entry
@@ -53,6 +54,20 @@ type commithook struct {
    // commithooks are caught up with replicating to the standby.
    waitNotify func()

    // This is a slice of notification channels maintained by the
    // commithook. The semantics are:
    // 1. All accesses to |successChs| must happen with |mu| held.
    // 2. There may be |0| or more channels in the slice.
    // 3. As a reader, if |successChs| is non-empty, you should just read a value, for example, |successChs[0]| and use it. All entries will be closed at the same time. If |successChs| is empty when you need a channel, you should add one to it.
    // 4. If you read a channel out of |successChs|, that channel will be closed on the next successful replication attempt. It will not be closed before then.
    successChs []chan struct{}

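A stripped-down model of the |successChs| protocol documented above — reuse successChs[0] under the lock, or append a fresh channel, and have the writer close all channels at once. This is a sketch of the stated rules, not dolt's actual commithook:

package main

import (
    "fmt"
    "sync"
)

type hook struct {
    mu         sync.Mutex
    successChs []chan struct{}
}

// waitCh follows rules 1-3: take the lock, reuse successChs[0] if present,
// otherwise append a fresh channel.
func (h *hook) waitCh() <-chan struct{} {
    h.mu.Lock()
    defer h.mu.Unlock()
    if len(h.successChs) == 0 {
        h.successChs = append(h.successChs, make(chan struct{}))
    }
    return h.successChs[0]
}

// signalCaughtUp follows rule 4: close every channel at the same time.
func (h *hook) signalCaughtUp() {
    h.mu.Lock()
    defer h.mu.Unlock()
    for _, ch := range h.successChs {
        close(ch)
    }
    h.successChs = nil
}

func main() {
    h := &hook{}
    ch := h.waitCh()
    go h.signalCaughtUp()
    <-ch
    fmt.Println("caught up")
}
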
    // If this is true, the waitF returned by Execute() will fast fail if
    // we are not already caught up, instead of blocking on a successCh
    // actually indicating we are caught up. This is set by a call to
    // NotifyWaitFailed(), an optional interface on CommitHook.
    fastFailReplicationWait bool

    role Role

    // The standby replica to which the new root gets replicated.
@@ -108,6 +123,7 @@ func (h *commithook) replicate(ctx context.Context) {
    defer h.logger().Tracef("cluster/commithook: background thread: replicate: shutdown.")
    h.mu.Lock()
    defer h.mu.Unlock()
    shouldHeartbeat := false
    for {
        lgr := h.logger()
        // Shutdown for context canceled.
@@ -138,11 +154,25 @@ func (h *commithook) replicate(ctx context.Context) {
            h.nextHeadIncomingTime = time.Now()
        } else if h.shouldReplicate() {
            h.attemptReplicate(ctx)
            shouldHeartbeat = false
        } else {
            lgr.Tracef("cluster/commithook: background thread: waiting for signal.")
            if h.waitNotify != nil {
                h.waitNotify()
            }
            caughtUp := h.isCaughtUp()
            if len(h.successChs) != 0 && caughtUp {
                for _, ch := range h.successChs {
                    close(ch)
                }
                h.successChs = nil
                h.fastFailReplicationWait = false
            }
            if shouldHeartbeat {
                h.attemptHeartbeat(ctx)
            } else if caughtUp {
                shouldHeartbeat = true
            }
            h.cond.Wait()
            lgr.Tracef("cluster/commithook: background thread: woken up.")
        }
@@ -175,6 +205,37 @@ func (h *commithook) primaryNeedsInit() bool {
    return h.role == RolePrimary && h.nextHead == (hash.Hash{})
}

// Called by the replicate thread to periodically heartbeat liveness to a
// standby if we are a primary. These heartbeats are best effort and currently
// do not affect the data plane much.
//
// preconditions: h.mu is locked and shouldReplicate() returned false.
func (h *commithook) attemptHeartbeat(ctx context.Context) {
    if h.role != RolePrimary {
        return
    }
    head := h.lastPushedHead
    if head.IsEmpty() {
        return
    }
    destDB := h.destDB
    if destDB == nil {
        return
    }
    ctx, h.cancelReplicate = context.WithTimeout(ctx, 5*time.Second)
    defer func() {
        if h.cancelReplicate != nil {
            h.cancelReplicate()
        }
        h.cancelReplicate = nil
    }()
    h.mu.Unlock()
    datasDB := doltdb.HackDatasDatabaseFromDoltDB(destDB)
    cs := datas.ChunkStoreFromDatabase(datasDB)
    cs.Commit(ctx, head, head)
    h.mu.Lock()
}

// Called by the replicate thread to push the nextHead to the destDB and set
// its root to the new value.
//
@@ -192,6 +253,13 @@ func (h *commithook) attemptReplicate(ctx context.Context) {
        }
        h.cancelReplicate = nil
    }()
    successChs := h.successChs
    h.successChs = nil
    defer func() {
        if len(successChs) != 0 {
            h.successChs = append(h.successChs, successChs...)
        }
    }()
    h.mu.Unlock()

    if destDB == nil {
@@ -242,6 +310,12 @@ func (h *commithook) attemptReplicate(ctx context.Context) {
        h.lastPushedHead = toPush
        h.lastSuccess = incomingTime
        h.nextPushAttempt = time.Time{}
        if len(successChs) != 0 {
            for _, ch := range successChs {
                close(ch)
            }
            successChs = nil
        }
    } else {
        h.currentError = new(string)
        *h.currentError = fmt.Sprintf("failed to commit chunks on destDB: %v", err)
@@ -354,21 +428,21 @@ var errDetectedBrokenConfigStr = "error: more than one server was configured as

// Execute on this commithook updates the target root hash we're attempting to
// replicate and wakes the replication thread.
func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error {
func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) {
    lgr := h.logger()
    lgr.Tracef("cluster/commithook: Execute called post commit")
    cs := datas.ChunkStoreFromDatabase(db)
    root, err := cs.Root(ctx)
    if err != nil {
        lgr.Errorf("cluster/commithook: Execute: error retrieving local database root: %v", err)
        return err
        return nil, err
    }
    h.mu.Lock()
    defer h.mu.Unlock()
    lgr = h.logger()
    if h.role != RolePrimary {
        lgr.Warnf("cluster/commithook received commit callback for a commit on %s, but we are not role primary; not replicating the commit, which is likely to be lost.", ds.ID())
        return nil
        return nil, nil
    }
    if root != h.nextHead {
        lgr.Tracef("signaling replication thread to push new head: %v", root.String())
@@ -377,7 +451,34 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat
        h.nextPushAttempt = time.Time{}
        h.cond.Signal()
    }
    return nil
    var waitF func(context.Context) error
    if !h.isCaughtUp() {
        if h.fastFailReplicationWait {
            waitF = func(ctx context.Context) error {
                return fmt.Errorf("circuit breaker for replication to %s/%s is open. this commit did not necessarily replicate successfully.", h.remotename, h.dbname)
            }
        } else {
            if len(h.successChs) == 0 {
                h.successChs = append(h.successChs, make(chan struct{}))
            }
            successCh := h.successChs[0]
            waitF = func(ctx context.Context) error {
                select {
                case <-successCh:
                    return nil
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
        }
    }
    return waitF, nil
}

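A hedged sketch of how a caller might consume the optional wait function the new Execute signature returns: nil means nothing to wait for; otherwise the function blocks until replication succeeds or the context gives up. The harness below is hypothetical, not dolt's real call site:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

func run(waitF func(context.Context) error) error {
    if waitF == nil {
        return nil // already caught up; nothing to wait for
    }
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    if err := waitF(ctx); err != nil {
        return fmt.Errorf("replication wait: %w", err)
    }
    return nil
}

func main() {
    fmt.Println(run(nil)) // <nil>
    slow := func(ctx context.Context) error { <-ctx.Done(); return ctx.Err() }
    fmt.Println(errors.Is(run(slow), context.DeadlineExceeded)) // true
}
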
func (h *commithook) NotifyWaitFailed() {
    h.mu.Lock()
    defer h.mu.Unlock()
    h.fastFailReplicationWait = true
}

func (h *commithook) HandleError(ctx context.Context, err error) error {

@@ -82,6 +82,7 @@ type Controller struct {

type sqlvars interface {
    AddSystemVariables(sysVars []sql.SystemVariable)
    GetGlobal(name string) (sql.SystemVariable, interface{}, bool)
}

// We can manage certain aspects of the exposed databases on the server through

@@ -417,14 +417,17 @@ func (db Database) getTableInsensitive(ctx *sql.Context, head *doltdb.Commit, ds

// resolveAsOf resolves the given expression to a commit, if one exists.
func resolveAsOf(ctx *sql.Context, db Database, asOf interface{}) (*doltdb.Commit, *doltdb.RootValue, error) {
    head := db.rsr.CWBHeadRef()
    head, err := db.rsr.CWBHeadRef()
    if err != nil {
        return nil, nil, err
    }
    switch x := asOf.(type) {
    case time.Time:
        return resolveAsOfTime(ctx, db.ddb, head, x)
    case string:
        return resolveAsOfCommitRef(ctx, db, head, x)
    default:
        panic(fmt.Sprintf("unsupported AS OF type %T", asOf))
        return nil, nil, fmt.Errorf("unsupported AS OF type %T", asOf)
    }
}

@@ -645,6 +648,12 @@ func (db Database) GetRoot(ctx *sql.Context) (*doltdb.RootValue, error) {
    return dbState.GetRoots().Working, nil
}

// GetWorkingSet gets the current working set for the database.
// If there is no working set (most likely because the DB is in Detached Head mode), return an error.
// If a command needs to work while in Detached Head, that command should call sess.LookupDbState directly.
// TODO: This is a temporary measure to make sure that new commands that call GetWorkingSet don't unexpectedly receive
// a null pointer. In the future, we should replace all uses of dbState.WorkingSet, including this, with a new interface
// where users avoid handling the WorkingSet directly.
func (db Database) GetWorkingSet(ctx *sql.Context) (*doltdb.WorkingSet, error) {
    sess := dsess.DSessFromSess(ctx.Session)
    dbState, ok, err := sess.LookupDbState(ctx, db.Name())
@@ -654,6 +663,9 @@ func (db Database) GetWorkingSet(ctx *sql.Context) (*doltdb.WorkingSet, error) {
    if !ok {
        return nil, fmt.Errorf("no root value found in session")
    }
    if dbState.WorkingSet == nil {
        return nil, doltdb.ErrOperationNotSupportedInDetachedHead
    }
    return dbState.WorkingSet, nil
}


@@ -864,7 +864,11 @@ func initialDbState(ctx context.Context, db dsess.SqlDatabase, branch string) (d
    if len(branch) > 0 {
        r = ref.NewBranchRef(branch)
    } else {
        r = rsr.CWBHeadRef()
        var err error
        r, err = rsr.CWBHeadRef()
        if err != nil {
            return dsess.InitialDbState{}, err
        }
    }

    var retainedErr error
@@ -1418,7 +1422,11 @@ func initialStateForCommit(ctx context.Context, srcDb ReadOnlyDatabase) (dsess.I
        return dsess.InitialDbState{}, err
    }

    cm, err := srcDb.DbData().Ddb.Resolve(ctx, spec, srcDb.DbData().Rsr.CWBHeadRef())
    headRef, err := srcDb.DbData().Rsr.CWBHeadRef()
    if err != nil {
        return dsess.InitialDbState{}, err
    }
    cm, err := srcDb.DbData().Ddb.Resolve(ctx, spec, headRef)
    if err != nil {
        return dsess.InitialDbState{}, err
    }
@@ -1448,8 +1456,8 @@ type staticRepoState struct {
    env.RepoStateReader
}

func (s staticRepoState) CWBHeadRef() ref.DoltRef {
    return s.branch
func (s staticRepoState) CWBHeadRef() (ref.DoltRef, error) {
    return s.branch, nil
}

// formatDbMapKeyName returns formatted string of database name and/or branch name. Database name is case-insensitive,

@@ -20,6 +20,7 @@ import (
    "github.com/dolthub/go-mysql-server/sql"
    "github.com/dolthub/go-mysql-server/sql/types"

    "github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
    "github.com/dolthub/dolt/go/libraries/doltcore/ref"
    "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
@@ -46,6 +47,10 @@ func (ab *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, er
    }

    currentBranchRef, err := dSess.CWBHeadRef(ctx, dbName)
    if err == doltdb.ErrOperationNotSupportedInDetachedHead {
        // active_branch should return NULL if we're in detached head state
        return nil, nil
    }
    if err != nil {
        return nil, err
    }

@@ -94,11 +94,16 @@ func resolveRefSpecs(ctx *sql.Context, leftSpec, rightSpec string) (left, right
        return nil, nil, sql.ErrDatabaseNotFound.New(dbName)
    }

    left, err = doltDB.Resolve(ctx, lcs, dbData.Rsr.CWBHeadRef())
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return nil, nil, err
    }
    right, err = doltDB.Resolve(ctx, rcs, dbData.Rsr.CWBHeadRef())

    left, err = doltDB.Resolve(ctx, lcs, headRef)
    if err != nil {
        return nil, nil, err
    }
    right, err = doltDB.Resolve(ctx, rcs, headRef)
    if err != nil {
        return nil, nil, err
    }

@@ -64,30 +64,36 @@ func doDoltBranch(ctx *sql.Context, args []string) (int, error) {
        return 1, fmt.Errorf("Could not load database %s", dbName)
    }

    var rsc doltdb.ReplicationStatusController

    switch {
    case apr.Contains(cli.CopyFlag):
        err = copyBranch(ctx, dbData, apr)
        err = copyBranch(ctx, dbData, apr, &rsc)
    case apr.Contains(cli.MoveFlag):
        err = renameBranch(ctx, dbData, apr, dSess, dbName)
        err = renameBranch(ctx, dbData, apr, dSess, dbName, &rsc)
    case apr.Contains(cli.DeleteFlag), apr.Contains(cli.DeleteForceFlag):
        err = deleteBranches(ctx, dbData, apr, dSess, dbName)
        err = deleteBranches(ctx, dbData, apr, dSess, dbName, &rsc)
    default:
        err = createNewBranch(ctx, dbData, apr)
        err = createNewBranch(ctx, dbData, apr, &rsc)
    }

    if err != nil {
        return 1, err
    } else {
        return 0, commitTransaction(ctx, dSess)
        return 0, commitTransaction(ctx, dSess, &rsc)
    }
}

func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession) error {
func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession, rsc *doltdb.ReplicationStatusController) error {
    err := dSess.CommitTransaction(ctx, ctx.GetTransaction())
    if err != nil {
        return err
    }

    if rsc != nil {
        dsess.WaitForReplicationController(ctx, *rsc)
    }

    // Because this transaction manipulation is happening outside the engine's awareness, we need to set it to nil here
    // to get a fresh transaction started on the next statement.
    // TODO: put this under engine control
@@ -97,7 +103,7 @@ func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession) error {

// renameBranch takes DoltSession and database name to try accessing file system for dolt database.
// If the oldBranch being renamed is the current branch on CLI, then RepoState head will be updated with the newBranch ref.
func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string) error {
func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string, rsc *doltdb.ReplicationStatusController) error {
    if apr.NArg() != 2 {
        return InvalidArgErr
    }
@@ -124,7 +130,7 @@ func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseRe
        return err
    }

    err := actions.RenameBranch(ctx, dbData, oldBranchName, newBranchName, sess.Provider(), force)
    err := actions.RenameBranch(ctx, dbData, oldBranchName, newBranchName, sess.Provider(), force, rsc)
    if err != nil {
        return err
    }
@@ -150,7 +156,7 @@ func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseRe
// deleteBranches takes DoltSession and database name to try accessing file system for dolt database.
// If the database is not session state db and the branch being deleted is the current branch on CLI, it will update
// the RepoState to set head as empty branchRef.
func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string) error {
func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string, rsc *doltdb.ReplicationStatusController) error {
    if apr.NArg() == 0 {
        return InvalidArgErr
    }
@@ -194,7 +200,7 @@ func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParse

    err = actions.DeleteBranch(ctx, dbData, branchName, actions.DeleteOptions{
        Force: force,
    }, dSess.Provider())
    }, dSess.Provider(), rsc)
    if err != nil {
        return err
    }
@@ -274,7 +280,7 @@ func loadConfig(ctx *sql.Context) *env.DoltCliConfig {
    return dEnv.Config
}

func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error {
    if apr.NArg() == 0 || apr.NArg() > 2 {
        return InvalidArgErr
    }
@@ -332,7 +338,7 @@ func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgPars
        return err
    }

    err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, startPt, apr.Contains(cli.ForceFlag))
    err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, startPt, apr.Contains(cli.ForceFlag), rsc)
    if err != nil {
        return err
    }
@@ -348,7 +354,7 @@ func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgPars
    return nil
}

func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error {
func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error {
    if apr.NArg() != 2 {
        return InvalidArgErr
    }
@@ -364,10 +370,10 @@ func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResu
    }

    force := apr.Contains(cli.ForceFlag)
    return copyABranch(ctx, dbData, srcBr, destBr, force)
    return copyABranch(ctx, dbData, srcBr, destBr, force, rsc)
}

func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr string, force bool) error {
func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr string, force bool, rsc *doltdb.ReplicationStatusController) error {
    if err := branch_control.CanCreateBranch(ctx, destBr); err != nil {
        return err
    }
@@ -378,7 +384,7 @@ func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr strin
            return err
        }
    }
    err := actions.CopyBranchOnDB(ctx, dbData.Ddb, srcBr, destBr, force)
    err := actions.CopyBranchOnDB(ctx, dbData.Ddb, srcBr, destBr, force, rsc)
    if err != nil {
        if err == doltdb.ErrBranchNotFound {
            return errors.New(fmt.Sprintf("fatal: A branch named '%s' not found", srcBr))

@@ -71,9 +71,11 @@ func doDoltCheckout(ctx *sql.Context, args []string) (int, error) {
        return 1, fmt.Errorf("Could not load database %s", currentDbName)
    }

    var rsc doltdb.ReplicationStatusController

    // Checking out new branch.
    if branchOrTrack {
        err = checkoutNewBranch(ctx, dbName, dbData, apr)
        err = checkoutNewBranch(ctx, dbName, dbData, apr, &rsc)
        if err != nil {
            return 1, err
        } else {
@@ -121,13 +123,15 @@ func doDoltCheckout(ctx *sql.Context, args []string) (int, error) {

    err = checkoutTables(ctx, roots, dbName, args)
    if err != nil && apr.NArg() == 1 {
        err = checkoutRemoteBranch(ctx, dbName, dbData, branchName, apr)
        err = checkoutRemoteBranch(ctx, dbName, dbData, branchName, apr, &rsc)
    }

    if err != nil {
        return 1, err
    }

    dsess.WaitForReplicationController(ctx, rsc)

    return 0, nil
}

@@ -168,7 +172,7 @@ func createWorkingSetForLocalBranch(ctx *sql.Context, ddb *doltdb.DoltDB, branch
    }

    ws := doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot)
    return ddb.UpdateWorkingSet(ctx, wsRef, ws, hash.Hash{} /* current hash... */, doltdb.TodoWorkingSetMeta())
    return ddb.UpdateWorkingSet(ctx, wsRef, ws, hash.Hash{} /* current hash... */, doltdb.TodoWorkingSetMeta(), nil)
}

// getRevisionForRevisionDatabase returns the root database name and revision for a database, or just the root database name if the specified db name is not a revision database.
@@ -196,7 +200,7 @@ func getRevisionForRevisionDatabase(ctx *sql.Context, dbName string) (string, st

// checkoutRemoteBranch checks out a remote branch, creating a new local branch with the same name as the remote branch,
// and sets its upstream. The upstream persists outside of the SQL session.
func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, branchName string, apr *argparser.ArgParseResults) error {
func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, branchName string, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error {
    remoteRefs, err := actions.GetRemoteBranchRef(ctx, dbData.Ddb, branchName)
    if err != nil {
        return errors.New("fatal: unable to read from data repository")
@@ -206,7 +210,7 @@ func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, br
        return fmt.Errorf("error: could not find %s", branchName)
    } else if len(remoteRefs) == 1 {
        remoteRef := remoteRefs[0]
        err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, remoteRef.String(), false)
        err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, remoteRef.String(), false, rsc)
        if err != nil {
            return err
        }
@@ -220,13 +224,18 @@ func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, br
            return errhand.BuildDError(fmt.Errorf("%w: '%s'", err, remoteRef.GetRemote()).Error()).Build()
        }

        return env.SetRemoteUpstreamForRefSpec(dbData.Rsw, refSpec, remoteRef.GetRemote(), dbData.Rsr.CWBHeadRef())
        headRef, err := dbData.Rsr.CWBHeadRef()
        if err != nil {
            return err
        }

        return env.SetRemoteUpstreamForRefSpec(dbData.Rsw, refSpec, remoteRef.GetRemote(), headRef)
    } else {
        return fmt.Errorf("'%s' matched multiple (%v) remote tracking branches", branchName, len(remoteRefs))
    }
}

func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *argparser.ArgParseResults) error {
func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error {
    var newBranchName string
    var remoteName, remoteBranchName string
    var startPt = "head"
@@ -259,7 +268,7 @@ func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *
        newBranchName = newBranch
    }

    err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, false)
    err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, false, rsc)
    if err != nil {
        return err
    }

@@ -136,7 +136,11 @@ func cherryPick(ctx *sql.Context, dSess *dsess.DoltSession, roots doltdb.Roots,
    if err != nil {
        return nil, "", err
    }
    cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, dbData.Rsr.CWBHeadRef())
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return nil, "", err
    }
    cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, headRef)
    if err != nil {
        return nil, "", err
    }

@@ -120,7 +120,12 @@ func doDoltMerge(ctx *sql.Context, args []string) (int, int, error) {
    if !ok {
        return noConflictsOrViolations, threeWayMerge, fmt.Errorf("Could not load database %s", dbName)
    }
    msg := fmt.Sprintf("Merge branch '%s' into %s", branchName, dbData.Rsr.CWBHeadRef().GetPath())

    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return noConflictsOrViolations, threeWayMerge, err
    }
    msg := fmt.Sprintf("Merge branch '%s' into %s", branchName, headRef.GetPath())
    if userMsg, mOk := apr.GetValue(cli.MessageArg); mOk {
        msg = userMsg
    }
@@ -266,7 +271,11 @@ func executeFFMerge(ctx *sql.Context, dbName string, squash bool, ws *doltdb.Wor
    // TODO: This is all incredibly suspect, needs to be replaced with library code that is functional instead of
    // altering global state
    if !squash {
        err = dbData.Ddb.FastForward(ctx, dbData.Rsr.CWBHeadRef(), cm2)
        headRef, err := dbData.Rsr.CWBHeadRef()
        if err != nil {
            return nil, err
        }
        err = dbData.Ddb.FastForward(ctx, headRef, cm2)
        if err != nil {
            return ws, err
        }

@@ -148,7 +148,11 @@ func doDoltPull(ctx *sql.Context, args []string) (int, int, error) {
        return noConflictsOrViolations, threeWayMerge, err
    }

    msg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, dbData.Rsr.CWBHeadRef().GetPath())
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return noConflictsOrViolations, threeWayMerge, err
    }
    msg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, headRef.GetPath())
    ws, conflicts, fastForward, err = performMerge(ctx, sess, roots, ws, dbName, mergeSpec, apr.Contains(cli.NoCommitFlag), msg)
    if err != nil && !errors.Is(doltdb.ErrUpToDate, err) {
        return conflicts, fastForward, err

@@ -24,6 +24,7 @@ import (
    "github.com/dolthub/dolt/go/cmd/dolt/errhand"
    "github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
    "github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
    "github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
    "github.com/dolthub/dolt/go/libraries/doltcore/env"
    "github.com/dolthub/dolt/go/libraries/doltcore/ref"
    "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
@@ -65,11 +66,13 @@ func doDoltRemote(ctx *sql.Context, args []string) (int, error) {
        return 1, fmt.Errorf("error: invalid argument, use 'dolt_remotes' system table to list remotes")
    }

    var rsc doltdb.ReplicationStatusController

    switch apr.Arg(0) {
    case "add":
        err = addRemote(ctx, dbName, dbData, apr, dSess)
    case "remove", "rm":
        err = removeRemote(ctx, dbData, apr)
        err = removeRemote(ctx, dbData, apr, &rsc)
    default:
        err = fmt.Errorf("error: invalid argument")
    }
@@ -77,6 +80,9 @@ func doDoltRemote(ctx *sql.Context, args []string) (int, error) {
    if err != nil {
        return 1, err
    }

    dsess.WaitForReplicationController(ctx, rsc)

    return 0, nil
}

@@ -106,7 +112,7 @@ func addRemote(_ *sql.Context, dbName string, dbd env.DbData, apr *argparser.Arg
    return dbd.Rsw.AddRemote(r)
}

func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResults) error {
func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error {
    if apr.NArg() != 2 {
        return fmt.Errorf("error: invalid argument")
    }
@@ -133,7 +139,7 @@ func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResul
        rr := r.(ref.RemoteRef)

        if rr.GetRemote() == remote.Name {
            err = ddb.DeleteBranch(ctx, rr)
            err = ddb.DeleteBranch(ctx, rr, rsc)

            if err != nil {
                return fmt.Errorf("%w; failed to delete remote tracking ref '%s'; %s", env.ErrFailedToDeleteRemote, rr.String(), err.Error())

@@ -100,7 +100,11 @@ func doDoltReset(ctx *sql.Context, args []string) (int, error) {

    // TODO: this overrides the transaction setting, needs to happen at commit, not here
    if newHead != nil {
        if err := dbData.Ddb.SetHeadToCommit(ctx, dbData.Rsr.CWBHeadRef(), newHead); err != nil {
        headRef, err := dbData.Rsr.CWBHeadRef()
        if err != nil {
            return 1, err
        }
        if err := dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil {
            return 1, err
        }
    }

@@ -97,7 +97,10 @@ func doDoltTag(ctx *sql.Context, args []string) (int, error) {
    if len(apr.Args) > 1 {
        startPoint = apr.Arg(1)
    }
    headRef := dbData.Rsr.CWBHeadRef()
    headRef, err := dbData.Rsr.CWBHeadRef()
    if err != nil {
        return 1, err
    }
    err = actions.CreateTagOnDB(ctx, dbData.Ddb, tagName, startPoint, props, headRef)
    if err != nil {
        return 1, err

@@ -573,7 +573,7 @@ func (d *DoltSession) NewPendingCommit(ctx *sql.Context, dbName string, roots do
    headHash, _ := headCommit.HashOf()

    if sessionState.WorkingSet == nil {
        return nil, fmt.Errorf("Cannot commit while not attached to a branch. ")
        return nil, doltdb.ErrOperationNotSupportedInDetachedHead
    }

    var mergeParentCommits []*doltdb.Commit
@@ -841,6 +841,10 @@ func (d *DoltSession) SetRoot(ctx *sql.Context, dbName string, newRoot *doltdb.R
        return err
    }

    if sessionState.WorkingSet == nil {
        return doltdb.ErrOperationNotSupportedInDetachedHead
    }

    if rootsEqual(sessionState.GetRoots().Working, newRoot) {
        return nil
    }
@@ -864,6 +868,10 @@ func (d *DoltSession) SetRoots(ctx *sql.Context, dbName string, roots doltdb.Roo
        return err
    }

    if sessionState.WorkingSet == nil {
        return doltdb.ErrOperationNotSupportedInDetachedHead
    }

    workingSet := sessionState.WorkingSet.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged)
    return d.SetWorkingSet(ctx, dbName, workingSet)
}
@@ -1039,6 +1047,9 @@ func (d *DoltSession) WorkingSet(ctx *sql.Context, dbName string) (*doltdb.Worki
    if err != nil {
        return nil, err
    }
    if sessionState.WorkingSet == nil {
        return nil, doltdb.ErrOperationNotSupportedInDetachedHead
    }
    return sessionState.WorkingSet, nil
}

@@ -1266,7 +1277,7 @@ func (d *DoltSession) CWBHeadRef(ctx *sql.Context, dbName string) (ref.DoltRef,
    }

    if dbState.WorkingSet == nil {
        return nil, nil
        return nil, doltdb.ErrOperationNotSupportedInDetachedHead
    }

    return dbState.WorkingSet.Ref().ToHeadRef()

@@ -61,29 +61,30 @@ func (s SessionStateAdapter) GetRoots(ctx context.Context) (doltdb.Roots, error)
    return state.GetRoots(), nil
}

func (s SessionStateAdapter) CWBHeadRef() ref.DoltRef {
func (s SessionStateAdapter) CWBHeadRef() (ref.DoltRef, error) {
    workingSet, err := s.session.WorkingSet(sql.NewContext(context.Background()), s.dbName)
    if err != nil {
        // TODO: fix this interface
        panic(err)
        return nil, err
    }

    headRef, err := workingSet.Ref().ToHeadRef()
    // TODO: fix this interface
    if err != nil {
        panic(err)
        return nil, err
    }
    return headRef
    return headRef, nil
}

func (s SessionStateAdapter) CWBHeadSpec() *doltdb.CommitSpec {
func (s SessionStateAdapter) CWBHeadSpec() (*doltdb.CommitSpec, error) {
    // TODO: get rid of this
    ref := s.CWBHeadRef()
    ref, err := s.CWBHeadRef()
    if err != nil {
        return nil, err
    }
    spec, err := doltdb.NewCommitSpec(ref.GetPath())
    if err != nil {
        panic(err)
    }
    return spec
    return spec, nil
}

func (s SessionStateAdapter) GetRemotes() (map[string]env.Remote, error) {

@@ -23,6 +23,7 @@ import (
    "time"

    "github.com/dolthub/go-mysql-server/sql"
    "github.com/dolthub/vitess/go/mysql"
    "github.com/sirupsen/logrus"

    "github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -232,7 +233,9 @@ func doltCommit(ctx *sql.Context,

    workingSet = workingSet.ClearMerge()

    newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx))
    var rsc doltdb.ReplicationStatusController
    newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx), &rsc)
    WaitForReplicationController(ctx, rsc)
    return workingSet, newCommit, err
}

@@ -243,7 +246,10 @@ func txCommit(ctx *sql.Context,
    workingSet *doltdb.WorkingSet,
    hash hash.Hash,
) (*doltdb.WorkingSet, *doltdb.Commit, error) {
    return workingSet, nil, tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx))
    var rsc doltdb.ReplicationStatusController
    err := tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx), &rsc)
    WaitForReplicationController(ctx, rsc)
    return workingSet, nil, err
}

// DoltCommit commits the working set and creates a new DoltCommit as specified, in one atomic write
@@ -251,6 +257,71 @@ func (tx *DoltTransaction) DoltCommit(ctx *sql.Context, workingSet *doltdb.Worki
    return tx.doCommit(ctx, workingSet, commit, doltCommit)
}

func WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatusController) {
    if len(rsc.Wait) == 0 {
        return
    }
    _, timeout, ok := sql.SystemVariables.GetGlobal(DoltClusterAckWritesTimeoutSecs)
    if !ok {
        return
    }
    timeoutI := timeout.(int64)
    if timeoutI == 0 {
        return
    }

    cCtx, cancel := context.WithCancel(ctx)
    var wg sync.WaitGroup
    wg.Add(len(rsc.Wait))
    for i, f := range rsc.Wait {
        f := f
        i := i
        go func() {
            defer wg.Done()
            err := f(cCtx)
            if err == nil {
                rsc.Wait[i] = nil
            }
        }()
    }

    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()

    waitFailed := false
    select {
    case <-time.After(time.Duration(timeoutI) * time.Second):
        // We timed out before all the waiters were done.
        // First we make certain to finalize everything.
        cancel()
        <-done
        waitFailed = true
    case <-done:
        cancel()
    }

    // Just because our waiters all completed does not mean they all
    // returned nil errors. Any non-nil entries in rsc.Wait returned an
    // error. We turn those into warnings here.
    numFailed := 0
    for i, f := range rsc.Wait {
        if f != nil {
            numFailed += 1
            if waitFailed {
                rsc.NotifyWaitFailed[i]()
            }
        }
    }
    ctx.Session.Warn(&sql.Warning{
        Level:   "Warning",
        Code:    mysql.ERQueryTimeout,
        Message: fmt.Sprintf("Timed out replication of commit to %d out of %d replicas.", numFailed, len(rsc.Wait)),
    })
}

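The core timeout pattern used by WaitForReplicationController above, reduced to a runnable sketch with invented names: run each waiter in its own goroutine, race a single timer against their collective completion, and cancel the shared context on timeout so stragglers exit promptly:

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

func waitAll(timeout time.Duration, waiters []func(context.Context) error) bool {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    var wg sync.WaitGroup
    wg.Add(len(waiters))
    for _, f := range waiters {
        f := f
        go func() { defer wg.Done(); _ = f(ctx) }()
    }

    // Collapse the WaitGroup into a channel so it can participate in select.
    done := make(chan struct{})
    go func() { wg.Wait(); close(done) }()

    select {
    case <-time.After(timeout):
        cancel() // tell stragglers to give up
        <-done   // then wait for them to actually finish
        return false
    case <-done:
        return true
    }
}

func main() {
    ok := waitAll(50*time.Millisecond, []func(context.Context) error{
        func(ctx context.Context) error { return nil },
    })
    fmt.Println(ok) // true
}
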
// doCommit commits this transaction with the write function provided. It takes the same params as DoltCommit
|
||||
func (tx *DoltTransaction) doCommit(
|
||||
ctx *sql.Context,
|
||||
|
||||
@@ -50,9 +50,11 @@ const (
|
||||
AwsCredsProfile = "aws_credentials_profile"
|
||||
AwsCredsRegion = "aws_credentials_region"
|
||||
ShowBranchDatabases = "dolt_show_branch_databases"
|
||||
DoltLogLevel = "dolt_log_level"
|
||||
|
||||
DoltClusterRoleVariable = "dolt_cluster_role"
|
||||
DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch"
|
||||
DoltClusterRoleVariable = "dolt_cluster_role"
|
||||
DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch"
|
||||
DoltClusterAckWritesTimeoutSecs = "dolt_cluster_ack_writes_timeout_secs"
|
||||
)
|
||||
|
||||
const URLTemplateDatabasePlaceholder = "{database}"
|
||||
|
||||
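The lookup in WaitForReplicationController treats the variable's value as an int64 and treats zero as "do not wait". A small self-contained sketch of that logic; the getGlobal parameter is a stand-in for sql.SystemVariables.GetGlobal:

```go
package main

import (
	"fmt"
	"time"
)

// ackTimeout converts the system variable value into a wait duration.
// A missing variable or a zero value means "do not wait", matching the
// early returns in WaitForReplicationController.
func ackTimeout(getGlobal func(name string) (interface{}, bool)) (time.Duration, bool) {
	v, ok := getGlobal("dolt_cluster_ack_writes_timeout_secs")
	if !ok {
		return 0, false
	}
	secs, ok := v.(int64)
	if !ok || secs == 0 {
		return 0, false
	}
	return time.Duration(secs) * time.Second, true
}

func main() {
	fake := func(name string) (interface{}, bool) { return int64(10), true }
	d, wait := ackTimeout(fake)
	fmt.Println(d, wait) // 10s true
}
```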
@@ -117,6 +117,12 @@ func (cvt *prollyConstraintViolationsTable) PartitionRows(ctx *sql.Context, part
return nil, err
}
kd, vd := sch.GetMapDescriptors()

// value tuples encoded in ConstraintViolationMeta may
// violate the not null constraints assumed by fixed access
kd = kd.WithoutFixedAccess()
vd = vd.WithoutFixedAccess()

return prollyCVIter{
itr: itr,
sch: sch,
@@ -155,7 +161,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
}

r := make(sql.Row, itr.sch.GetAllCols().Size()+3)
r[0] = art.TheirRootIsh.String()
r[0] = art.SourceRootish.String()
r[1] = mapCVType(art.ArtType)

var meta prolly.ConstraintViolationMeta
@@ -167,7 +173,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
o := 2
if !schema.IsKeyless(itr.sch) {
for i := 0; i < itr.kd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.kd, i, art.Key, itr.ns)
r[o+i], err = index.GetField(ctx, itr.kd, i, art.SourceKey, itr.ns)
if err != nil {
return nil, err
}
@@ -206,6 +212,13 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
return nil, err
}
r[o] = m
case prolly.ArtifactTypeNullViol:
var m merge.NullViolationMeta
err = json.Unmarshal(meta.VInfo, &m)
if err != nil {
return nil, err
}
r[o] = m
default:
panic("json not implemented for artifact type")
}
@@ -295,6 +308,8 @@ func mapCVType(artifactType prolly.ArtifactType) (outType uint64) {
outType = uint64(merge.CvType_UniqueIndex)
case prolly.ArtifactTypeChkConsViol:
outType = uint64(merge.CvType_CheckConstraint)
case prolly.ArtifactTypeNullViol:
outType = uint64(merge.CvType_NotNull)
default:
panic("unhandled cv type")
}
@@ -309,6 +324,8 @@ func unmapCVType(in merge.CvType) (out prolly.ArtifactType) {
out = prolly.ArtifactTypeUniqueKeyViol
case merge.CvType_CheckConstraint:
out = prolly.ArtifactTypeChkConsViol
case merge.CvType_NotNull:
out = prolly.ArtifactTypeNullViol
default:
panic("unhandled cv type")
}
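mapCVType and unmapCVType are mirrored switches, so any artifact type added to one (here ArtifactTypeNullViol) must be added to the other or the default panic fires. A self-contained sketch of the round-trip invariant, using illustrative local types in place of the prolly and merge packages:

```go
package main

import "fmt"

type artifactType uint8

const (
	artFKViol artifactType = iota + 1
	artUniqueViol
	artChkViol
	artNullViol
)

type cvType uint64

const (
	cvFK cvType = iota + 1
	cvUnique
	cvCheck
	cvNotNull
)

// mapCV and unmapCV mirror each other; a case added to one without the
// other breaks the round trip and hits the panic.
func mapCV(a artifactType) cvType {
	switch a {
	case artFKViol:
		return cvFK
	case artUniqueViol:
		return cvUnique
	case artChkViol:
		return cvCheck
	case artNullViol:
		return cvNotNull
	default:
		panic("unhandled cv type")
	}
}

func unmapCV(c cvType) artifactType {
	switch c {
	case cvFK:
		return artFKViol
	case cvUnique:
		return artUniqueViol
	case cvCheck:
		return artChkViol
	case cvNotNull:
		return artNullViol
	default:
		panic("unhandled cv type")
	}
}

func main() {
	for _, a := range []artifactType{artFKViol, artUniqueViol, artChkViol, artNullViol} {
		if unmapCV(mapCV(a)) != a {
			panic(fmt.Sprintf("round trip failed for %d", a))
		}
	}
	fmt.Println("round trip holds for all four types")
}
```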

@@ -117,13 +117,12 @@ var _ sql.RowDeleter = (*ignoreWriter)(nil)
type ignoreWriter struct {
it *IgnoreTable
errDuringStatementBegin error
workingSet *doltdb.WorkingSet
prevHash *hash.Hash
tableWriter writer.TableWriter
}

func newIgnoreWriter(it *IgnoreTable) *ignoreWriter {
return &ignoreWriter{it, nil, nil, nil, nil}
return &ignoreWriter{it, nil, nil, nil}
}

// Insert inserts the row given, returning an error if it cannot. Insert will be called once for each row to process
@@ -179,7 +178,6 @@ func (iw *ignoreWriter) StatementBegin(ctx *sql.Context) {

iw.prevHash = &prevHash

iw.workingSet = dbState.WorkingSet
found, err := roots.Working.HasTable(ctx, doltdb.IgnoreTableName)

if err != nil {
@@ -229,6 +227,11 @@ func (iw *ignoreWriter) StatementBegin(ctx *sql.Context) {
return
}

if dbState.WorkingSet == nil {
iw.errDuringStatementBegin = doltdb.ErrOperationNotSupportedInDetachedHead
return
}

// We use WriteSession.SetWorkingSet instead of DoltSession.SetRoot because we want to avoid modifying the root
// until the end of the transaction, but we still want the WriteSession to be able to find the newly
// created table.
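The nil working set guard added here reappears below in RewriteInserter, NewTempTable, and setTempTableRoot. A hypothetical helper showing the shape of the check; the diff inlines it at each call site rather than sharing a helper:

```go
package writers // illustrative package name

import "github.com/dolthub/dolt/go/libraries/doltcore/doltdb"

// requireWorkingSet is a hypothetical helper: a nil working set means the
// session is on a detached HEAD, where table writes are not supported.
func requireWorkingSet(ws *doltdb.WorkingSet) error {
	if ws == nil {
		return doltdb.ErrOperationNotSupportedInDetachedHead
	}
	return nil
}
```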
@@ -49,7 +49,7 @@ var skipPrepared bool
// SkipPreparedsCount is used by the "ci-check-repo" CI workflow
// as a reminder to consider prepareds when adding a new
// enginetest suite.
const SkipPreparedsCount = 85
const SkipPreparedsCount = 86

const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
@@ -158,6 +158,51 @@ func TestSingleScript(t *testing.T) {
}
}

// Convenience test for debugging a single query. Unskip and set to the desired query.
func TestSingleMergeScript(t *testing.T) {
t.Skip()
var scripts = []MergeScriptTest{
{
Name: "adding a non-null column with a default value to one side",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
},
RightSetUpScript: []string{
"alter table t add column col2 int not null default 0",
"alter table t add column col3 int;",
"insert into t values (2, 2, 2, null);",
},
LeftSetUpScript: []string{
"insert into t values (3, 3);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0}},
},
{
Query: "select * from t;",
Expected: []sql.Row{{1, 1, 0, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}},
},
{
Query: "select pk, violation_type from dolt_constraint_violations_t",
Expected: []sql.Row{},
},
},
},
}
for _, test := range scripts {
t.Run("merge right into left", func(t *testing.T) {
enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false))
})
t.Run("merge left into right", func(t *testing.T) {
enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true))
})
}
}

func TestSingleQueryPrepared(t *testing.T) {
t.Skip()
@@ -3393,7 +3393,7 @@ var PatchTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT statement_order, table_name, diff_type, statement FROM dolt_patch('HEAD', 'WORKING')",
Expected: []sql.Row{{1, "foo", "schema", "CREATE TABLE `foo` (\n `pk` int NOT NULL,\n `c1` int,\n PRIMARY KEY (`pk`),\n CONSTRAINT `chk_eq3jn5ra` CHECK ((c1 > 3))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
Expected: []sql.Row{{1, "foo", "schema", "CREATE TABLE `foo` (\n `pk` int NOT NULL,\n `c1` int,\n PRIMARY KEY (`pk`),\n CONSTRAINT `foo_chk_eq3jn5ra` CHECK ((c1 > 3))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}},
},
},
},
@@ -15,7 +15,6 @@
package enginetest

import (
"fmt"
"strings"

"github.com/dolthub/go-mysql-server/enginetest/queries"
@@ -1606,6 +1605,56 @@ var Dolt1MergeScripts = []queries.ScriptTest{
},
},
},
{
Name: "try to merge a nullable field into a non-null column",
SetUpScript: []string{
"SET dolt_force_transaction_commit = on;",
"create table test (pk int primary key, c0 int)",
"insert into test values (1,1),(3,3);",
"call dolt_commit('-Am', 'new table with NULL value');",
"call dolt_checkout('-b', 'other')",
"insert into test values (2,NULL);",
"call dolt_commit('-am', 'inserted null value')",
"call dolt_checkout('main');",
"alter table test modify c0 int not null;",
"insert into test values (4,4)",
"call dolt_commit('-am', 'modified column c0 to not null');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('other')",
Expected: []sql.Row{{0, 1}},
},
{
Query: "select * from dolt_constraint_violations",
Expected: []sql.Row{{"test", uint(1)}},
},
{
Query: "select violation_type, pk, violation_info from dolt_constraint_violations_test",
Expected: []sql.Row{
{uint16(4), 2, types.JSONDocument{Val: merge.NullViolationMeta{Columns: []string{"c0"}}}},
},
},
},
},
{
Name: "dolt_revert() detects not null violation (issue #4527)",
SetUpScript: []string{
"create table test2 (pk int primary key, c0 int)",
"insert into test2 values (1,1),(2,NULL),(3,3);",
"call dolt_commit('-Am', 'new table with NULL value');",
"delete from test2 where pk = 2;",
"call dolt_commit('-am', 'deleted row with NULL value');",
"alter table test2 modify c0 int not null",
"call dolt_commit('-am', 'modified column c0 to not null');",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_revert('head~1');",
ExpectedErrStr: "revert currently does not handle constraint violations",
},
},
},
}
var KeylessMergeCVsAndConflictsScripts = []queries.ScriptTest{
@@ -4584,6 +4633,37 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
{
Name: "adding a non-null column with a default value to one side",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
},
RightSetUpScript: []string{
"alter table t add column col2 int not null default 0",
"alter table t add column col3 int;",
"insert into t values (2, 2, 2, null);",
},
LeftSetUpScript: []string{
"insert into t values (3, 3);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0}},
},
{
Query: "select * from t;",
Expected: []sql.Row{{1, 1, 0, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}},
},
{
Query: "select pk, violation_type from dolt_constraint_violations_t",
Expected: []sql.Row{},
},
},
},
{
Name: "adding a non-null column with a default value to one side (with update to existing row)",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, 1);",
},
@@ -4598,19 +4678,25 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
SkipResultsCheck: true,
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0}}, // non-symmetric result
},
{
Skip: true,
Query: "select * from t;",
Query: "select * from t;", // fails with row(1,1,0,NULL)
Expected: []sql.Row{{1, 1, 1, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}},
},
{
Query: "select pk, violation_type from dolt_constraint_violations_t",
Expected: []sql.Row{},
},
},
},
{
Name: "adding a not-null constraint and default value to a column",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, null), (2, null);",
},
@@ -4624,19 +4710,23 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Skip: true,
Query: "select pk, col1 from t;",
Expected: []sql.Row{
{1, 9999},
{2, 9999},
{3, 30},
{4, 40},
{5, 9999},
{6, 9999},
},
},
{
Query: "select pk, violation_type from dolt_constraint_violations_t",
Expected: []sql.Row{
{5, uint16(4)},
{6, uint16(4)},
},
},
},
@@ -4644,6 +4734,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
{
Name: "adding a not-null constraint to one side",
AncSetUpScript: []string{
"set dolt_force_transaction_commit = on;",
"create table t (pk int primary key, col1 int);",
"insert into t values (1, null), (2, null);",
},
@@ -4656,8 +4747,21 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('right');",
ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"),
Query: "call dolt_merge('right');",
Expected: []sql.Row{{0, 0x1}},
},
{
Query: "select pk, col1 from t;",
Expected: []sql.Row{
{1, 0},
{2, 0},
},
},
{
Query: "select violation_type, pk, violation_info from dolt_constraint_violations_t",
Expected: []sql.Row{
{uint16(4), 3, types.JSONDocument{Val: merge.NullViolationMeta{Columns: []string{"col1"}}}},
},
},
},
},
@@ -169,11 +169,18 @@ func drainIter(ctx *sql.Context, iter sql.RowIter) error {
return iter.Close(ctx)
}

func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialDbState {
func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) (dsess.InitialDbState, error) {
ctx := context.Background()

head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
headSpec, err := dEnv.RepoStateReader().CWBHeadSpec()
if err != nil {
return dsess.InitialDbState{}, err
}
headRef, err := dEnv.RepoStateReader().CWBHeadRef()
if err != nil {
return dsess.InitialDbState{}, err
}
headCommit, err := dEnv.DoltDB.Resolve(ctx, headSpec, headRef)
require.NoError(t, err)

ws, err := dEnv.WorkingSet(ctx)
@@ -185,5 +192,5 @@ func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialD
WorkingSet: ws,
DbData: dEnv.DbData(),
Remotes: dEnv.RepoState.Remotes,
}
}, nil
}
@@ -243,7 +243,7 @@ func (rrd ReadReplicaDatabase) CreateLocalBranchFromRemote(ctx *sql.Context, bra
}

// create refs/heads/branch dataset
err = rrd.ddb.NewBranchAtCommit(ctx, branchRef, cm)
err = rrd.ddb.NewBranchAtCommit(ctx, branchRef, cm, nil)
if err != nil {
return nil, err
}
@@ -334,7 +334,7 @@ func pullBranchesAndUpdateWorkingSet(
if commitRootHash != wsWorkingRootHash || commitRootHash != wsStagedRootHash {
ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot)

err = rrd.ddb.UpdateWorkingSet(ctx, ws.Ref(), ws, prevHash, doltdb.TodoWorkingSetMeta())
err = rrd.ddb.UpdateWorkingSet(ctx, ws.Ref(), ws, prevHash, doltdb.TodoWorkingSetMeta(), nil)
if err == nil {
return nil
}
@@ -458,7 +458,7 @@ func (rrd ReadReplicaDatabase) createNewBranchFromRemote(ctx *sql.Context, remot
return err
}

err = rrd.ddb.NewBranchAtCommit(ctx, remoteRef.Ref, cm)
err = rrd.ddb.NewBranchAtCommit(ctx, remoteRef.Ref, cm, nil)
err = rrd.ddb.SetHead(ctx, trackingRef, remoteRef.Hash)
if err != nil {
return err
@@ -534,7 +534,7 @@ func refsToDelete(remRefs, localRefs []doltdb.RefWithHash) []doltdb.RefWithHash

func deleteBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []doltdb.RefWithHash) error {
for _, b := range branches {
err := rrd.ddb.DeleteBranch(ctx, b.Ref)
err := rrd.ddb.DeleteBranch(ctx, b.Ref, nil)
if errors.Is(err, doltdb.ErrBranchNotFound) {
continue
} else if err != nil {

@@ -1675,7 +1675,7 @@ func processNode(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, node Hist
require.NoError(t, err)

if !ok {
err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent)
err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent, nil)
require.NoError(t, err)
}
@@ -166,6 +166,13 @@ func AddDoltSystemVariables() {
Type: types.NewSystemBoolType(dsess.ShowBranchDatabases),
Default: int8(0),
},
{
Name: dsess.DoltClusterAckWritesTimeoutSecs,
Dynamic: true,
Scope: sql.SystemVariableScope_Persist,
Type: types.NewSystemIntType(dsess.DoltClusterAckWritesTimeoutSecs, 0, 60, false),
Default: int64(0),
},
})
}
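Once registered, the variable behaves like any other persisted MySQL system variable and must stay within the declared range of 0 to 60 seconds. A minimal client-side sketch; the DSN and driver choice are assumptions:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Assumes a local dolt sql-server; adjust the DSN as needed.
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// 0 disables waiting; values must lie in the registered range [0, 60].
	if _, err := db.Exec("SET PERSIST dolt_cluster_ack_writes_timeout_secs = 10"); err != nil {
		log.Fatal(err)
	}
}
```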

@@ -1258,6 +1258,9 @@ func (t *AlterableDoltTable) RewriteInserter(
}

ws := dbState.WorkingSet
if ws == nil {
return nil, doltdb.ErrOperationNotSupportedInDetachedHead
}

head, err := sess.GetHeadCommit(ctx, t.db.Name())
if err != nil {
@@ -2541,7 +2544,7 @@ func (t *AlterableDoltTable) generateCheckName(ctx *sql.Context, check *sql.Chec
bb.Write([]byte(check.CheckExpression))
hash := hash.Of(bb.Bytes())

hashedName := fmt.Sprintf("chk_%s", hash.String()[:8])
hashedName := fmt.Sprintf("%s_chk_%s", t.tableName, hash.String()[:8])
name := hashedName

var i int
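Prefixing the generated name with the table name is what produces `foo_chk_eq3jn5ra` in the dolt_patch test earlier: two tables with textually identical check expressions no longer collide on constraint name. A standalone sketch of the naming scheme; the real code hashes with Dolt's hash.Of (whose string form is base32-like), not SHA-256:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// generateCheckName mimics the `<table>_chk_<hash8>` scheme from the diff,
// hashing the check expression and keeping the first 8 characters.
func generateCheckName(tableName, checkExpr string) string {
	sum := sha256.Sum256([]byte(checkExpr))
	return fmt.Sprintf("%s_chk_%s", tableName, hex.EncodeToString(sum[:])[:8])
}

func main() {
	// Same expression, different tables: names no longer collide.
	fmt.Println(generateCheckName("foo", "(c1 > 3)"))
	fmt.Println(generateCheckName("bar", "(c1 > 3)"))
}
```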
@@ -79,6 +79,9 @@ func NewTempTable(
}

ws := dbState.WorkingSet
if ws == nil {
return nil, doltdb.ErrOperationNotSupportedInDetachedHead
}

sch, err := temporaryDoltSchema(ctx, pkSch, collation)
if err != nil {
@@ -153,6 +156,9 @@ func setTempTableRoot(t *TempTable) func(ctx *sql.Context, dbName string, newRoo
}

ws := dbState.WorkingSet
if ws == nil {
return doltdb.ErrOperationNotSupportedInDetachedHead
}
newWs := ws.WithWorkingRoot(newRoot)

ait, err := globalstate.NewAutoIncrementTracker(ctx, newWs)
@@ -26,7 +26,8 @@ table AddressMap {
// - value addresses for AddressMap leaf nodes
address_array:[ubyte] (required);

// array of uvarint encoded subtree counts
// array of varint encoded subtree counts
// see: go/store/prolly/message/varint.go
subtree_counts:[ubyte];
// total count of prolly tree
tree_count:uint64;
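The corrected comments point at go/store/prolly/message/varint.go, where subtree counts are packed as a variable-length integer sequence rather than fixed-width values. An analogous sketch using the standard library's uvarint encoding; Dolt's own encoding lives in varint.go and may differ in detail:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeCounts packs subtree counts into a compact varint byte array,
// the representation the subtree_counts:[ubyte] field stores.
func encodeCounts(counts []uint64) []byte {
	buf := make([]byte, 0, len(counts)*binary.MaxVarintLen64)
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, c := range counts {
		n := binary.PutUvarint(tmp, c)
		buf = append(buf, tmp[:n]...)
	}
	return buf
}

// decodeCounts reverses encodeCounts, reading one uvarint at a time.
func decodeCounts(buf []byte) []uint64 {
	var out []uint64
	for len(buf) > 0 {
		c, n := binary.Uvarint(buf)
		if n <= 0 {
			panic("malformed varint")
		}
		out = append(out, c)
		buf = buf[n:]
	}
	return out
}

func main() {
	counts := []uint64{1, 300, 70000}
	fmt.Println(decodeCounts(encodeCounts(counts))) // [1 300 70000]
}
```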
@@ -21,7 +21,8 @@ table Blob {
// array of subtree addresses for internal tree nodes
address_array:[ubyte];

// array of uvarint encoded subtree sizes
// array of varint encoded subtree counts
// see: go/store/prolly/message/varint.go
subtree_sizes:[ubyte];
tree_size:uint64;
tree_level:uint8;

@@ -22,7 +22,8 @@ table CommitClosure {
// array of subtree addresses for internal prolly tree nodes
address_array:[ubyte];

// array of uvarint encoded subtree counts
// array of varint encoded subtree counts
// see: go/store/prolly/message/varint.go
subtree_counts:[ubyte];
// total count of prolly tree
tree_count:uint64;

@@ -35,7 +35,8 @@ table MergeArtifacts {
// array of subtree addresses for internal tree nodes
address_array:[ubyte];

// array of uvarint encoded subtree counts
// array of varint encoded subtree counts
// see: go/store/prolly/message/varint.go
subtree_counts:[ubyte];
// total count of prolly tree
tree_count:uint64;

@@ -39,14 +39,13 @@ table ProllyTreeNode {
// (eg value tuples containing out-of-line BLOB addresses)
value_address_offsets:[uint16];

// array of chunk addresses
// - subtree addresses for internal prolly tree nodes
// - value addresses for AddressMap leaf nodes
address_array:[ubyte];

// array of uvarint encoded subtree counts
// array of varint encoded subtree counts
// see: go/store/prolly/message/varint.go
subtree_counts:[ubyte];
// total count of prolly tree
tree_count:uint64;
@@ -42,9 +42,7 @@ table Column {
// sql display order
display_order:int16;

// todo(andy): ideally we'd resolve column identity
// without using tags, but the current implementation
// of schema.Schema is tightly coupled to tags.
// column tag
tag: uint64;

// storage encoding

@@ -34,12 +34,14 @@ table Table {
conflicts:Conflicts;

// address of a violations types.Map (for __DOLT_DEV__).
// todo: deprecate
violations:[ubyte];

// address of artifacts
artifacts:[ubyte];
}

// todo: deprecate
table Conflicts {
// address of a conflicts types.Map (for __DOLT_DEV__).
data:[ubyte] (required);

@@ -41,6 +41,11 @@ const (
ArtifactTypeUniqueKeyViol
// ArtifactTypeChkConsViol is the type for check constraint violations.
ArtifactTypeChkConsViol
// ArtifactTypeNullViol is the type for nullability violations.
ArtifactTypeNullViol
)

const (
artifactMapPendingBufferSize = 650_000
)

@@ -191,11 +196,11 @@ func (m ArtifactMap) IterAll(ctx context.Context) (ArtifactIter, error) {
}

func (m ArtifactMap) IterAllCVs(ctx context.Context) (ArtifactIter, error) {
itr, err := m.iterAllOfTypes(ctx, ArtifactTypeForeignKeyViol, ArtifactTypeUniqueKeyViol, ArtifactTypeChkConsViol)
if err != nil {
return nil, err
}
return itr, nil
return m.iterAllOfTypes(ctx,
ArtifactTypeForeignKeyViol,
ArtifactTypeUniqueKeyViol,
ArtifactTypeChkConsViol,
ArtifactTypeNullViol)
}

// IterAllConflicts returns an iterator for the conflicts.
@@ -318,11 +323,11 @@ type ArtifactsEditor struct {
pool pool.BuffPool
}

func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, theirRootIsh hash.Hash, artType ArtifactType, meta []byte) error {
func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, srcRootish hash.Hash, artType ArtifactType, meta []byte) error {
for i := 0; i < srcKey.Count(); i++ {
wr.artKB.PutRaw(i, srcKey.GetField(i))
}
wr.artKB.PutCommitAddr(srcKey.Count(), theirRootIsh)
wr.artKB.PutCommitAddr(srcKey.Count(), srcRootish)
wr.artKB.PutUint8(srcKey.Count()+1, uint8(artType))
key := wr.artKB.Build(wr.pool)
@@ -337,7 +342,7 @@ func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, theirRootI
// the given will be inserted. Returns true if a violation was replaced. If an
// existing violation exists but has a different |meta.VInfo| value then
// ErrMergeArtifactCollision is returned.
func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKey val.Tuple, theirRootIsh hash.Hash, artType ArtifactType, meta ConstraintViolationMeta) error {
func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKey val.Tuple, srcRootish hash.Hash, artType ArtifactType, meta ConstraintViolationMeta) error {
itr, err := wr.mut.IterRange(ctx, PrefixRange(srcKey, wr.srcKeyDesc))
if err != nil {
return err
@@ -355,7 +360,7 @@ func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKe
var currMeta ConstraintViolationMeta
for art, err = aItr.Next(ctx); err == nil; art, err = aItr.Next(ctx) {
// prefix scanning sometimes returns keys not in the range
if bytes.Compare(art.Key, srcKey) != 0 {
if bytes.Compare(art.SourceKey, srcKey) != 0 {
continue
}
if art.ArtType != artType {
@@ -386,7 +391,7 @@ func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKe
if err != nil {
return err
}
err = wr.Add(ctx, srcKey, theirRootIsh, artType, d)
err = wr.Add(ctx, srcKey, srcRootish, artType, d)
if err != nil {
return err
}
@@ -441,8 +446,8 @@ func (itr *ConflictArtifactIter) Next(ctx context.Context) (ConflictArtifact, er
}

return ConflictArtifact{
Key: art.Key,
TheirRootIsh: art.TheirRootIsh,
Key: art.SourceKey,
TheirRootIsh: art.SourceRootish,
Metadata: parsedMeta,
}, nil
}
@@ -495,7 +500,7 @@ var _ ArtifactIter = multiArtifactTypeItr{}

// newMultiArtifactTypeItr creates an iter that iterates an artifact if its type exists in |types|.
func newMultiArtifactTypeItr(itr ArtifactIter, types []ArtifactType) multiArtifactTypeItr {
members := make([]bool, 5)
members := make([]bool, 6)
for _, t := range types {
members[uint8(t)] = true
}
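Growing members from 5 to 6 slots makes room for ArtifactTypeNullViol, because the slice is indexed directly by the type's ordinal value. A small self-contained sketch of that membership-filter idea, with an illustrative local enum:

```go
package main

import "fmt"

type artifactType uint8

const (
	typeConflict artifactType = iota + 1
	typeFKViol
	typeUniqueViol
	typeChkViol
	typeNullViol // new value: requires the membership slice to grow to 6
)

// typeFilter reports membership via direct indexing, so its length must
// exceed the largest artifact type ordinal.
func typeFilter(types ...artifactType) []bool {
	members := make([]bool, 6)
	for _, t := range types {
		members[uint8(t)] = true
	}
	return members
}

func main() {
	cvs := typeFilter(typeFKViol, typeUniqueViol, typeChkViol, typeNullViol)
	fmt.Println(cvs[uint8(typeNullViol)]) // true
	fmt.Println(cvs[uint8(typeConflict)]) // false
}
```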
@@ -540,11 +545,11 @@ func (itr artifactIterImpl) Next(ctx context.Context) (Artifact, error) {
metadata, _ := itr.artVD.GetJSON(0, v)

return Artifact{
ArtKey: artKey,
Key: srcKey,
TheirRootIsh: cmHash,
ArtType: ArtifactType(artType),
Metadata: metadata,
ArtKey: artKey,
SourceKey: srcKey,
SourceRootish: cmHash,
ArtType: ArtifactType(artType),
Metadata: metadata,
}, nil
}

@@ -559,10 +564,10 @@ func (itr artifactIterImpl) getSrcKeyFromArtKey(k val.Tuple) val.Tuple {
type Artifact struct {
// ArtKey is the key of the artifact itself
ArtKey val.Tuple
// Key is the key of the source row that the artifact references
Key val.Tuple
// SourceKey is the key of the source row that the artifact references
SourceKey val.Tuple
// TheirRootIsh is the working set hash or commit hash of the right in the merge
TheirRootIsh hash.Hash
SourceRootish hash.Hash
// ArtType is the type of the artifact
ArtType ArtifactType
// Metadata is the encoded json metadata
@@ -33,7 +33,6 @@ import (
"github.com/dolthub/dolt/go/store/val"
)

// todo(andy): randomize test seed
var testRand = rand.New(rand.NewSource(1))
var sharedPool = pool.NewBuffPool()

@@ -224,7 +224,6 @@ func estimateMergeArtifactSize(keys, values [][]byte, subtrees []uint64, keyAddr
panic(fmt.Sprintf("value vector exceeds Size limit ( %d > %d )", valSz, MaxVectorOffset))
}

// todo(andy): better estimates
bufSz += keySz + valSz // tuples
bufSz += refCntSz // subtree counts
bufSz += len(keys)*2 + len(values)*2 // offStart

@@ -222,7 +222,6 @@ func estimateProllyMapSize(keys, values [][]byte, subtrees []uint64, valAddrsCnt
panic(fmt.Sprintf("value vector exceeds Size limit ( %d > %d )", valSz, MaxVectorOffset))
}

// todo(andy): better estimates
bufSz += keySz + valSz // tuples
bufSz += subtreesSz // subtree counts
bufSz += len(keys)*2 + len(values)*2 // offStart

@@ -100,7 +100,6 @@ func writeAddressOffsets(b *fb.Builder, items [][]byte, sumSz int, td val.TupleD
}

func writeCountArray(b *fb.Builder, counts []uint64) fb.UOffsetT {
// todo(andy): write without alloc
buf := make([]byte, maxEncodedSize(len(counts)))
return b.CreateByteVector(encodeVarints(counts, buf))
}