more conflicts woo

James Cor
2022-07-25 22:56:03 -07:00
22 changed files with 141 additions and 231 deletions

.github/README.md vendored Normal file
View File

@@ -0,0 +1,28 @@
# Dolt's GitHub Actions

This doc provides context for the types of Workflows we use in this repository. It is not a comprehensive GitHub Actions tutorial; to familiarize yourself with GitHub Actions concepts and terminology, please see the [documentation](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions).

Dolt uses GitHub Actions Workflows in four primary ways:

* To run continuous integration tests on pull requests and pushes to `main`
* To release and publish new Dolt assets
* To deploy various benchmarking jobs to contexts _other_ than GitHub Actions (a Kubernetes cluster, for example)
* To handle misc. [repository_dispatch](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#repository_dispatch) events triggered by external clients

## Continuous Integration Workflows
Workflows prefixed with `ci-` run on pull requests to `main`; some also run on pushes to `main` (after a pull request is merged). These workflows are synchronous and don't trigger any other workflows.
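
A `ci-` workflow's trigger block generally has the following shape. This is a minimal sketch: the workflow name, job, and test command are illustrative, not taken from this repository.

```yaml
name: ci-example-tests
on:
  pull_request:
    branches: [ main ]
  push:
    branches: [ main ]
jobs:
  test:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      # Hypothetical test command; each ci- workflow runs its own suite.
      - name: Run tests
        run: go test ./...
```
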
## Dolt Release Workflows
Workflows prefixed with `cd-` are used for releasing Dolt. Some of these workflows are asynchronous: they perform part of a task, then trigger the next part to run in a _different_ workflow, sometimes in another GitHub repository, using `repository_dispatch` events. The hand-off step is sketched below.
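
A sketch of the hand-off pattern, not a copy of any workflow here; the target repository and payload are illustrative:

```yaml
- uses: peter-evans/repository-dispatch@v1
  with:
    token: ${{ secrets.REPO_ACCESS_TOKEN }}
    # Hypothetical downstream repository listening for this event type.
    repository: dolthub/example-downstream-repo
    event-type: release-dolt
    client-payload: '{"version": "${{ github.event.inputs.version }}"}'
```
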
## Benchmarking Workflows
Benchmarking workflows are an interface for deploying benchmarking jobs to one of our Kubernetes clusters. Workflows that deploy Kubernetes Jobs are prefixed with `k8s-` and can only be triggered by `repository_dispatch` events. Notice that benchmarking workflows such as `workflows/performance-benchmarks-email-report.yaml` trigger these events using the `peter-evans/repository-dispatch@v1` Action.

These Kubernetes Jobs do not run on GitHub Actions Hosted Runners, so the workflow logs contain no information about the deployed Kubernetes Job or any errors it encountered; they can only tell you whether a Job was created successfully. To investigate an error or issue with a Job in our Kubernetes cluster, see the debugging guide [here](https://github.com/dolthub/ld/blob/main/k8s/README.md#debug-performance-benchmarks-and-sql-correctness-jobs). A sketch of the receiving side of this pattern follows.
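
The receiving side of the pattern looks roughly like this (a sketch; the event type, script path, and payload field are illustrative):

```yaml
name: k8s-example-benchmarks
on:
  repository_dispatch:
    # The only trigger: no pull_request, push, or manual dispatch.
    types: [ example-benchmarks ]
jobs:
  deploy:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      # Hypothetical script that creates the Kubernetes Job. Once created,
      # the Job's logs live in the cluster, not in these workflow logs.
      - name: Create Benchmarking K8s Job
        run: ./.github/scripts/example/create-job.sh
        env:
          TO_VERSION: ${{ github.event.client_payload.to_version }}
```
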
## Misc. Repository Dispatch Workflows
Some workflows perform single, common tasks and are triggered by `repository_dispatch` events. These include `workflows/email-report.yaml`, which emails the results of performance benchmarks to the team, and `workflows/pull-report.yaml`, which posts those same results to an open pull request. Workflows like these are triggered by external clients, as sketched below.
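
An external client fires one of these events by POSTing to the repository's `repository_dispatch` REST endpoint. Here is a sketch of the call, wrapped in a workflow step for illustration (the event type and payload are hypothetical; any HTTP client with a repo-scoped token works the same way):

```yaml
# Hypothetical step in an external client's own workflow; a plain curl
# from any machine authenticates and triggers the same way.
- name: Trigger email report
  run: |
    curl -X POST \
      -H "Authorization: token $REPO_ACCESS_TOKEN" \
      -H "Accept: application/vnd.github+json" \
      https://api.github.com/repos/dolthub/dolt/dispatches \
      -d '{"event_type": "email-report", "client_payload": {"example": true}}'
  env:
    REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
```
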

View File

@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-18.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
- name: Benchmark Import Performance
+ name: Trigger Benchmark Import K8s Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
id: comment-branch

View File

@@ -37,7 +37,7 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- - name: Run Import benchmarks
+ - name: Create Import Benchmarking K8s Job
run: ./.github/scripts/import-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: ${{ github.event.client_payload.from_server }}

View File

@@ -37,7 +37,7 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- - name: Run Sysbench benchmarks
+ - name: Create Sysbench Performance Benchmarking K8s Job
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: ${{ github.event.client_payload.from_server }}
@@ -53,7 +53,7 @@ jobs:
INIT_BIG_REPO: ${{ github.event.client_payload.init_big_repo }}
NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}
- - name: Run Tpcc benchmarks
+ - name: Create TPCC Performance Benchmarking K8s Job
run: ./.github/scripts/performance-benchmarking/run-benchmarks.sh
env:
FROM_SERVER: ${{ github.event.client_payload.from_server }}

View File

@@ -37,7 +37,7 @@ jobs:
KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
env:
CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- - name: Fuzzer gatekeeper
+ - name: Create Fuzzer (GateKeeper) K8s Job
run: ./.github/scripts/fuzzer/run-fuzzer.sh
env:
VERSION: ${{ github.sha }}

View File

@@ -1,43 +1,49 @@
- name: Benchmark SQL Correctness
+ name: SQL Correctness
on:
repository_dispatch:
- types: [ release-dolt ]
- workflow_dispatch:
- inputs:
- version:
- description: 'SemVer format release tag, i.e. 0.24.5'
- required: true
- default: ''
- email:
- description: 'Email address to receive results'
- required: true
- default: ''
+ types: [ sql-correctness ]
jobs:
- set-version-actor:
- name: Set Version and Actor
- runs-on: ubuntu-18.04
- outputs:
- version: ${{ steps.set-vars.outputs.version }}
- actor: ${{ steps.set-vars.outputs.actor }}
- steps:
- - name: Set variables
- id: set-vars
- run: |
- echo "::set-output name=version::$VERSION"
- echo "::set-output name=actor::$ACTOR"
- env:
- VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
- ACTOR: ${{ github.event.client_payload.actor || github.actor }}
correctness:
runs-on: ubuntu-18.04
- needs: set-version-actor
- name: Benchmark SQL Correctness
+ name: Dolt SQL Correctness
+ strategy:
+ matrix:
+ dolt_fmt: [ "__LD_1__", "__DOLT_1__" ]
steps:
- - uses: peter-evans/repository-dispatch@v1
+ - uses: actions/checkout@v2
+ - uses: azure/setup-kubectl@v2.0
with:
- token: ${{ secrets.REPO_ACCESS_TOKEN }}
- event-type: sql-correctness
- client-payload: '{"to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'
+ version: 'v1.23.6'
+ - name: Configure AWS Credentials
+ uses: aws-actions/configure-aws-credentials@v1
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ aws-region: us-west-2
+ - name: Install aws-iam-authenticator
+ run: |
+ curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
+ chmod +x ./aws-iam-authenticator && \
+ sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
+ aws-iam-authenticator version
+ - name: Create and Auth kubeconfig
+ run: |
+ echo "$CONFIG" > kubeconfig
+ KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
+ KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
+ KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
+ env:
+ CONFIG: ${{ secrets.CORP_KUBECONFIG }}
+ - name: Create SQL Correctness K8s Job
+ run: ./.github/scripts/sql-correctness/run-correctness.sh
+ env:
+ TO_VERSION: ${{ github.event.client_payload.to_version }}
+ MODE: ${{ github.event.client_payload.mode }}
+ ACTOR: ${{ github.event.client_payload.actor }}
+ ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
+ REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
+ KUBECONFIG: "./kubeconfig"
+ NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
+ TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}

View File

@@ -8,7 +8,7 @@ on:
jobs:
perf:
runs-on: ubuntu-18.04
- name: Benchmark Latency, Correctness, and Imports
+ name: Trigger Benchmark Latency, Benchmark Import, and SQL Correctness K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
with:

View File

@@ -37,7 +37,7 @@ jobs:
benchmark-dolt-mysql:
runs-on: ubuntu-18.04
needs: set-version-actor
- name: Benchmark Dolt vs MySQL Latency, Benchmark Import
+ name: Trigger Benchmark Latency and Benchmark Import K8s Workflows
steps:
- uses: peter-evans/repository-dispatch@v1
with:

View File

@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-18.04
needs: [validate-commentor, check-comments]
if: ${{ needs.check-comments.outputs.benchmark == 'true' }}
- name: Benchmark Latency
+ name: Trigger Benchmark Latency K8s Workflow
steps:
- uses: xt0rted/pull-request-comment-branch@v1
id: comment-branch

View File

@@ -1,49 +1,43 @@
- name: SQL Correctness
+ name: Benchmark SQL Correctness
on:
repository_dispatch:
- types: [ sql-correctness ]
+ types: [ release-dolt ]
+ workflow_dispatch:
+ inputs:
+ version:
+ description: 'SemVer format release tag, i.e. 0.24.5'
+ required: true
+ default: ''
+ email:
+ description: 'Email address to receive results'
+ required: true
+ default: ''
jobs:
+ set-version-actor:
+ name: Set Version and Actor
+ runs-on: ubuntu-18.04
+ outputs:
+ version: ${{ steps.set-vars.outputs.version }}
+ actor: ${{ steps.set-vars.outputs.actor }}
+ steps:
+ - name: Set variables
+ id: set-vars
+ run: |
+ echo "::set-output name=version::$VERSION"
+ echo "::set-output name=actor::$ACTOR"
+ env:
+ VERSION: ${{ github.event.inputs.version || github.event.client_payload.version }}
+ ACTOR: ${{ github.event.client_payload.actor || github.actor }}
correctness:
runs-on: ubuntu-18.04
- name: Dolt SQL Correctness
- strategy:
- matrix:
- dolt_fmt: [ "__LD_1__", "__DOLT_1__" ]
+ needs: set-version-actor
+ name: Trigger SQL Correctness K8s Workflow
steps:
- - uses: actions/checkout@v2
- - uses: azure/setup-kubectl@v2.0
+ - uses: peter-evans/repository-dispatch@v1
with:
- version: 'v1.23.6'
- - name: Configure AWS Credentials
- uses: aws-actions/configure-aws-credentials@v1
- with:
- aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- aws-region: us-west-2
- - name: Install aws-iam-authenticator
- run: |
- curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator && \
- chmod +x ./aws-iam-authenticator && \
- sudo cp ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
- aws-iam-authenticator version
- - name: Create and Auth kubeconfig
- run: |
- echo "$CONFIG" > kubeconfig
- KUBECONFIG=kubeconfig kubectl config set-credentials github-actions-dolt --exec-api-version=client.authentication.k8s.io/v1alpha1 --exec-command=aws-iam-authenticator --exec-arg=token --exec-arg=-i --exec-arg=eks-cluster-1
- KUBECONFIG=kubeconfig kubectl config set-context github-actions-dolt-context --cluster=eks-cluster-1 --user=github-actions-dolt --namespace=performance-benchmarking
- KUBECONFIG=kubeconfig kubectl config use-context github-actions-dolt-context
- env:
- CONFIG: ${{ secrets.CORP_KUBECONFIG }}
- - name: Run correctness
- run: ./.github/scripts/sql-correctness/run-correctness.sh
- env:
- TO_VERSION: ${{ github.event.client_payload.to_version }}
- MODE: ${{ github.event.client_payload.mode }}
- ACTOR: ${{ github.event.client_payload.actor }}
- ACTOR_EMAIL: ${{ github.event.client_payload.actor_email }}
- REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }}
- KUBECONFIG: "./kubeconfig"
- NOMS_BIN_FORMAT: ${{ matrix.dolt_fmt }}
- TEMPLATE_SCRIPT: ${{ github.event.client_payload.template_script }}
+ token: ${{ secrets.REPO_ACCESS_TOKEN }}
+ event-type: sql-correctness
+ client-payload: '{"to_version": "${{ needs.set-version-actor.outputs.version }}", "mode": "release", "actor": "${{ needs.set-version-actor.outputs.actor }}", "actor_email": "${{ needs.set-version-actor.outputs.actor_email }}", "template_script": "./.github/scripts/sql-correctness/get-dolt-correctness-job-json.sh"}'

View File

@@ -50,6 +50,7 @@ type SqlEngine struct {
type SqlEngineConfig struct {
InitialDb string
IsReadOnly bool
+ IsServerLocked bool
DoltCfgDirPath string
PrivFilePath string
ServerUser string
@@ -69,7 +70,7 @@ func NewSqlEngine(
) (*SqlEngine, error) {
if ok, _ := mrEnv.IsLocked(); ok {
- config.IsReadOnly = true
+ config.IsServerLocked = true
}
parallelism := runtime.GOMAXPROCS(0)
@@ -99,10 +100,7 @@ func NewSqlEngine(
}
// Set up engine
- engine := gms.New(
- analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(),
- &gms.Config{IsReadOnly: config.IsReadOnly},
- ).WithBackgroundThreads(bThreads)
+ engine := gms.New(analyzer.NewBuilder(pro).WithParallelism(parallelism).Build(), &gms.Config{IsReadOnly: config.IsReadOnly, IsServerLocked: config.IsServerLocked}).WithBackgroundThreads(bThreads)
engine.Analyzer.Catalog.MySQLDb.SetPersister(persister)
engine.Analyzer.Catalog.MySQLDb.SetPlugins(map[string]mysql_db.PlaintextAuthPlugin{

View File

@@ -79,11 +79,6 @@ func Serve(
}
logrus.SetFormatter(LogFormat{})
- isReadOnly := false
- if serverConfig.ReadOnly() {
- isReadOnly = true
- }
var mrEnv *env.MultiRepoEnv
var err error
fs := dEnv.FS
@@ -134,7 +129,7 @@ func Serve(
// Create SQL Engine with users
config := &engine.SqlEngineConfig{
InitialDb: "",
- IsReadOnly: isReadOnly,
+ IsReadOnly: serverConfig.ReadOnly(),
PrivFilePath: serverConfig.PrivilegeFilePath(),
DoltCfgDirPath: serverConfig.CfgDir(),
ServerUser: serverConfig.User(),

View File

@@ -56,7 +56,7 @@ import (
)
const (
Version = "0.40.19"
Version = "0.40.20"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}

View File

@@ -127,42 +127,8 @@ func (rcv *RootValue) MutateForeignKeyAddr(j int, n byte) bool {
return false
}
- func (rcv *RootValue) SuperSchemasAddr(j int) byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
- }
- return 0
- }
- func (rcv *RootValue) SuperSchemasAddrLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- return rcv._tab.VectorLen(o)
- }
- return 0
- }
- func (rcv *RootValue) SuperSchemasAddrBytes() []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- return rcv._tab.ByteVector(o + rcv._tab.Pos)
- }
- return nil
- }
- func (rcv *RootValue) MutateSuperSchemasAddr(j int, n byte) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
- }
- return false
- }
func RootValueStart(builder *flatbuffers.Builder) {
- builder.StartObject(4)
+ builder.StartObject(3)
}
func RootValueAddFeatureVersion(builder *flatbuffers.Builder, featureVersion int64) {
builder.PrependInt64Slot(0, featureVersion, 0)
@@ -179,12 +145,6 @@ func RootValueAddForeignKeyAddr(builder *flatbuffers.Builder, foreignKeyAddr fla
func RootValueStartForeignKeyAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
- func RootValueAddSuperSchemasAddr(builder *flatbuffers.Builder, superSchemasAddr flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(superSchemasAddr), 0)
- }
- func RootValueStartSuperSchemasAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(1, numElems, 1)
- }
func RootValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}

View File

@@ -58,7 +58,7 @@ require (
)
require (
- github.com/dolthub/go-mysql-server v0.12.1-0.20220725212253-811079940b3f
+ github.com/dolthub/go-mysql-server v0.12.1-0.20220726053222-2b2b8c80b1ac
github.com/google/flatbuffers v2.0.6+incompatible
github.com/gosuri/uilive v0.0.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6

View File

@@ -173,8 +173,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
- github.com/dolthub/go-mysql-server v0.12.1-0.20220725212253-811079940b3f h1:C6h+OXnxMNE+xKSDyLPEkMhoEz3Zm+o9ntsKd9uo0Wc=
- github.com/dolthub/go-mysql-server v0.12.1-0.20220725212253-811079940b3f/go.mod h1:JgB3WpY0RMgyAda3YG5VHVncH2B8i1N9Mx9LOp41lIs=
+ github.com/dolthub/go-mysql-server v0.12.1-0.20220726053222-2b2b8c80b1ac h1:H3JbDexI4fCE1zm3+fPvo/pvXY7NBvnAhdTuT2V6XTk=
+ github.com/dolthub/go-mysql-server v0.12.1-0.20220726053222-2b2b8c80b1ac/go.mod h1:JgB3WpY0RMgyAda3YG5VHVncH2B8i1N9Mx9LOp41lIs=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=

View File

@@ -83,10 +83,8 @@ type rvStorage interface {
GetFeatureVersion() (FeatureVersion, bool, error)
GetTablesMap(ctx context.Context, vr types.ValueReadWriter, ns tree.NodeStore) (tableMap, error)
- GetSuperSchemaMap(ctx context.Context, vr types.ValueReader) (types.Map, bool, error)
GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error)
- SetSuperSchemaMap(ctx context.Context, vrw types.ValueReadWriter, m types.Map) (rvStorage, error)
SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, m types.Value) (rvStorage, error)
SetFeatureVersion(v FeatureVersion) (rvStorage, error)
@@ -326,12 +324,10 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter, ns tree.Node
var empty hash.Hash
fkoff := builder.CreateByteVector(empty[:])
- ssoff := builder.CreateByteVector(empty[:])
serial.RootValueStart(builder)
serial.RootValueAddFeatureVersion(builder, int64(DoltFeatureVersion))
serial.RootValueAddTables(builder, tablesoff)
serial.RootValueAddForeignKeyAddr(builder, fkoff)
- serial.RootValueAddSuperSchemasAddr(builder, ssoff)
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
return newRootValue(vrw, ns, types.SerialMessage(bs))
}
@@ -499,23 +495,6 @@ func (root *RootValue) GenerateTagsForNewColumns(
return newTags, nil
}
- // GerSuperSchemaMap returns the Noms map that tracks SuperSchemas, used to create new RootValues on checkout branch.
- func (root *RootValue) GetSuperSchemaMap(ctx context.Context) (types.Map, error) {
- return root.getOrCreateSuperSchemaMap(ctx)
- }
- func (root *RootValue) getOrCreateSuperSchemaMap(ctx context.Context) (types.Map, error) {
- m, found, err := root.st.GetSuperSchemaMap(ctx, root.vrw)
- if err != nil {
- return types.Map{}, err
- }
- if found {
- return m, nil
- }
- return types.NewMap(ctx, root.vrw)
- }
func (root *RootValue) GetAllSchemas(ctx context.Context) (map[string]schema.Schema, error) {
m := make(map[string]schema.Schema)
err := root.IterTables(ctx, func(name string, table *Table, sch schema.Schema) (stop bool, err error) {
@@ -838,35 +817,10 @@ func (root *RootValue) HashOf() (hash.Hash, error) {
// RenameTable renames a table by changing its string key in the RootValue's table map. In order to preserve
// column tag information, use this method instead of a table drop + add.
func (root *RootValue) RenameTable(ctx context.Context, oldName, newName string) (*RootValue, error) {
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, root.ns, []tableEdit{{old_name: oldName, name: newName}})
if err != nil {
return nil, err
}
- ssMap, err := root.getOrCreateSuperSchemaMap(ctx)
- if err != nil {
- return nil, err
- }
- ssv, found, err := ssMap.MaybeGet(ctx, types.String(oldName))
- if err != nil {
- return nil, err
- }
- if found {
- ssme := ssMap.Edit().Remove(types.String(oldName))
- ssme = ssme.Set(types.String(newName), ssv)
- ssMap, err = ssme.Map(ctx)
- if err != nil {
- return nil, err
- }
- newStorage, err = newStorage.SetSuperSchemaMap(ctx, root.vrw, ssMap)
- if err != nil {
- return nil, err
- }
- }
return root.withStorage(newStorage), nil
}
@@ -1171,18 +1125,6 @@ func (m fbTableMap) Iter(ctx context.Context, cb func(string, hash.Hash) (bool,
})
}
- func (r fbRvStorage) GetSuperSchemaMap(ctx context.Context, vr types.ValueReader) (types.Map, bool, error) {
- addr := hash.New(r.srv.SuperSchemasAddrBytes())
- if addr.IsEmpty() {
- return types.Map{}, false, nil
- }
- v, err := vr.ReadValue(ctx, addr)
- if err != nil {
- return types.Map{}, false, err
- }
- return v.(types.Map), true, nil
- }
func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error) {
addr := hash.New(r.srv.ForeignKeyAddrBytes())
if addr.IsEmpty() {
@@ -1195,20 +1137,6 @@ func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (
return v.(types.SerialMessage), true, nil
}
- func (r fbRvStorage) SetSuperSchemaMap(ctx context.Context, vrw types.ValueReadWriter, m types.Map) (rvStorage, error) {
- var h hash.Hash
- if !m.Empty() {
- ref, err := vrw.WriteValue(ctx, m)
- if err != nil {
- return nil, err
- }
- h = ref.TargetHash()
- }
- ret := r.clone()
- copy(ret.srv.SuperSchemasAddrBytes(), h[:])
- return ret, nil
- }
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
builder := flatbuffers.NewBuilder(80)
@@ -1261,12 +1189,10 @@ func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWrite
tablesoff := builder.CreateByteVector(ambytes)
fkoff := builder.CreateByteVector(r.srv.ForeignKeyAddrBytes())
- ssoff := builder.CreateByteVector(r.srv.SuperSchemasAddrBytes())
serial.RootValueStart(builder)
serial.RootValueAddFeatureVersion(builder, r.srv.FeatureVersion())
serial.RootValueAddTables(builder, tablesoff)
serial.RootValueAddForeignKeyAddr(builder, fkoff)
- serial.RootValueAddSuperSchemasAddr(builder, ssoff)
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
return fbRvStorage{serial.GetRootAsRootValue(bs, serial.MessagePrefixSz)}, nil
@@ -1301,11 +1227,10 @@ func (r fbRvStorage) clone() fbRvStorage {
}
func (r fbRvStorage) DebugString(ctx context.Context) string {
return fmt.Sprintf("fbRvStorage[%d, %s, %s, %s]",
return fmt.Sprintf("fbRvStorage[%d, %s, %s]",
r.srv.FeatureVersion(),
"...", // TODO: Print out tables map
hash.New(r.srv.ForeignKeyAddrBytes()).String(),
hash.New(r.srv.SuperSchemasAddrBytes()).String())
hash.New(r.srv.ForeignKeyAddrBytes()).String())
}
func (r fbRvStorage) nomsValue() types.Value {

View File

@@ -65,6 +65,10 @@ type SqlEngineTableWriter struct {
}
func NewSqlEngineTableWriter(ctx context.Context, dEnv *env.DoltEnv, createTableSchema, rowOperationSchema schema.Schema, options *MoverOptions, statsCB noms.StatsCB) (*SqlEngineTableWriter, error) {
+ if dEnv.IsLocked() {
+ return nil, env.ErrActiveServerLock.New(dEnv.LockFile())
+ }
mrEnv, err := env.MultiEnvForDirectory(ctx, dEnv.Config.WriteableConfig(), dEnv.FS, dEnv.Version, dEnv.IgnoreLockFile, dEnv)
if err != nil {
return nil, err

View File

@@ -87,7 +87,7 @@ func LoadedLocalLocation() *time.Location {
func BasicSelectTests() []SelectTest {
headCommitHash := "73hc2robs4v0kt9taoe3m5hd49dmrgun"
if types.Format_Default == types.Format_DOLT_DEV {
headCommitHash = "sauc4hhnfb84498q257lh3asi77fjo0i"
headCommitHash = "7jds1iafb4p0chesrv8nqeqenspeclb8"
}
return []SelectTest{
{

View File

@@ -22,7 +22,6 @@ table RootValue {
tables:[ubyte]; // Serialized AddressMap.
foreign_key_addr:[ubyte];
- super_schemas_addr:[ubyte];
}
// KEEP THIS IN SYNC WITH fileidentifiers.go

View File

@@ -109,7 +109,6 @@ func (sm SerialMessage) HumanReadableString() string {
fmt.Fprintf(ret, "{\n")
fmt.Fprintf(ret, "\tFeatureVersion: %d\n", msg.FeatureVersion())
fmt.Fprintf(ret, "\tForeignKeys: #%s\n", hash.New(msg.ForeignKeyAddrBytes()).String())
fmt.Fprintf(ret, "\tSuperSchema: #%s\n", hash.New(msg.SuperSchemasAddrBytes()).String())
fmt.Fprintf(ret, "\tTables: {\n\t%s", SerialMessage(msg.TablesBytes()).HumanReadableString())
fmt.Fprintf(ret, "\t}\n")
fmt.Fprintf(ret, "}")
@@ -235,16 +234,6 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
return err
}
}
- addr = hash.New(msg.SuperSchemasAddrBytes())
- if !addr.IsEmpty() {
- r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
- if err != nil {
- return err
- }
- if err = cb(r); err != nil {
- return err
- }
- }
case serial.TableFileID:
msg := serial.GetRootAsTable([]byte(sm), serial.MessagePrefixSz)
addr := hash.New(msg.SchemaBytes())

View File

@@ -167,7 +167,7 @@ SQL
# attempt to create table (autocommit on), expect either some exception
server_query repo1 1 "CREATE TABLE i_should_not_exist (
c0 INT
)" "" "not authorized"
)" "" "database server is set to read only mode"
# Expect that there are still no tables
run dolt ls
@@ -205,7 +205,7 @@ SQL
# make a dolt_commit query
skip "read-only flag does not prevent dolt_commit"
server_query repo1 1 "select dolt_commit('--allow-empty', '-m', 'msg')" "" "not authorized: user does not have permission: write"
server_query repo1 1 "select dolt_commit('--allow-empty', '-m', 'msg')" "" "database server is set to read only mode: user does not have permission: write"
}
@test "sql-server: test command line modification" {
@@ -1425,4 +1425,16 @@ databases:
let PORT="$$ % (65536-1024) + 1024"
run dolt sql-server -P $PORT
[ "$status" -eq 1 ]
}
+ @test "sql-server: sql-server locks database to writes" {
+ cd repo2
+ dolt sql -q "create table a (x int primary key)"
+ start_sql_server
+ run dolt sql -q "create table b (x int primary key)"
+ [ "$status" -eq 1 ]
+ [[ "$output" =~ "database is locked to writes" ]] || false
+ run dolt sql -q "insert into b values (0)"
+ [ "$status" -eq 1 ]
+ [[ "$output" =~ "database is locked to writes" ]] || false
+ }