merge main

This commit is contained in:
Zach Musgrave
2021-11-04 14:12:38 -07:00
28 changed files with 1058 additions and 130 deletions
+132
View File
@@ -0,0 +1,132 @@
#!/bin/bash
# This script installs and starts a dolt server on your Unix compatible computer.
# It must be run under bash (it uses bashisms like ${var,,}) and as root.
if test -z "$BASH_VERSION"; then
echo "Please run this script using bash, not sh or any other shell. It should be run as root." >&2
exit 1
fi
_() {
install_dolt() {
	# Install dolt via the official installer, but only when it is not already on PATH.
	echo "Installing Dolt..."
	if ! command -v dolt &> /dev/null; then
		sudo bash -c 'curl -L https://github.com/dolthub/dolt/releases/latest/download/install.sh | bash'
	fi
}
setup_configs() {
	# Create the dolt service user (if needed) and set its global dolt
	# user.name / user.email configuration.
	echo "Setting up Configurations..."
	# Check if the user "dolt" already exists. If it exists double check that it is okay to continue
	if id -u "dolt" &> /dev/null; then
		echo "The user dolt already exists"
		read -r -p "Do you want to continue adding privileges to the existing user dolt? " response
		response=${response,,} # tolower
		if ! ([[ $response =~ ^(yes|y| ) ]] || [[ -z $response ]]); then
			exit 1
		fi
	else
		# add the user if `dolt` doesn't exist
		useradd -r -m -d /var/lib/doltdb dolt
	fi
	cd /var/lib/doltdb
	read -e -p "Enter an email associated with your user: " -i "dolt-user@dolt.com" email
	read -e -p "Enter a username associated with your user: " -i "Dolt Server Account" username
	# Quote the values: the default username contains spaces, so an unquoted
	# expansion would be word-split into multiple arguments and misconfigure
	# user.name.
	sudo -u dolt dolt config --global --add user.email "$email"
	sudo -u dolt dolt config --global --add user.name "$username"
}
# Database creation
database_configuration() {
	# Create and initialize the dolt database under /var/lib/doltdb/databases.
	echo "Setting up the dolt database..."
	read -e -p "Input the name of your database: " -i "mydb" db_name
	local db_dir="databases/$db_name"
	cd /var/lib/doltdb
	# Quote the path so database names containing spaces or globs do not
	# word-split into multiple mkdir arguments.
	sudo -u dolt mkdir -p "$db_dir"
	cd "$db_dir"
	sudo -u dolt dolt init
}
# Setup and Start daemon
start_server() {
echo "Starting the server"
# NOTE(review): the script runs as root, so these files are written into
# root's home directory before being moved/copied into place below.
cd ~
# Write the dolt sql-server YAML config. The heredoc body is the literal
# file content; do not add comments inside it.
cat > dolt_config.yaml<<EOF
log_level: info
behavior:
read_only: false
autocommit: true
user:
name: root
password: ""
listener:
host: localhost
port: 3306
max_connections: 100
read_timeout_millis: 28800000
write_timeout_millis: 28800000
tls_key: null
tls_cert: null
require_secure_transport: null
databases: []
performance:
query_parallelism: null
EOF
# Write the systemd unit. $db_name expands here, so this relies on
# database_configuration having been run first in the same shell.
cat > doltdb.service<<EOF
[Unit]
Description=dolt SQL server
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
User=dolt
Group=dolt
ExecStart=/usr/local/bin/dolt sql-server --config=dolt_config.yaml
WorkingDirectory=/var/lib/doltdb/databases/$db_name
KillSignal=SIGTERM
SendSIGKILL=no
EOF
# Install the unit with root ownership and standard permissions, place the
# config next to the database (the unit's WorkingDirectory), then enable
# and start the service.
sudo chown root:root doltdb.service
sudo chmod 644 doltdb.service
sudo mv doltdb.service /etc/systemd/system
sudo cp dolt_config.yaml /var/lib/doltdb/databases/$db_name
sudo systemctl daemon-reload
sudo systemctl enable doltdb.service
sudo systemctl start doltdb
}
validate_status() {
	# Report whether the doltdb systemd unit is among the active units.
	# -q suppresses grep's matched-line output; only our message is printed.
	if systemctl --state=active | grep -q "doltdb.service"; then
		# Fixed typo: was "Sever successfully started..."
		echo "Server successfully started..."
	else
		echo "ERROR: Server did not start properly..."
	fi
}
# Run each installation stage in order: install the binary, configure the
# dolt user, create the database, start the systemd service, then verify it.
install_dolt
setup_configs
database_configuration
start_server
validate_status
}
# Invoke the wrapper; defining everything inside _() ensures the whole script
# is parsed before any of it executes (guards against truncated downloads).
_ "$0" "$@"
+354
View File
@@ -0,0 +1,354 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commands
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"github.com/fatih/color"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/mvdata"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/doltcore/table/pipeline"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/store/types"
)
const (
	// forceParam is the name of the flag (--force / -f) that allows an
	// existing dump file to be overwritten.
	forceParam = "force"
)
// dumpDocs holds the help and markdown documentation content for `dolt dump`.
var dumpDocs = cli.CommandDocumentationContent{
	ShortDesc: `Export all tables.`,
	// Fixed missing space in the user-facing text ("fail,unless" -> "fail, unless").
	LongDesc: `{{.EmphasisLeft}}dolt dump{{.EmphasisRight}} will export {{.LessThan}}table{{.GreaterThan}} to
{{.LessThan}}|file{{.GreaterThan}}. If a dump file already exists then the operation will fail, unless the
{{.EmphasisLeft}}--force | -f{{.EmphasisRight}} flag is provided. The force flag forces the existing dump file to be
overwritten.
`,
	Synopsis: []string{
		"[options] [{{.LessThan}}commit{{.GreaterThan}}]",
		"[-f] [-r {{.LessThan}}result-format{{.GreaterThan}}] ",
	},
}
// DumpCmd implements the `dolt dump` command, which exports all tables in the
// working set to a file.
type DumpCmd struct{}
// Name returns the name of the Dolt cli command. This is what is used on the command line to invoke the command.
func (cmd DumpCmd) Name() string {
	return "dump"
}
// Description returns a short, one-line description of the command for help output.
func (cmd DumpCmd) Description() string {
	return "Export all tables in the working set into a file."
}
// CreateMarkdown creates a markdown file containing the help text for the command at the given path.
func (cmd DumpCmd) CreateMarkdown(wr io.Writer, commandStr string) error {
	ap := cmd.createArgParser()
	// Bug fix: the original passed lsDocs here, which would have generated the
	// documentation for `dolt ls` instead of `dolt dump`.
	return CreateMarkdown(wr, cli.GetCommandDocumentation(commandStr, dumpDocs, ap))
}
// createArgParser builds the argument parser for the dump command: the
// force flag (-f) and the result-format option (-r).
func (cmd DumpCmd) createArgParser() *argparser.ArgParser {
	ap := argparser.NewArgParser()
	ap.SupportsFlag(forceParam, "f", "If data already exists in the destination, the force flag will allow the target to be overwritten.")
	ap.SupportsString(FormatFlag, "r", "result_file_type", "Define the type of the output file. Valid values are sql and csv. Defaults to sql.")
	return ap
}
// EventType returns the type of the event to log for metrics reporting.
func (cmd DumpCmd) EventType() eventsapi.ClientEventType {
	return eventsapi.ClientEventType_DUMP
}
// Exec executes the dump command. All non-system tables in the working set are
// exported either to a single doltdump.sql file (the default) or to one CSV
// file per table under the doltdump directory. Returns a process exit code.
func (cmd DumpCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
	ap := cmd.createArgParser()
	help, usage := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, dumpDocs, ap))
	apr := cli.ParseArgsOrDie(ap, args, help)

	if apr.NArg() > 0 {
		return HandleVErrAndExitCode(errhand.BuildDError("too many arguments").SetPrintUsage().Build(), usage)
	}

	// TODO: make the file name and directory name configurable as command line options
	var fileName string
	resultFormat, _ := apr.GetValue(FormatFlag)

	root, verr := GetWorkingWithVErr(dEnv)
	if verr != nil {
		return HandleVErrAndExitCode(verr, usage)
	}

	force := apr.Contains(forceParam)

	tblNames, err := doltdb.GetNonSystemTableNames(ctx, root)
	if err != nil {
		// Bug fix: the original built this error and discarded it, silently
		// continuing with an empty table list. Return it with a non-zero exit.
		return HandleVErrAndExitCode(errhand.BuildDError("error: failed to get tables").AddCause(err).Build(), usage)
	}
	if len(tblNames) == 0 {
		cli.Println("No tables to export.")
		return 0
	}

	switch resultFormat {
	case "", "sql", ".sql":
		// SQL dumps append every table to one shared file.
		fileName = "doltdump.sql"
		dumpOpts := getDumpOptions(fileName, resultFormat)

		fPath, err := checkAndCreateOpenDestFile(ctx, root, dEnv, force, dumpOpts, fileName)
		if err != nil {
			return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
		}

		for _, tbl := range tblNames {
			tblOpts := newTableArgs(tbl, dumpOpts.dest)
			err = dumpTable(ctx, root, dEnv, tblOpts, fPath)
			if err != nil {
				return HandleVErrAndExitCode(err, usage)
			}
		}
	case "csv", ".csv":
		// CSV dumps write one file per table.
		for _, tbl := range tblNames {
			fileName = "doltdump/" + tbl + ".csv"
			dumpOpts := getDumpOptions(fileName, resultFormat)

			fPath, err := checkAndCreateOpenDestFile(ctx, root, dEnv, force, dumpOpts, fileName)
			if err != nil {
				return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
			}

			tblOpts := newTableArgs(tbl, dumpOpts.dest)
			err = dumpTable(ctx, root, dEnv, tblOpts, fPath)
			if err != nil {
				return HandleVErrAndExitCode(err, usage)
			}
		}
	default:
		return HandleVErrAndExitCode(errhand.BuildDError("invalid result format").SetPrintUsage().Build(), usage)
	}

	cli.PrintErrln(color.CyanString("Successfully exported data."))
	return 0
}
// dumpOptions holds the requested output format and destination of a dump.
type dumpOptions struct {
	// format is the raw result-format flag value ("", "sql", ".sql", "csv", ".csv").
	format string
	// dest is the file or stream location the dump is written to.
	dest mvdata.DataLocation
}
// tableOptions pairs one source table with the destination its rows are
// written to. It implements the options interface consumed by the data mover.
type tableOptions struct {
	// tableName is the name of the table being exported.
	tableName string
	// src locates the source table in the working root.
	src mvdata.TableDataLocation
	// dest is the file or stream location the rows are written to.
	dest mvdata.DataLocation
	// srcOptions carries reader-specific options; never set in this file, so nil.
	srcOptions interface{}
}
// WritesToTable reports whether the destination is a table. Dump destinations
// are always files or streams, so this is always false.
func (m tableOptions) WritesToTable() bool {
	return false
}
// SrcName returns the name of the source table being dumped.
func (m tableOptions) SrcName() string {
	return m.src.Name
}
// DestName returns a human-readable name for the destination: the table name
// for table destinations, the file path for file destinations, and the
// location's string form otherwise.
func (m tableOptions) DestName() string {
	switch d := m.dest.(type) {
	case mvdata.TableDataLocation:
		return d.Name
	case mvdata.FileDataLocation:
		return d.Path
	default:
		return m.dest.String()
	}
}
// DumpDestName returns a human-readable name for the dump destination: the
// table name for table destinations, the file path for file destinations, and
// the location's string form otherwise.
func (m dumpOptions) DumpDestName() string {
	switch d := m.dest.(type) {
	case mvdata.TableDataLocation:
		return d.Name
	case mvdata.FileDataLocation:
		return d.Path
	default:
		return m.dest.String()
	}
}
// dumpTable dumps one table into the file at filePath, using the source and
// destination described by tblOpts. Skipped rows are reported to stderr.
func dumpTable(ctx context.Context, root *doltdb.RootValue, dEnv *env.DoltEnv, tblOpts *tableOptions, filePath string) errhand.VerboseError {
	mover, verr := NewDumpDataMover(ctx, root, dEnv, tblOpts, importStatsCB, filePath)
	if verr != nil {
		return verr
	}

	// MoveData streams rows from the table reader to the file writer; skipped
	// is the count of rows that could not be written.
	skipped, verr := mvdata.MoveData(ctx, dEnv, mover, tblOpts)
	if verr != nil {
		return verr
	}
	if skipped > 0 {
		cli.PrintErrln(color.YellowString("Lines skipped: %d", skipped))
	}
	return nil
}
// checkAndCreateOpenDestFile returns the absolute path of the destination file
// after verifying that overwriting is permitted (per the force flag), creating
// any missing parent directories, and creating/truncating the file itself.
func checkAndCreateOpenDestFile(ctx context.Context, root *doltdb.RootValue, dEnv *env.DoltEnv, force bool, dumpOpts *dumpOptions, fileName string) (string, errhand.VerboseError) {
	ow, err := checkOverwrite(ctx, root, dEnv.FS, force, dumpOpts.dest)
	if err != nil {
		return "", errhand.VerboseErrorFromError(err)
	}
	if ow {
		return "", errhand.BuildDError("%s already exists. Use -f to overwrite.", fileName).Build()
	}

	// create new file
	err = dEnv.FS.MkDirs(filepath.Dir(dumpOpts.DumpDestName()))
	if err != nil {
		return "", errhand.VerboseErrorFromError(err)
	}

	filePath, err := dEnv.FS.Abs(fileName)
	if err != nil {
		return "", errhand.VerboseErrorFromError(err)
	}

	// Bug fix: the original discarded both return values of os.OpenFile,
	// leaking the file descriptor and silently ignoring creation failures.
	// Open here only to create/truncate; the dump writer reopens the file.
	f, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return "", errhand.VerboseErrorFromError(err)
	}
	if err = f.Close(); err != nil {
		return "", errhand.VerboseErrorFromError(err)
	}

	return filePath, nil
}
// checkOverwrite reports whether writing to dest would clobber an existing
// file that the user has not agreed (via the force flag) to overwrite. Stream
// destinations can never be overwritten, so they always yield false.
func checkOverwrite(ctx context.Context, root *doltdb.RootValue, fs filesys.ReadableFS, force bool, dest mvdata.DataLocation) (bool, error) {
	_, isStream := dest.(mvdata.StreamDataLocation)
	if isStream || force {
		return false, nil
	}
	// Without force, an overwrite occurs exactly when the destination exists.
	return dest.Exists(ctx, root, fs)
}
// importStatsCB renders a one-line progress summary from the accumulated edit
// stats, overwriting the previously printed line in place.
func importStatsCB(stats types.AppliedEditStats) {
	// Deletes of nonexistent rows and writes of unchanged values had no effect.
	noEffect := stats.NonExistentDeletes + stats.SameVal
	total := noEffect + stats.Modifications + stats.Additions
	displayStr := fmt.Sprintf("Rows Processed: %d, Additions: %d, Modifications: %d, Had No Effect: %d", total, stats.Additions, stats.Modifications, noEffect)
	// displayStrLen is a package-level length of the last printed line,
	// used by DeleteAndPrint to erase it before printing the new one.
	displayStrLen = cli.DeleteAndPrint(displayStrLen, displayStr)
}
// getDumpDestination returns a dump destination corresponding to the input
// path, or nil (after printing an error) when the destination format cannot
// be determined or is not supported.
func getDumpDestination(path string) mvdata.DataLocation {
	destLoc := mvdata.NewDataLocation(path, "")

	switch val := destLoc.(type) {
	case mvdata.FileDataLocation:
		// File formats are inferred from the extension; refuse to guess when
		// the extension is unrecognized.
		if val.Format == mvdata.InvalidDataFormat {
			cli.PrintErrln(
				color.RedString("Could not infer type file '%s'\n", path),
				"File extensions should match supported file types, or should be explicitly defined via the result-format parameter")
			return nil
		}
	case mvdata.StreamDataLocation:
		// Streams default to CSV on stdout; only delimiter-separated formats
		// (csv/psv) may be streamed.
		if val.Format == mvdata.InvalidDataFormat {
			val = mvdata.StreamDataLocation{Format: mvdata.CsvFile, Reader: os.Stdin, Writer: iohelp.NopWrCloser(cli.CliOut)}
			destLoc = val
		} else if val.Format != mvdata.CsvFile && val.Format != mvdata.PsvFile {
			cli.PrintErrln(color.RedString("Cannot export this format to stdout"))
			return nil
		}
	}

	return destLoc
}
// getDumpOptions builds the dumpOptions for the given output file name and
// result format rf.
func getDumpOptions(fileName string, rf string) *dumpOptions {
	return &dumpOptions{
		format: rf,
		dest:   getDumpDestination(fileName),
	}
}
// newTableArgs returns tableOptions wiring the named table as the source and
// the given data location as the destination.
func newTableArgs(tblName string, destination mvdata.DataLocation) *tableOptions {
	opts := tableOptions{
		tableName: tblName,
		src:       mvdata.TableDataLocation{Name: tblName},
		dest:      destination,
	}
	return &opts
}
// NewDumpDataMover returns a DataMover that reads rows from the source table
// described by tblOpts and appends them to the file at filePath using the
// destination's writer for its format.
func NewDumpDataMover(ctx context.Context, root *doltdb.RootValue, dEnv *env.DoltEnv, tblOpts *tableOptions, statsCB noms.StatsCB, filePath string) (retDataMover *mvdata.DataMover, retErr errhand.VerboseError) {
	var err error

	rd, srcIsSorted, err := tblOpts.src.NewReader(ctx, root, dEnv.FS, tblOpts.srcOptions)
	if err != nil {
		return nil, errhand.BuildDError("Error creating reader for %s.", tblOpts.SrcName()).AddCause(err).Build()
	}

	// close on err exit: rd is set to nil right before the successful return,
	// so this deferred close only fires on error paths — on success the
	// returned DataMover owns the reader.
	defer func() {
		if rd != nil {
			rErr := rd.Close(ctx)
			if rErr != nil {
				retErr = errhand.BuildDError("Could not close reader for %s.", tblOpts.SrcName()).AddCause(rErr).Build()
			}
		}
	}()

	// The output schema is the source table's schema, unchanged.
	inSch := rd.GetSchema()
	outSch := inSch

	opts := editor.Options{Deaf: dEnv.DbEaFactory()}

	// Open in append mode: for SQL dumps every table is written into the same
	// file, one after another.
	writer, err := dEnv.FS.OpenForWriteAppend(filePath, os.ModePerm)
	if err != nil {
		return nil, errhand.BuildDError("Error opening writer for %s.", tblOpts.DestName()).AddCause(err).Build()
	}

	wr, err := tblOpts.dest.NewCreatingWriter(ctx, tblOpts, root, srcIsSorted, outSch, statsCB, opts, writer)
	if err != nil {
		return nil, errhand.BuildDError("Could not create table writer for %s", tblOpts.tableName).AddCause(err).Build()
	}

	emptyTransColl := pipeline.NewTransformCollection()

	imp := &mvdata.DataMover{Rd: rd, Transforms: emptyTransColl, Wr: wr, ContOnErr: false}
	// Transfer reader ownership to the DataMover: disarm the deferred close.
	rd = nil

	return imp, nil
}
+17 -1
View File
@@ -18,6 +18,7 @@ import (
"context"
"io"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
@@ -269,8 +270,23 @@ func NewExportDataMover(ctx context.Context, root *doltdb.RootValue, dEnv *env.D
inSch := rd.GetSchema()
outSch := inSch
err = dEnv.FS.MkDirs(filepath.Dir(exOpts.DestName()))
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
filePath, err := dEnv.FS.Abs(exOpts.DestName())
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
writer, err := dEnv.FS.OpenForWrite(filePath, os.ModePerm)
if err != nil {
return nil, errhand.BuildDError("Error opening writer for %s.", exOpts.DestName()).AddCause(err).Build()
}
opts := editor.Options{Deaf: dEnv.DbEaFactory()}
wr, err := exOpts.dest.NewCreatingWriter(ctx, exOpts, dEnv, root, srcIsSorted, outSch, statsCB, opts)
wr, err := exOpts.dest.NewCreatingWriter(ctx, exOpts, root, srcIsSorted, outSch, statsCB, opts, writer)
if err != nil {
return nil, errhand.BuildDError("Could not create table writer for %s", exOpts.tableName).AddCause(err).Build()
+11 -3
View File
@@ -477,11 +477,19 @@ func newImportDataMover(ctx context.Context, root *doltdb.RootValue, dEnv *env.D
var wr table.TableWriteCloser
switch impOpts.operation {
case CreateOp:
wr, err = impOpts.dest.NewCreatingWriter(ctx, impOpts, dEnv, root, srcIsSorted, wrSch, statsCB, opts)
filePath, err := dEnv.FS.Abs(impOpts.DestName())
if err != nil {
return nil, &mvdata.DataMoverCreationError{ErrType: mvdata.CreateWriterErr, Cause: err}
}
writer, err := dEnv.FS.OpenForWrite(filePath, os.ModePerm)
if err != nil {
return nil, &mvdata.DataMoverCreationError{ErrType: mvdata.CreateWriterErr, Cause: err}
}
wr, err = impOpts.dest.NewCreatingWriter(ctx, impOpts, root, srcIsSorted, wrSch, statsCB, opts, writer)
case ReplaceOp:
wr, err = impOpts.dest.NewReplacingWriter(ctx, impOpts, dEnv, root, srcIsSorted, wrSch, statsCB, opts)
wr, err = impOpts.dest.NewReplacingWriter(ctx, impOpts, root, srcIsSorted, wrSch, statsCB, opts)
case UpdateOp:
wr, err = impOpts.dest.NewUpdatingWriter(ctx, impOpts, dEnv, root, srcIsSorted, wrSch, statsCB, rdTags, opts)
wr, err = impOpts.dest.NewUpdatingWriter(ctx, impOpts, root, srcIsSorted, wrSch, statsCB, rdTags, opts)
default:
err = errors.New("invalid move operation")
}
+1
View File
@@ -95,6 +95,7 @@ var doltCommand = cli.NewSubCommandHandler("dolt", "it's git for data", []cli.Co
commands.MergeBaseCmd{},
commands.RootsCmd{},
commands.VersionCmd{VersionStr: Version},
commands.DumpCmd{},
})
func init() {
@@ -151,6 +151,7 @@ const (
ClientEventType_TAG ClientEventType = 51
ClientEventType_GARBAGE_COLLECTION ClientEventType = 52
ClientEventType_FILTER_BRANCH ClientEventType = 53
ClientEventType_DUMP ClientEventType = 54
)
// Enum value maps for ClientEventType.
Regular → Executable
View File
+4 -4
View File
@@ -17,6 +17,7 @@ package mvdata
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
@@ -24,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
@@ -89,14 +89,14 @@ type DataLocation interface {
// NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite
// an existing table.
NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error)
NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options, wr io.WriteCloser) (table.TableWriteCloser, error)
// NewUpdatingWriter will create a TableWriteCloser for a DataLocation that will update and append rows based on
// their primary key.
NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error)
NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error)
// NewReplacingWriter will create a TableWriteCloser for a DataLocation that will overwrite an existing table if it has the same schema.
NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error)
NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error)
}
// NewDataLocation creates a DataLocation object from a path and a format string. If the path is the name of a table
+16 -4
View File
@@ -17,7 +17,9 @@ package mvdata
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -201,10 +203,20 @@ func TestCreateRdWr(t *testing.T) {
loc := test.dl
opts := editor.Options{Deaf: dEnv.DbEaFactory()}
wr, err := loc.NewCreatingWriter(context.Background(), mvOpts, dEnv, root, true, fakeSchema, nil, opts)
if err != nil {
t.Fatal("Unexpected error creating writer.", err)
filePath, fpErr := dEnv.FS.Abs(strings.Split(loc.String(), ":")[1])
if fpErr != nil {
t.Fatal("Unexpected error getting filepath", fpErr)
}
writer, wrErr := dEnv.FS.OpenForWrite(filePath, os.ModePerm)
if wrErr != nil {
t.Fatal("Unexpected error opening file for writer.", wrErr)
}
wr, wErr := loc.NewCreatingWriter(context.Background(), mvOpts, root, true, fakeSchema, nil, opts, writer)
if wErr != nil {
t.Fatal("Unexpected error creating writer.", wErr)
}
actualWrT := reflect.TypeOf(wr).Elem()
@@ -228,7 +240,7 @@ func TestCreateRdWr(t *testing.T) {
rd, _, err := loc.NewReader(context.Background(), root, dEnv.FS, JSONOptions{TableName: testTableName, SchFile: testSchemaFileName})
if err != nil {
t.Fatal("Unexpected error creating writer", err)
t.Fatal("Unexpected error creating reader", err)
}
actualRdT := reflect.TypeOf(rd).Elem()
@@ -18,11 +18,11 @@ import (
"context"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
@@ -145,18 +145,18 @@ func (dl FileDataLocation) NewReader(ctx context.Context, root *doltdb.RootValue
// NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite
// an existing table.
func (dl FileDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl FileDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options, wr io.WriteCloser) (table.TableWriteCloser, error) {
switch dl.Format {
case CsvFile:
return csv.OpenCSVWriter(dl.Path, dEnv.FS, outSch, csv.NewCSVInfo())
return csv.NewCSVWriter(wr, outSch, csv.NewCSVInfo())
case PsvFile:
return csv.OpenCSVWriter(dl.Path, dEnv.FS, outSch, csv.NewCSVInfo().SetDelim("|"))
return csv.NewCSVWriter(wr, outSch, csv.NewCSVInfo().SetDelim("|"))
case XlsxFile:
panic("writing to xlsx files is not supported yet")
case JsonFile:
return json.OpenJSONWriter(dl.Path, dEnv.FS, outSch)
return json.NewJSONWriter(wr, outSch)
case SqlFile:
return sqlexport.OpenSQLExportWriter(ctx, dl.Path, dEnv.FS, root, mvOpts.SrcName(), outSch, opts)
return sqlexport.OpenSQLExportWriter(ctx, wr, root, mvOpts.SrcName(), outSch, opts)
}
panic("Invalid Data Format." + string(dl.Format))
@@ -164,12 +164,12 @@ func (dl FileDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMov
// NewUpdatingWriter will create a TableWriteCloser for a DataLocation that will update and append rows based on
// their primary key.
func (dl FileDataLocation) NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error) {
func (dl FileDataLocation) NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error) {
panic("Updating of files is not supported")
}
// NewReplacingWriter will create a TableWriteCloser for a DataLocation that will overwrite an existing table while
// preserving schema
func (dl FileDataLocation) NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl FileDataLocation) NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
panic("Replacing files is not supported")
}
@@ -20,7 +20,6 @@ import (
"io"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
@@ -75,7 +74,7 @@ func (dl StreamDataLocation) NewReader(ctx context.Context, root *doltdb.RootVal
// NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite
// an existing table.
func (dl StreamDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl StreamDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options, wr io.WriteCloser) (table.TableWriteCloser, error) {
switch dl.Format {
case CsvFile:
return csv.NewCSVWriter(iohelp.NopWrCloser(dl.Writer), outSch, csv.NewCSVInfo())
@@ -89,12 +88,12 @@ func (dl StreamDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataM
// NewUpdatingWriter will create a TableWriteCloser for a DataLocation that will update and append rows based on
// their primary key.
func (dl StreamDataLocation) NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, _ []uint64, opts editor.Options) (table.TableWriteCloser, error) {
func (dl StreamDataLocation) NewUpdatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, _ []uint64, opts editor.Options) (table.TableWriteCloser, error) {
panic("Updating is not supported for stdout")
}
// NewReplacingWriter will create a TableWriteCloser for a DataLocation that will overwrite an existing table while
// preserving schema
func (dl StreamDataLocation) NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl StreamDataLocation) NewReplacingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, srcIsSorted bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
panic("Replacing is not supported for stdout")
}
@@ -17,12 +17,12 @@ package mvdata
import (
"context"
"errors"
"io"
"sync/atomic"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
@@ -78,7 +78,7 @@ func (dl TableDataLocation) NewReader(ctx context.Context, root *doltdb.RootValu
// NewCreatingWriter will create a TableWriteCloser for a DataLocation that will create a new table, or overwrite
// an existing table.
func (dl TableDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl TableDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMoverOptions, root *doltdb.RootValue, sortedInput bool, outSch schema.Schema, statsCB noms.StatsCB, opts editor.Options, wr io.WriteCloser) (table.TableWriteCloser, error) {
updatedRoot, err := root.CreateEmptyTable(ctx, dl.Name, outSch)
if err != nil {
return nil, err
@@ -91,7 +91,6 @@ func (dl TableDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMo
}
return &tableEditorWriteCloser{
dEnv: dEnv,
insertOnly: true,
initialData: types.EmptyMap,
statsCB: statsCB,
@@ -103,7 +102,7 @@ func (dl TableDataLocation) NewCreatingWriter(ctx context.Context, mvOpts DataMo
// NewUpdatingWriter will create a TableWriteCloser for a DataLocation that will update and append rows based on
// their primary key.
func (dl TableDataLocation) NewUpdatingWriter(ctx context.Context, _ DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, _ bool, wrSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error) {
func (dl TableDataLocation) NewUpdatingWriter(ctx context.Context, _ DataMoverOptions, root *doltdb.RootValue, _ bool, wrSch schema.Schema, statsCB noms.StatsCB, rdTags []uint64, opts editor.Options) (table.TableWriteCloser, error) {
tbl, ok, err := root.GetTable(ctx, dl.Name)
if err != nil {
return nil, err
@@ -136,7 +135,6 @@ func (dl TableDataLocation) NewUpdatingWriter(ctx context.Context, _ DataMoverOp
}
return &tableEditorWriteCloser{
dEnv: dEnv,
insertOnly: insertOnly,
initialData: m,
statsCB: statsCB,
@@ -149,7 +147,7 @@ func (dl TableDataLocation) NewUpdatingWriter(ctx context.Context, _ DataMoverOp
// NewReplacingWriter will create a TableWriteCloser for a DataLocation that will overwrite an existing table while
// preserving schema
func (dl TableDataLocation) NewReplacingWriter(ctx context.Context, _ DataMoverOptions, dEnv *env.DoltEnv, root *doltdb.RootValue, _ bool, _ schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
func (dl TableDataLocation) NewReplacingWriter(ctx context.Context, _ DataMoverOptions, root *doltdb.RootValue, _ bool, _ schema.Schema, statsCB noms.StatsCB, opts editor.Options) (table.TableWriteCloser, error) {
tbl, ok, err := root.GetTable(ctx, dl.Name)
if err != nil {
return nil, err
@@ -176,7 +174,6 @@ func (dl TableDataLocation) NewReplacingWriter(ctx context.Context, _ DataMoverO
}
return &tableEditorWriteCloser{
dEnv: dEnv,
insertOnly: true,
initialData: types.EmptyMap,
statsCB: statsCB,
@@ -187,7 +184,6 @@ func (dl TableDataLocation) NewReplacingWriter(ctx context.Context, _ DataMoverO
}
type tableEditorWriteCloser struct {
dEnv *env.DoltEnv
tableEditor editor.TableEditor
sess *editor.TableEditSession
initialData types.Map
@@ -15,6 +15,7 @@
package dsess
import (
"errors"
"fmt"
"strconv"
"sync"
@@ -26,6 +27,8 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/config"
)
var ErrSessionNotPeristable = errors.New("session is not persistable")
type DoltSession struct {
*Session
globalsConf config.ReadWriteConfig
@@ -48,6 +51,10 @@ func NewDoltSession(ctx *sql.Context, sqlSess *sql.BaseSession, pro RevisionData
// PersistGlobal implements sql.PersistableSession
func (s *DoltSession) PersistGlobal(sysVarName string, value interface{}) error {
if s.globalsConf == nil {
return ErrSessionNotPeristable
}
sysVar, _, err := validatePersistableSysVar(sysVarName)
if err != nil {
return err
@@ -60,6 +67,10 @@ func (s *DoltSession) PersistGlobal(sysVarName string, value interface{}) error
// RemovePersistedGlobal implements sql.PersistableSession
func (s *DoltSession) RemovePersistedGlobal(sysVarName string) error {
if s.globalsConf == nil {
return ErrSessionNotPeristable
}
sysVar, _, err := validatePersistableSysVar(sysVarName)
if err != nil {
return err
@@ -72,6 +83,10 @@ func (s *DoltSession) RemovePersistedGlobal(sysVarName string) error {
// RemoveAllPersistedGlobals implements sql.PersistableSession
func (s *DoltSession) RemoveAllPersistedGlobals() error {
if s.globalsConf == nil {
return ErrSessionNotPeristable
}
allVars := make([]string, s.globalsConf.Size())
i := 0
s.globalsConf.Iter(func(k, v string) bool {
@@ -87,11 +102,19 @@ func (s *DoltSession) RemoveAllPersistedGlobals() error {
// RemoveAllPersistedGlobals implements sql.PersistableSession
func (s *DoltSession) GetPersistedValue(k string) (interface{}, error) {
if s.globalsConf == nil {
return nil, ErrSessionNotPeristable
}
return getPersistedValue(s.globalsConf, k)
}
// SystemVariablesInConfig implements sql.PersistableSession, returning the
// system variables persisted in the session's global config, or
// ErrSessionNotPeristable if the session has no config attached.
func (s *DoltSession) SystemVariablesInConfig() ([]sql.SystemVariable, error) {
	if s.globalsConf == nil {
		return nil, ErrSessionNotPeristable
	}
	// Delegates to the package-level helper of the same name.
	return SystemVariablesInConfig(s.globalsConf)
}
@@ -155,6 +155,7 @@ type DatabaseSessionState struct {
readReplica *env.Remote
TempTableRoot *doltdb.RootValue
TempTableEditSession *editor.TableEditSession
tmpTablesDir string
// Same as InitialDbState.Err, this signifies that this
// DatabaseSessionState is invalid. LookupDbState returning a
@@ -1091,6 +1092,7 @@ func (sess *Session) AddDB(ctx *sql.Context, dbState InitialDbState) error {
// TODO: get rid of all repo state reader / writer stuff. Until we do, swap out the reader with one of our own, and
// the writer with one that errors out
sessionState.dbData = dbState.DbData
sessionState.tmpTablesDir = dbState.DbData.Rsw.TempTableFilesDir()
adapter := NewSessionStateAdapter(sess, db.Name(), dbState.Remotes, dbState.Branches)
sessionState.dbData.Rsr = adapter
sessionState.dbData.Rsw = adapter
@@ -17,15 +17,12 @@ package dsess
import (
"context"
"fmt"
"path/filepath"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
)
// SessionStateAdapter is an adapter for env.RepoStateReader in SQL contexts, getting information about the repo state
@@ -155,14 +152,5 @@ func (s SessionStateAdapter) RemoveBackup(ctx context.Context, name string) erro
}
func (s SessionStateAdapter) TempTableFilesDir() string {
//todo: save tempfile in dbState on server startup?
return mustAbs(dbfactory.DoltDir, "temptf")
}
func mustAbs(path ...string) string {
absPath, err := filesys.LocalFS.Abs(filepath.Join(path...))
if err != nil {
panic(err)
}
return absPath
return s.session.GetDbStates()[s.dbName].tmpTablesDir
}
@@ -20,14 +20,11 @@ import (
"encoding/json"
"errors"
"io"
"os"
"path/filepath"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/store/types"
)
@@ -44,22 +41,6 @@ type JSONWriter struct {
rowsWritten int
}
// OpenJSONWriter creates the parent directories of path on the given
// filesystem, opens the file for writing, and returns a JSONWriter that
// writes rows conforming to outSch into it.
func OpenJSONWriter(path string, fs filesys.WritableFS, outSch schema.Schema) (*JSONWriter, error) {
	if err := fs.MkDirs(filepath.Dir(path)); err != nil {
		return nil, err
	}
	writer, err := fs.OpenForWrite(path, os.ModePerm)
	if err != nil {
		return nil, err
	}
	return NewJSONWriter(writer, outSch)
}
func NewJSONWriter(wr io.WriteCloser, outSch schema.Schema) (*JSONWriter, error) {
bwr := bufio.NewWriterSize(wr, WriteBufSize)
err := iohelp.WriteAll(bwr, []byte(jsonHeader))
@@ -19,8 +19,6 @@ import (
"context"
"errors"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"unicode/utf8"
@@ -29,7 +27,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
)
// writeBufSize is the size of the buffer used when writing a csv file. It is set at the package level and all
@@ -45,24 +42,6 @@ type CSVWriter struct {
useCRLF bool // True to use \r\n as the line terminator
}
// OpenCSVWriter creates a file at the given path in the given filesystem
// (creating parent directories as needed) and returns a CSVWriter that writes
// rows based on the Schema and CSVFileInfo provided.
func OpenCSVWriter(path string, fs filesys.WritableFS, outSch schema.Schema, info *CSVFileInfo) (*CSVWriter, error) {
	if err := fs.MkDirs(filepath.Dir(path)); err != nil {
		return nil, err
	}
	writer, err := fs.OpenForWrite(path, os.ModePerm)
	if err != nil {
		return nil, err
	}
	return NewCSVWriter(writer, outSch, info)
}
// NewCSVWriter writes rows to the given WriteCloser based on the Schema and CSVFileInfo provided
func NewCSVWriter(wr io.WriteCloser, outSch schema.Schema, info *CSVFileInfo) (*CSVWriter, error) {
@@ -16,6 +16,7 @@ package csv
import (
"context"
"os"
"testing"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
@@ -91,7 +92,16 @@ Andy Anderson,27,
rows := getSampleRows()
fs := filesys.NewInMemFS(nil, nil, root)
csvWr, err := OpenCSVWriter(path, fs, rowSch, info)
filePath, err := fs.Abs(path)
if err != nil {
t.Fatal("Could not open create filepath for CSVWriter", err)
}
writer, err := fs.OpenForWrite(filePath, os.ModePerm)
if err != nil {
t.Fatal("Could not open writer for CSVWriter", err)
}
csvWr, err := NewCSVWriter(writer, rowSch, info)
if err != nil {
t.Fatal("Could not open CSVWriter", err)
@@ -123,7 +133,15 @@ Andy Anderson|27|
rows := getSampleRows()
fs := filesys.NewInMemFS(nil, nil, root)
csvWr, err := OpenCSVWriter(path, fs, rowSch, info)
filePath, err := fs.Abs(path)
if err != nil {
t.Fatal("Could not open create filepath for CSVWriter", err)
}
writer, err := fs.OpenForWrite(filePath, os.ModePerm)
if err != nil {
t.Fatal("Could not open writer for CSVWriter", err)
}
csvWr, err := NewCSVWriter(writer, rowSch, info)
if err != nil {
t.Fatal("Could not open CSVWriter", err)
@@ -17,8 +17,6 @@ package sqlexport
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
@@ -29,7 +27,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
)
@@ -46,17 +43,8 @@ type SqlExportWriter struct {
editOpts editor.Options
}
// OpenSQLExportWriter returns a new SqlWriter for the table given writing to a file with the path given.
func OpenSQLExportWriter(ctx context.Context, path string, fs filesys.WritableFS, root *doltdb.RootValue, tableName string, sch schema.Schema, editOpts editor.Options) (*SqlExportWriter, error) {
err := fs.MkDirs(filepath.Dir(path))
if err != nil {
return nil, err
}
wr, err := fs.OpenForWrite(path, os.ModePerm)
if err != nil {
return nil, err
}
// OpenSQLExportWriter returns a new SqlWriter for the table with the writer given.
func OpenSQLExportWriter(ctx context.Context, wr io.WriteCloser, root *doltdb.RootValue, tableName string, sch schema.Schema, editOpts editor.Options) (*SqlExportWriter, error) {
allSchemas, err := root.GetAllSchemas(ctx)
if err != nil {
+4
View File
@@ -51,6 +51,10 @@ type WritableFS interface {
// it will be overwritten.
OpenForWrite(fp string, perm os.FileMode) (io.WriteCloser, error)
// OpenForWriteAppend opens a file for writing. The file will be created if it does not exist, and it will
// append only to that new file. If file exists, it will append to existing file.
OpenForWriteAppend(fp string, perm os.FileMode) (io.WriteCloser, error)
// WriteFile writes the entire data buffer to a given file. The file will be created if it does not exist,
// and if it does exist it will be overwritten.
WriteFile(fp string, data []byte) error
+22
View File
@@ -332,6 +332,28 @@ func (fs *InMemFS) OpenForWrite(fp string, perm os.FileMode) (io.WriteCloser, er
return &inMemFSWriteCloser{fp, parentDir, fs, bytes.NewBuffer(make([]byte, 0, 512)), fs.rwLock}, nil
}
// OpenForWriteAppend opens a file for writing. The file will be created if it does not exist, and if it does exist
// it will append to existing file.
//
// NOTE(review): this body is identical to OpenForWrite — it hands the write
// closer a fresh empty buffer, so any existing in-memory file content looks
// like it gets discarded rather than appended to. Confirm whether
// inMemFSWriteCloser merges with prior contents on Close; otherwise append
// semantics are not actually implemented here.
func (fs *InMemFS) OpenForWriteAppend(fp string, perm os.FileMode) (io.WriteCloser, error) {
	fs.rwLock.Lock()
	defer fs.rwLock.Unlock()
	// Normalize to an absolute path within the in-memory tree.
	fp = fs.getAbsPath(fp)
	// A directory cannot be opened for writing.
	if exists, isDir := fs.exists(fp); exists && isDir {
		return nil, ErrIsDir
	}
	// Ensure all parent directories exist before handing out the writer.
	dir := filepath.Dir(fp)
	parentDir, err := fs.mkDirs(dir)
	if err != nil {
		return nil, err
	}
	return &inMemFSWriteCloser{fp, parentDir, fs, bytes.NewBuffer(make([]byte, 0, 512)), fs.rwLock}, nil
}
// WriteFile writes the entire data buffer to a given file. The file will be created if it does not exist,
// and if it does exist it will be overwritten.
func (fs *InMemFS) WriteFile(fp string, data []byte) error {
+13
View File
@@ -189,6 +189,19 @@ func (fs *localFS) OpenForWrite(fp string, perm os.FileMode) (io.WriteCloser, er
return os.OpenFile(fp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
}
// OpenForWriteAppend opens a file for writing. The file will be created if it
// does not exist; if it already exists, writes are appended to its current
// contents (O_APPEND).
func (fs *localFS) OpenForWriteAppend(fp string, perm os.FileMode) (io.WriteCloser, error) {
	absPath, err := fs.Abs(fp)
	if err != nil {
		return nil, err
	}
	return os.OpenFile(absPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, perm)
}
// WriteFile writes the entire data buffer to a given file. The file will be created if it does not exist,
// and if it does exist it will be overwritten.
func (fs *localFS) WriteFile(fp string, data []byte) error {
+301
View File
@@ -0,0 +1,301 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
    # Standard per-test setup shared by all bats suites in this repo.
    setup_common
}
teardown() {
    # Verify the repo feature version was not unexpectedly bumped, then clean up.
    assert_feature_version
    teardown_common
}
@test "dump: no tables" {
    # Dumping an empty database should succeed with an informative message.
    run dolt dump
    [ "$status" -eq 0 ]
    [[ "$output" =~ "No tables to export." ]] || false
}
@test "dump: SQL type - dolt dump with multiple tables" {
    # Seed three tables (including an enum column) with a total of 6 rows.
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "create table enums (a varchar(10) primary key, b enum('one','two','three'))"
    dolt sql -q "insert into enums values ('abc', 'one'), ('def', 'two')"
    run dolt dump
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Successfully exported data." ]] || false
    [ -f doltdump.sql ]
    # The dump file should contain one INSERT per row and one CREATE per table.
    run grep INSERT doltdump.sql
    [ "$status" -eq 0 ]
    [ "${#lines[@]}" -eq 6 ]
    run grep CREATE doltdump.sql
    [ "$status" -eq 0 ]
    [ "${#lines[@]}" -eq 3 ]
    # A second dump without -f must refuse to overwrite the existing file.
    run dolt dump
    [ "$status" -ne 0 ]
    [[ "$output" =~ "doltdump.sql already exists" ]] || false
    # -f forces the overwrite.
    run dolt dump -f
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Successfully exported data." ]] || false
    # Round-trip: replaying the dump re-inserts all 6 rows.
    run dolt sql < doltdump.sql
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Rows inserted: 6 Rows updated: 0 Rows deleted: 0" ]] || false
}
@test "dump: SQL type - compare tables in database with tables imported file " {
    # Create keyed and keyless tables on main, dump them, replay the dump on a
    # fresh branch, and assert the two branches are identical.
    dolt branch new_branch
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt sql -q "INSERT INTO keyless VALUES (0,0),(2,2),(1,1),(1,1);"
    dolt add .
    dolt commit -m "create tables"
    run dolt dump
    [ "$status" -eq 0 ]
    [ -f doltdump.sql ]
    dolt checkout new_branch
    dolt sql < doltdump.sql
    dolt add .
    dolt commit --allow-empty -m "create tables from doltdump"
    # An empty diff summary means the round trip was lossless.
    run dolt diff --summary main new_branch
    [ "$status" -eq 0 ]
    [[ "$output" = "" ]] || false
}
@test "dump: SQL type - dolt dump with Indexes" {
    # Secondary indexes must survive a dump/replay round trip.
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "CREATE TABLE onepk (pk1 BIGINT PRIMARY KEY, v1 BIGINT, v2 BIGINT);"
    dolt sql -q "CREATE INDEX idx_v1 ON onepk(v1);"
    dolt sql -q "INSERT INTO onepk VALUES (1, 99, 51), (2, 11, 55), (3, 88, 52), (4, 22, 54), (5, 77, 53);"
    run dolt dump
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Successfully exported data." ]] || false
    [ -f doltdump.sql ]
    run dolt sql < doltdump.sql
    [ "$status" -eq 0 ]
    # The replayed schema must still declare the secondary index.
    run dolt schema show onepk
    [ "$status" -eq "0" ]
    [[ "$output" =~ 'KEY `idx_v1` (`v1`)' ]] || false
}
@test "dump: SQL type - dolt dump with foreign key and import" {
    # Placeholder for foreign-key round-trip coverage; unskip once implemented.
    skip "dolt dump foreign key option for import NOT implemented"
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "CREATE TABLE parent (id int PRIMARY KEY, pv1 int, pv2 int, INDEX v1 (pv1), INDEX v2 (pv2));"
    dolt sql -q "CREATE TABLE child (id int primary key, cv1 int, cv2 int, CONSTRAINT fk_named FOREIGN KEY (cv1) REFERENCES parent(pv1));"
    dolt add .
    dolt commit -m "create tables"
    run dolt dump
    [ "$status" -eq 0 ]
    [ -f doltdump.sql ]
    run dolt sql < doltdump.sql
    [ "$status" -eq 0 ]
}
@test "dump: SQL type - dolt dump with views/trigger" {
    # Placeholder for view/trigger dump coverage; unskip once implemented.
    skip "dolt dump views/trigger NOT implemented"
    dolt sql -q "CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 BIGINT);"
    dolt sql -q "CREATE TRIGGER trigger1 BEFORE INSERT ON test FOR EACH ROW SET new.v1 = -new.v1;"
    dolt sql -q "CREATE VIEW view1 AS SELECT v1 FROM test;"
    dolt sql -q "CREATE TABLE a (x INT PRIMARY KEY);"
    dolt sql -q "CREATE TABLE b (y INT PRIMARY KEY);"
    dolt sql -q "INSERT INTO test VALUES (1, 1);"
    dolt sql -q "CREATE VIEW view2 AS SELECT y FROM b;"
    dolt sql -q "CREATE TRIGGER trigger2 AFTER INSERT ON a FOR EACH ROW INSERT INTO b VALUES (new.x * 2);"
    dolt sql -q "INSERT INTO a VALUES (2);"
}
@test "dump: SQL type - dolt dump with keyless tables" {
    # Keyless tables (including one with a secondary index) must round-trip
    # through dump/replay and remain updatable afterwards.
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt sql -q "INSERT INTO keyless VALUES (0,0),(2,2),(1,1),(1,1);"
    dolt sql -q "ALTER TABLE keyless ADD INDEX (c1);"
    dolt sql -q "CREATE TABLE keyless_warehouse(warehouse_id int, warehouse_name longtext);"
    dolt sql -q "INSERT into keyless_warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt add .
    dolt commit -m "create tables"
    run dolt dump
    [ "$status" -eq 0 ]
    [ -f doltdump.sql ]
    # Drop the table so the replay recreates it from scratch.
    dolt table rm keyless
    run dolt sql < doltdump.sql
    [ "$status" -eq 0 ]
    # Updating one of the duplicate rows should affect exactly one row.
    run dolt sql -q "UPDATE keyless SET c0 = 4 WHERE c0 = 2;"
    [ $status -eq 0 ]
    run dolt sql -q "SELECT * FROM keyless ORDER BY c0;" -r csv
    [ $status -eq 0 ]
    [[ "${lines[1]}" = "0,0" ]] || false
    [[ "${lines[2]}" = "1,1" ]] || false
    [[ "${lines[3]}" = "1,1" ]] || false
    [[ "${lines[4]}" = "4,2" ]] || false
}
@test "dump: SQL type - dolt dump with empty tables" {
    # Empty tables should dump their schemas (CREATE statements) and no rows.
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt sql -q "CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 BIGINT);"
    dolt add .
    dolt commit -m "create tables"
    run dolt dump
    [ "$status" -eq 0 ]
    [ -f doltdump.sql ]
    # Insert rows AFTER dumping so the replay below still succeeds against
    # existing tables; the dump itself must not contain these rows.
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    run dolt sql < doltdump.sql
    [ "$status" -eq 0 ]
    run grep CREATE doltdump.sql
    [ "$status" -eq 0 ]
    [ "${#lines[@]}" -eq 3 ]
    # grep exits 1 on no match: the dump must contain zero INSERTs.
    run grep INSERT doltdump.sql
    [ "$status" -eq 1 ]
    [ "${#lines[@]}" -eq 0 ]
}
@test "dump: CSV type - dolt dump with multiple tables and check -f flag" {
    # CSV dumps write one file per table under doltdump/; without -f the dump
    # must fail on the first pre-existing file it encounters.
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "create table enums (a varchar(10) primary key, b enum('one','two','three'))"
    dolt sql -q "insert into enums values ('abc', 'one'), ('def', 'two')"
    run dolt dump -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Successfully exported data." ]] || false
    [ -f doltdump/enums.csv ]
    [ -f doltdump/new_table.csv ]
    [ -f doltdump/warehouse.csv ]
    run dolt dump -r csv
    [ "$status" -ne 0 ]
    [[ "$output" =~ "enums.csv already exists" ]] || false
    # Removing only enums.csv moves the collision to the next table; note that
    # each failed dump re-creates the files it wrote before hitting the
    # collision, hence the repeated removals below.
    rm doltdump/enums.csv
    run dolt dump -r csv
    [ "$status" -ne 0 ]
    [[ "$output" =~ "new_table.csv already exists" ]] || false
    rm doltdump/enums.csv
    rm doltdump/new_table.csv
    run dolt dump -r csv
    [ "$status" -ne 0 ]
    [[ "$output" =~ "warehouse.csv already exists" ]] || false
    # -f overwrites everything in one pass.
    run dolt dump -f -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "Successfully exported data." ]] || false
    [ -f doltdump/enums.csv ]
    [ -f doltdump/new_table.csv ]
    [ -f doltdump/warehouse.csv ]
}
@test "dump: CSV type - compare tables in database with tables imported from corresponding files " {
    # Dump each table to CSV on main, re-import the files on a branch that has
    # only the empty schemas, and assert the branches end up identical.
    dolt sql -q "CREATE TABLE new_table(pk int);"
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt add .
    dolt commit -m "create tables"
    dolt branch new_branch
    dolt sql -q "INSERT INTO new_table VALUES (1);"
    dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
    dolt sql -q "INSERT INTO keyless VALUES (0,0),(2,2),(1,1),(1,1);"
    dolt add .
    dolt commit -m "insert to tables"
    run dolt dump -r csv
    [ "$status" -eq 0 ]
    [ -f doltdump/keyless.csv ]
    [ -f doltdump/new_table.csv ]
    [ -f doltdump/warehouse.csv ]
    dolt checkout new_branch
    # -r replaces existing table data with the file contents.
    dolt table import -r new_table doltdump/new_table.csv
    dolt table import -r warehouse doltdump/warehouse.csv
    dolt table import -r keyless doltdump/keyless.csv
    dolt add .
    dolt commit --allow-empty -m "create tables from doltdump"
    run dolt diff --summary main new_branch
    [ "$status" -eq 0 ]
    [[ "$output" = "" ]] || false
}
@test "dump: CSV type - dolt dump with empty tables" {
    # Empty tables should still produce (header-only) CSV files that import
    # back to an identical database state.
    dolt branch new_branch
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt sql -q "CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 BIGINT);"
    dolt add .
    dolt commit -m "create tables"
    run dolt dump -r csv
    [ "$status" -eq 0 ]
    [ -f doltdump/keyless.csv ]
    [ -f doltdump/test.csv ]
    [ -f doltdump/warehouse.csv ]
    dolt checkout new_branch
    # Recreate the schemas on the branch, then replace their data from the CSVs.
    dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext);"
    dolt sql -q "CREATE TABLE keyless (c0 int, c1 int);"
    dolt sql -q "CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 BIGINT);"
    dolt table import -r warehouse doltdump/warehouse.csv
    dolt table import -r keyless doltdump/keyless.csv
    dolt table import -r test doltdump/test.csv
    dolt add .
    dolt commit --allow-empty -m "create tables from doltdump"
    run dolt diff --summary main new_branch
    [ "$status" -eq 0 ]
    [[ "$output" = "" ]] || false
}
@@ -1,28 +1,28 @@
{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn",
"title": "ceo",
"start date": "",
"end date": ""
},
{
"id": 1,
"first name": "aaron",
"last name": "son",
"title": "founder",
"start date": "",
"end date": ""
},
{
"id": 2,
"first name": "brian",
"last name": "hendricks",
"title": "founder",
"start date": "",
"end date": ""
}
]
{
"rows": [
{
"id": 0,
"first name": "tim",
"last name": "sehn",
"title": "ceo",
"start date": "",
"end date": ""
},
{
"id": 1,
"first name": "aaron",
"last name": "son",
"title": "founder",
"start date": "",
"end date": ""
},
{
"id": 2,
"first name": "brian",
"last name": "hendricks",
"title": "founder",
"start date": "",
"end date": ""
}
]
}
@@ -107,6 +107,29 @@ start_sql_server() {
wait_for_connection $PORT 5000
}
# Starts a dolt sql-server using a YAML config assembled from built-in defaults
# plus the extra config file passed as $2 (appended last, so it can override or
# extend the defaults). $1 names the default database.
# NOTE(review): the YAML nesting below was reconstructed with conventional
# two-space indentation — confirm against the original file.
start_sql_server_with_config() {
    DEFAULT_DB="$1"
    # Derive a quasi-unique port from the PID to avoid collisions across tests.
    let PORT="$$ % (65536-1024) + 1024"
    echo "
log_level: debug
user:
  name: dolt
listener:
  host: 0.0.0.0
  port: $PORT
  max_connections: 10
behavior:
  autocommit: false
" > .cliconfig.yaml
    cat "$2" >> .cliconfig.yaml
    dolt sql-server --config .cliconfig.yaml &
    SERVER_PID=$!
    wait_for_connection $PORT 5000
}
start_sql_multi_user_server() {
DEFAULT_DB="$1"
let PORT="$$ % (65536-1024) + 1024"
@@ -567,5 +567,17 @@ SQL
run dolt sql -q "select * from c where val = 2" -r csv
[ $status -eq 0 ]
[[ "$output" =~ "2,2" ]] || false
}
@test "primary-key-changes: can't add a primary key on a column containing NULLs" {
    dolt sql -q "create table t (pk int, c1 int)"
    dolt sql -q "insert into t values (NULL, NULL)"
    run dolt sql -q "alter table t add primary key(pk)"
    # skip is placed AFTER the run so the command above still executes; the
    # assertions below are the desired behavior and never run until unskipped.
    skip "This should fail on some sort of constraint error"
    [ $status -eq 1 ]
    # This is the current failure mode
    run dolt sql -q "update t set c1=1"
    [ $status -eq 1 ]
    [[ "$output" =~ "received nil" ]] || false
}
+22 -1
View File
@@ -1,3 +1,4 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
make_repo() {
@@ -87,7 +88,7 @@ seed_repos_with_tables_with_use_statements() {
@test "sql-multi-db: join on multiple databases with same name" {
seed_repos_with_tables_with_use_statements
dolt sql --multi-db-dir ./ -b -q "
USE repo1;
USE repo1;
CREATE TABLE r2_t1 (pk BIGINT, c1 BIGINT, PRIMARY KEY(pk));
INSERT INTO r2_t1 (pk, c1) values (2,200),(3,300),(4,400);"
run dolt sql --multi-db-dir ./ -q "select * from repo1.r2_t1 join repo2.r2_t1 on repo1.r2_t1.pk=repo2.r2_t1.pk"
@@ -95,3 +96,23 @@ seed_repos_with_tables_with_use_statements() {
[ "$status" -eq 0 ]
[[ ! $output =~ "Not unique table/alias" ]] || false
}
@test "sql-multi-db: fetch multiple databases with appropriate tempdir" {
    # Regression test: dolt_fetch() in multi-db mode must use the per-database
    # temp dir rather than one resolved from the process working directory.
    seed_repos_with_tables_with_use_statements
    mkdir remote1
    mkdir -p subremotes/repo1
    cd subremotes/repo1
    dolt init
    dolt remote add origin file://../../remote1
    dolt push origin main
    cd ..
    dolt clone file://../remote1 repo2
    cd ..
    run dolt sql --multi-db-dir ./subremotes -b -q "
USE repo2;
select dolt_fetch() as f;" -r csv
    [ "$status" -eq 0 ]
    # CSV output: header row "f" followed by a success value of 1.
    [[ "${lines[1]}" =~ "f" ]] || false
    [[ "${lines[2]}" =~ "1" ]] || false
}
+34
View File
@@ -1196,3 +1196,37 @@ while True:
[ "$status" -eq 0 ]
[[ "$output" =~ "b" ]] || false
}
@test "sql-server: fetch uses database tempdir from different working directory" {
    # Regression test: a server started from OUTSIDE the repo directory must
    # still resolve the repo's temp-table dir correctly when fetching.
    skiponwindows "Has dependencies that are missing on the Jenkins Windows installation."
    mkdir remote1
    cd repo2
    dolt remote add remote1 file://../remote1
    dolt push -u remote1 main
    cd ..
    rm -rf repo1
    dolt clone file://./remote1 repo1
    cd repo1
    dolt remote add remote1 file://../remote1
    cd ../repo2
    # Advance the remote so the fetch below has something to pull.
    dolt sql -q "create table test (a int)"
    dolt commit -am "new commit"
    dolt push -u remote1 main
    cd ../repo1
    REPO_PATH=$(pwd)
    cd ..
    # Point the server at repo1 by absolute path while running from the parent
    # directory. NOTE(review): YAML indentation reconstructed conventionally.
    echo "
databases:
  - name: repo1
    path: $REPO_PATH
" > server.yaml
    start_sql_server_with_config repo1 server.yaml
    server_query repo1 1 "select dolt_fetch() as f" "f\n1"
}