Merge remote-tracking branch 'origin/main' into taylor/two-dot-log-cli

Taylor Bantle
2022-10-17 16:20:09 -07:00
92 changed files with 839 additions and 1699 deletions


@@ -1,34 +0,0 @@
name: Push Docker Image to DockerHub
on:
workflow_dispatch:
inputs:
version:
description: 'SemVer format release tag, e.g. 0.24.5'
required: true
repository_dispatch:
types: [ push-docker-image ]
jobs:
docker-image-push:
name: Push Docker Image
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./docker/Dockerfile
push: true
tags: ${{ secrets.DOCKER_HUB_USERNAME }}/dolt:${{ github.event.inputs.version || github.event.client_payload.version }} , ${{ secrets.DOCKER_HUB_USERNAME }}/dolt:latest
build-args: |
DOLT_VERSION=${{ github.event.inputs.version || github.event.client_payload.version }}


@@ -37,10 +37,15 @@ jobs:
env:
FILE: ${{ format('{0}/go/cmd/dolt/dolt.go', github.workspace) }}
NEW_VERSION: ${{ needs.format-version.outputs.version }}
- name: Update Dockerfile
run: sed -i -e 's/ARG DOLT_VERSION=.*/ARG DOLT_VERSION='"$NEW_VERSION"'/' "$FILE"
env:
FILE: ${{ format('{0}/docker/Dockerfile', github.workspace) }}
NEW_VERSION: ${{ needs.format-version.outputs.version }}
- uses: EndBug/add-and-commit@v7
with:
message: ${{ format('[ga-bump-release] Update Dolt version to {0} and release v{0}', needs.format-version.outputs.version) }}
add: ${{ format('{0}/go/cmd/dolt/dolt.go', github.workspace) }}
add: ${{ format('[{0}/go/cmd/dolt/dolt.go, {0}/docker/Dockerfile]', github.workspace) }}
cwd: "."
- name: Build Binaries
id: build_binaries
@@ -164,14 +169,3 @@ jobs:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: release-dolt
client-payload: '{"version": "${{ needs.format-version.outputs.version }}", "actor": "${{ github.actor }}"}'
docker-image-push:
needs: [ format-version, create-release ]
runs-on: ubuntu-22.04
steps:
- name: Trigger Push Docker Image
uses: peter-evans/repository-dispatch@v1
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
event-type: push-docker-image
client-payload: '{"version": "${{ needs.format-version.outputs.version }}"}'
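The docker-image-push job removed above chained releases to the (also deleted) push-docker-image workflow through a repository_dispatch event. For reference, that dispatch boils down to a single REST call against GitHub's dispatches endpoint; the sketch below is hypothetical Go, not part of this commit, with the repo path and version filled in as illustrative examples.

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Same event the removed step sent: an event type plus a version payload.
	payload := []byte(`{"event_type":"push-docker-image","client_payload":{"version":"0.50.5"}}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://api.github.com/repos/dolthub/dolt/dispatches", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("REPO_ACCESS_TOKEN"))
	req.Header.Set("Accept", "application/vnd.github+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // GitHub answers 204 No Content on success
}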


@@ -1,7 +1,7 @@
# syntax=docker/dockerfile:1.3-labs
FROM --platform=linux/amd64 ubuntu:22.04
ARG DOLT_VERSION
ARG DOLT_VERSION=0.50.5
ADD https://github.com/dolthub/dolt/releases/download/v${DOLT_VERSION}/dolt-linux-amd64.tar.gz dolt-linux-amd64.tar.gz
RUN tar zxvf dolt-linux-amd64.tar.gz && \


@@ -40,9 +40,21 @@ var cfgDocs = cli.CommandDocumentationContent{
ShortDesc: `Get and set repository or global options`,
LongDesc: `You can query/set/replace/unset options with this command.
When reading, the values are read from the global and repository local configuration files, and options {{.LessThan}}--global{{.GreaterThan}}, and {{.LessThan}}--local{{.GreaterThan}} can be used to tell the command to read from only that location.
When writing, the new value is written to the repository local configuration file by default, and options {{.LessThan}}--global{{.GreaterThan}}, can be used to tell the command to write to that location (you can say {{.LessThan}}--local{{.GreaterThan}} but that is the default).
Valid configuration variables:
- core.editor - lets you edit 'commit' or 'tag' messages by launching the set editor.
- creds.add_url - sets the endpoint used to authenticate a client for 'dolt login'.
- doltlab.insecure - boolean flag used to authenticate a client against DoltLab.
- init.defaultbranch - allows overriding the default branch name e.g. when initializing a new repository.
- metrics.disabled - boolean flag that disables sending metrics when true.
- user.creds - sets user keypairs for authenticating with doltremoteapi
- user.email - sets email used in the author and committer field of commit objects
- user.name - sets name used in the author and committer field of commit objects
- remotes.default_host - sets default host for authenticating with doltremoteapi
- remotes.default_port - sets default port for authenticating with doltremoteapi
`,
Synopsis: []string{


@@ -187,9 +187,8 @@ func secondsSince(start time.Time) float64 {
// nullWriter is a no-op SqlRowWriter implementation
type nullWriter struct{}
func (n nullWriter) WriteRow(ctx context.Context, r row.Row) error { return nil }
func (n nullWriter) WriteSqlRow(ctx context.Context, r sql.Row) error { return nil }
func (n nullWriter) Close(ctx context.Context) error { return nil }
func printEmptySetResult(start time.Time) {
seconds := secondsSince(start)


@@ -57,7 +57,7 @@ import (
)
const (
Version = "0.50.3"
Version = "0.50.5"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}


@@ -57,7 +57,7 @@ require (
require (
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible
github.com/cenkalti/backoff/v4 v4.1.3
github.com/dolthub/go-mysql-server v0.12.1-0.20221012045247-2eb37b2d80e0
github.com/dolthub/go-mysql-server v0.12.1-0.20221014214651-c2bf09248ff9
github.com/google/flatbuffers v2.0.6+incompatible
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/mitchellh/go-ps v1.0.0


@@ -178,8 +178,8 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20221012045247-2eb37b2d80e0 h1:lmmFbgh51CHtwM+skbDuzN3j7fdgkLsn0omS1C0TZxA=
github.com/dolthub/go-mysql-server v0.12.1-0.20221012045247-2eb37b2d80e0/go.mod h1:9Q9FhWO82GrV4he13V2ZuDE0T/eDZbPVMOWLcZluOvg=
github.com/dolthub/go-mysql-server v0.12.1-0.20221014214651-c2bf09248ff9 h1:xGTHflXQ6gWZFvgXIyHHrBtmIEWoLEc70dSY6SGPl/g=
github.com/dolthub/go-mysql-server v0.12.1-0.20221014214651-c2bf09248ff9/go.mod h1:9Q9FhWO82GrV4he13V2ZuDE0T/eDZbPVMOWLcZluOvg=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=


@@ -20,13 +20,9 @@ import (
"encoding/binary"
"errors"
"fmt"
"io"
"sort"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/rowconv"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/set"
@@ -677,130 +673,6 @@ func (refOp ForeignKeyReferentialAction) ReducedString() string {
}
}
// ValidateData ensures that the foreign key is valid by comparing the index data from the given table
// against the index data from the referenced table. Returns an error for each violation.
func (fk ForeignKey) ValidateData(
ctx context.Context,
childSch schema.Schema,
childRowData, childIdxData, parentIdxData types.Map,
childDef, parentDef schema.Index,
vrw types.ValueReadWriter,
) error {
// Any unresolved foreign key does not yet reference any indexes, therefore there's no need to error here.
// Data will be validated whenever the foreign key is resolved.
if !fk.IsResolved() {
return nil
}
if fk.ReferencedTableIndex != parentDef.Name() {
return fmt.Errorf("cannot validate data as wrong referenced index was given: expected `%s` but received `%s`",
fk.ReferencedTableIndex, parentDef.Name())
}
tagMap := make(map[uint64]uint64, len(fk.TableColumns))
for i, childTag := range fk.TableColumns {
tagMap[childTag] = fk.ReferencedTableColumns[i]
}
// FieldMappings ignore columns not in the tagMap
fm, err := rowconv.NewFieldMapping(childDef.Schema(), parentDef.Schema(), tagMap)
if err != nil {
return err
}
rc, err := rowconv.NewRowConverter(ctx, vrw, fm)
if err != nil {
return err
}
rdr, err := noms.NewNomsMapReader(ctx, childIdxData, childDef.Schema())
if err != nil {
return err
}
var violatingRows []row.Row
for {
childIdxRow, err := rdr.ReadRow(ctx)
if err == io.EOF {
break
}
if err != nil {
return err
}
// Check if there are any NULL values, as they should be skipped
hasNulls := false
_, err = childIdxRow.IterSchema(childDef.Schema(), func(tag uint64, val types.Value) (stop bool, err error) {
if types.IsNull(val) {
hasNulls = true
return true, nil
}
return false, nil
})
if err != nil {
return err
}
if hasNulls {
continue
}
parentIdxRow, err := rc.Convert(childIdxRow)
if err != nil {
return err
}
if row.IsEmpty(parentIdxRow) {
continue
}
partial, err := row.ReduceToIndexPartialKey(parentDef, parentIdxRow)
if err != nil {
return err
}
indexIter := noms.NewNomsRangeReader(parentDef.Schema(), parentIdxData,
[]*noms.ReadRange{{Start: partial, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partial)}},
)
switch _, err = indexIter.ReadRow(ctx); err {
case nil:
continue // parent table contains child key
case io.EOF:
childFullKey, err := childIdxRow.NomsMapKey(childDef.Schema()).Value(ctx)
if err != nil {
return err
}
childKey, err := childDef.ToTableTuple(ctx, childFullKey.(types.Tuple), childIdxRow.Format())
if err != nil {
return err
}
childVal, ok, err := childRowData.MaybeGetTuple(ctx, childKey)
if err != nil {
return err
}
if !ok {
childKeyStr, _ := types.EncodedValue(ctx, childKey)
return fmt.Errorf("could not find row value for key %s on table `%s`", childKeyStr, fk.TableName)
}
childRow, err := row.FromNoms(childSch, childKey, childVal)
if err != nil {
return err
}
violatingRows = append(violatingRows, childRow)
default:
return err
}
}
if len(violatingRows) == 0 {
return nil
} else {
return &ForeignKeyViolationError{
ForeignKey: fk,
Schema: childSch,
ViolationRows: violatingRows,
}
}
}
// ColumnHasFkRelationship returns a foreign key that uses the given tag, and a bool indicating whether one was found.
func (fkc *ForeignKeyCollection) ColumnHasFkRelationship(tag uint64) (ForeignKey, bool) {
for _, key := range fkc.AllKeys() {


@@ -379,32 +379,6 @@ func setConflicts(ctx context.Context, cons durable.ConflictIndex, tbl, mergeTbl
return tableToUpdate, nil
}
func getTableInfoFromRoot(ctx context.Context, tblName string, root *doltdb.RootValue) (
ok bool,
table *doltdb.Table,
sch schema.Schema,
h hash.Hash,
err error,
) {
table, ok, err = root.GetTable(ctx, tblName)
if err != nil {
return false, nil, nil, hash.Hash{}, err
}
if ok {
h, err = table.HashOf()
if err != nil {
return false, nil, nil, hash.Hash{}, err
}
sch, err = table.GetSchema(ctx)
if err != nil {
return false, nil, nil, hash.Hash{}, err
}
}
return ok, table, sch, h, nil
}
func calcTableMergeStats(ctx context.Context, tbl *doltdb.Table, mergeTbl *doltdb.Table) (MergeStats, error) {
ms := MergeStats{Operation: TableModified}


@@ -289,10 +289,6 @@ func createCVsForPartialKeyMatches(
return createdViolation, nil
}
func makePartialDescriptor(desc val.TupleDesc, n int) val.TupleDesc {
return val.NewTupleDescriptor(desc.Types[:n]...)
}
func makePartialKey(kb *val.TupleBuilder, idxSch schema.Index, tblSch schema.Schema, k, v val.Tuple, pool pool.BuffPool) (val.Tuple, bool) {
for i, tag := range idxSch.IndexedColumnTags() {
if j, ok := tblSch.GetPKCols().TagToIdx[tag]; ok {


@@ -284,6 +284,10 @@ func translateGeometryField(value types.Value, idx int, b *val.TupleBuilder) {
l := types.ConvertTypesMultiLineStringToSQLMultiLineString(value.(types.MultiLineString))
b.PutGeometry(idx, l.Serialize())
case types.MultiPolygonKind:
p := types.ConvertTypesMultiPolygonToSQLMultiPolygon(value.(types.MultiPolygon))
b.PutGeometry(idx, p.Serialize())
default:
panic(fmt.Sprintf("unexpected NomsKind for geometry (%d)", nk))
}


@@ -240,6 +240,7 @@ func nomsKindsFromQueryTypes(qt query.Type) []types.NomsKind {
types.PolygonKind,
types.MultiPointKind,
types.MultiLineStringKind,
types.MultiPolygonKind,
}
case query.Type_JSON:


@@ -21,17 +21,12 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/set"
)
// ErrMappingFileRead is an error returned when a mapping file cannot be read
var ErrMappingFileRead = errors.New("error reading mapping file")
// ErrUnmarshallingMapping is an error used when a mapping file cannot be converted from json
var ErrUnmarshallingMapping = errors.New("error unmarshalling mapping")
// ErrEmptyMapping is an error returned when the mapping is empty (No src columns, no destination columns)
var ErrEmptyMapping = errors.New("empty mapping error")
@@ -46,12 +41,6 @@ func (err *BadMappingErr) Error() string {
return fmt.Sprintf("Mapping file attempted to map %s to %s, but one or both of those fields are unknown.", err.srcField, err.destField)
}
// IsBadMappingErr returns true if the error is a BadMappingErr
func IsBadMappingErr(err error) bool {
_, ok := err.(*BadMappingErr)
return ok
}
// NameMapper is a simple interface for mapping a string to another string
type NameMapper map[string]string
@@ -86,34 +75,6 @@ type FieldMapping struct {
SrcToDest map[uint64]uint64
}
// MapsAllDestPKs checks that each PK column in DestSch has a corresponding column in SrcSch
func (fm *FieldMapping) MapsAllDestPKs() bool {
ds := set.NewUint64Set(nil)
for _, v := range fm.SrcToDest {
ds.Add(v)
}
for _, tag := range fm.DestSch.GetPKCols().Tags {
if !ds.Contains(tag) {
return false
}
}
return true
}
func InvertMapping(fm *FieldMapping) *FieldMapping {
invertedMap := make(map[uint64]uint64)
for k, v := range fm.SrcToDest {
invertedMap[v] = k
}
return &FieldMapping{
SrcSch: fm.DestSch,
DestSch: fm.SrcSch,
SrcToDest: invertedMap,
}
}
// NewFieldMapping creates a FieldMapping from a source schema, a destination schema, and a map from tags in the source
// schema to tags in the dest schema.
func NewFieldMapping(srcSch, destSch schema.Schema, srcTagToDestTag map[uint64]uint64) (*FieldMapping, error) {
@@ -134,15 +95,6 @@ func NewFieldMapping(srcSch, destSch schema.Schema, srcTagToDestTag map[uint64]u
return &FieldMapping{srcSch, destSch, srcTagToDestTag}, nil
}
// Returns the identity mapping for the schema given.
func IdentityMapping(sch schema.Schema) *FieldMapping {
fieldMapping, err := TagMapping(sch, sch)
if err != nil {
panic("Error creating identity mapping")
}
return fieldMapping
}
// TagMapping takes a source schema and a destination schema and maps all columns which have a matching tag in the
// source and destination schemas.
func TagMapping(srcSch, destSch schema.Schema) (*FieldMapping, error) {
@@ -226,41 +178,6 @@ func NameMapperFromFile(mappingFile string, FS filesys.ReadableFS) (NameMapper,
return nm, nil
}
// TagMappingWithNameFallback takes a source schema and a destination schema and maps columns
// by matching tags first, then attempts to match by column name for any columns that didn't
// match with an exact tag.
func TagMappingWithNameFallback(srcSch, destSch schema.Schema) (*FieldMapping, error) {
successes := 0
srcCols := srcSch.GetAllCols()
destCols := destSch.GetAllCols()
srcToDest := make(map[uint64]uint64, destCols.Size())
err := destCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
srcCol, ok := srcCols.GetByTag(tag)
if !ok {
srcCol, ok = srcCols.GetByName(col.Name)
if !ok {
return false, nil
}
}
srcToDest[srcCol.Tag] = col.Tag
successes++
return false, nil
})
if err != nil {
return nil, err
}
if successes == 0 {
return nil, ErrEmptyMapping
}
return NewFieldMapping(srcSch, destSch, srcToDest)
}
// TagMappingByTagAndName takes a source schema and a destination schema and maps
// pks by tag and non-pks by name.
func TagMappingByTagAndName(srcSch, destSch schema.Schema) (*FieldMapping, error) {
@@ -276,8 +193,8 @@ func TagMappingByTagAndName(srcSch, destSch schema.Schema) (*FieldMapping, error
if j == -1 {
continue
}
srcTag := srcSch.GetPKCols().GetAtIndex(i).Tag
dstTag := destSch.GetPKCols().GetAtIndex(j).Tag
srcTag := srcSch.GetPKCols().GetByIndex(i).Tag
dstTag := destSch.GetPKCols().GetByIndex(j).Tag
srcToDest[srcTag] = dstTag
successes++
}
@@ -285,8 +202,8 @@ func TagMappingByTagAndName(srcSch, destSch schema.Schema) (*FieldMapping, error
if j == -1 {
continue
}
srcTag := srcSch.GetNonPKCols().GetAtIndex(i).Tag
dstTag := destSch.GetNonPKCols().GetAtIndex(j).Tag
srcTag := srcSch.GetNonPKCols().GetByIndex(i).Tag
dstTag := destSch.GetNonPKCols().GetByIndex(j).Tag
srcToDest[srcTag] = dstTag
successes++
}
@@ -297,29 +214,3 @@ func TagMappingByTagAndName(srcSch, destSch schema.Schema) (*FieldMapping, error
return NewFieldMapping(srcSch, destSch, srcToDest)
}
// TypedToUntypedMapping takes a schema and creates a mapping to an untyped schema with all the same columns.
func TypedToUntypedMapping(sch schema.Schema) (*FieldMapping, error) {
untypedSch, err := untyped.UntypeSchema(sch)
if err != nil {
return nil, err
}
identityMap := make(map[uint64]uint64)
err = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
identityMap[tag] = tag
return false, nil
})
if err != nil {
return nil, err
}
mapping, err := NewFieldMapping(sch, untypedSch, identityMap)
if err != nil {
panic(err)
}
return mapping, nil
}


@@ -105,10 +105,6 @@ func TestFieldMapping(t *testing.T) {
if !reflect.DeepEqual(mapping.SrcToDest, test.expected) {
t.Error("Mapping does not match expected. Expected:", test.expected, "Actual:", mapping.SrcToDest)
}
//if test.identity != mapping.IsIdentityMapping() {
// t.Error("identity expected", test.identity, "actual:", !test.identity)
//}
}
}
}


@@ -100,12 +100,6 @@ func (rc *RowConverter) ConvertWithWarnings(inRow row.Row, warnFn WarnFunction)
return rc.convert(inRow, warnFn)
}
// Convert takes an input row, maps its columns to destination columns, and performs any type conversion needed to
// create a row of the expected destination schema.
func (rc *RowConverter) Convert(inRow row.Row) (row.Row, error) {
return rc.convert(inRow, nil)
}
// convert takes a row and maps its columns to their destination columns, automatically performing any type conversion
// needed, and using the optional WarnFunction to let callers log warnings on any type conversion errors.
func (rc *RowConverter) convert(inRow row.Row, warnFn WarnFunction) (row.Row, error) {


@@ -17,12 +17,7 @@ package rowconv
import (
"context"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
)
@@ -39,52 +34,6 @@ var srcCols = schema.NewColCollection(
var srcSch = schema.MustSchemaFromCols(srcCols)
func TestRowConverter(t *testing.T) {
mapping, err := TypedToUntypedMapping(srcSch)
require.NoError(t, err)
vrw := types.NewMemoryValueStore()
rConv, err := NewRowConverter(context.Background(), vrw, mapping)
if err != nil {
t.Fatal("Error creating row converter")
}
id, _ := uuid.NewRandom()
tt := types.Timestamp(time.Now())
inRow, err := row.New(vrw.Format(), srcSch, row.TaggedValues{
0: types.UUID(id),
1: types.Float(1.25),
2: types.Uint(12345678),
3: types.Bool(true),
4: types.Int(-1234),
5: types.String("string string string"),
6: tt,
})
require.NoError(t, err)
outData, err := rConv.Convert(inRow)
require.NoError(t, err)
destSch := mapping.DestSch
expected, err := row.New(vrw.Format(), destSch, row.TaggedValues{
0: types.String(id.String()),
1: types.String("1.25"),
2: types.String("12345678"),
3: types.String("1"),
4: types.String("-1234"),
5: types.String("string string string"),
6: types.String(tt.String()),
})
require.NoError(t, err)
if !row.AreEqual(outData, expected, destSch) {
t.Error("\n", row.Fmt(context.Background(), expected, destSch), "!=\n", row.Fmt(context.Background(), outData, destSch))
}
}
func TestUnneccessaryConversion(t *testing.T) {
mapping, err := TagMapping(srcSch, srcSch)
if err != nil {


@@ -118,10 +118,6 @@ func (cc *ColCollection) GetColumns() []Column {
return colsCopy
}
func (cc *ColCollection) GetAtIndex(i int) Column {
return cc.cols[i]
}
// GetColumnNames returns a list of names of the columns.
func (cc *ColCollection) GetColumnNames() []string {
names := make([]string, len(cc.cols))
@@ -177,7 +173,7 @@ func (cc *ColCollection) Iter(cb func(tag uint64, col Column) (stop bool, err er
return nil
}
// IterInSortOrder iterates over all the columns from lowest tag to highest tag.
// IterInSortedOrder iterates over all the columns from lowest tag to highest tag.
func (cc *ColCollection) IterInSortedOrder(cb func(tag uint64, col Column) (stop bool)) {
for _, tag := range cc.SortedTags {
val := cc.TagToCol[tag]
@@ -199,7 +195,7 @@ func (cc *ColCollection) GetByName(name string) (Column, bool) {
return InvalidCol, false
}
// GetByNameCaseInensitive takes the name of a column and returns the column and true if there is a column with that
// GetByNameCaseInsensitive takes the name of a column and returns the column and true if there is a column with that
// name ignoring case. Otherwise InvalidCol and false are returned. If multiple columns have the same case-insensitive
// name, the first declared one is returned.
func (cc *ColCollection) GetByNameCaseInsensitive(name string) (Column, bool) {
@@ -245,39 +241,15 @@ func ColCollsAreEqual(cc1, cc2 *ColCollection) bool {
if cc1.Size() != cc2.Size() {
return false
}
// Columns need to be in the same order and equivalent.
for i := 0; i < cc1.Size(); i++ {
if !cc1.GetAtIndex(i).Equals(cc2.GetAtIndex(i)) {
if !cc1.cols[i].Equals(cc2.cols[i]) {
return false
}
}
return true
}
// ColCollsAreCompatible determines whether two ColCollections are compatible with each other. Compatible columns have
// the same tags and storage types, but may have different names, constraints or SQL type parameters.
func ColCollsAreCompatible(cc1, cc2 *ColCollection) bool {
if cc1.Size() != cc2.Size() {
return false
}
areCompatible := true
_ = cc1.Iter(func(tag uint64, col1 Column) (stop bool, err error) {
col2, ok := cc2.GetByTag(tag)
if !ok || !col1.Compatible(col2) {
areCompatible = false
return true, nil
}
return false, nil
})
return areCompatible
}
// MapColCollection applies a function to each column in a ColCollection and creates a new ColCollection from the results.
func MapColCollection(cc *ColCollection, cb func(col Column) Column) *ColCollection {
mapped := make([]Column, cc.Size())
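Call sites throughout this commit move from GetAtIndex to GetByIndex. Based on the accessor removed above, the replacement presumably keeps the same index-based lookup; this is an assumption, sketched for reference rather than taken from the commit.

// GetByIndex returns the column at index i (assumed to mirror the removed
// GetAtIndex, as the renamed call sites in this commit imply).
func (cc *ColCollection) GetByIndex(i int) Column {
	return cc.cols[i]
}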


@@ -209,8 +209,8 @@ func ArePrimaryKeySetsDiffable(format *types.NomsBinFormat, fromSch, toSch Schem
}
for i := 0; i < cc1.Size(); i++ {
c1 := cc1.GetAtIndex(i)
c2 := cc2.GetAtIndex(i)
c1 := cc1.GetByIndex(i)
c2 := cc2.GetByIndex(i)
if (c1.Tag != c2.Tag) || (c1.IsPartOfPK != c2.IsPartOfPK) {
return false
}


@@ -222,6 +222,8 @@ func bitTypeConverter(ctx context.Context, src *bitType, destTi TypeInfo) (tc Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -252,6 +252,8 @@ func blobStringTypeConverter(ctx context.Context, src *blobStringType, destTi Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -239,6 +239,8 @@ func boolTypeConverter(ctx context.Context, src *boolType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -233,6 +233,8 @@ func datetimeTypeConverter(ctx context.Context, src *datetimeType, destTi TypeIn
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -272,6 +272,8 @@ func decimalTypeConverter(ctx context.Context, src *decimalType, destTi TypeInfo
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -239,6 +239,8 @@ func enumTypeConverter(ctx context.Context, src *enumType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -263,6 +263,8 @@ func floatTypeConverter(ctx context.Context, src *floatType, destTi TypeInfo) (t
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -55,6 +55,8 @@ func (ti *geometryType) ConvertNomsValueToValue(v types.Value) (interface{}, err
return types.ConvertTypesMultiPointToSQLMultiPoint(val), nil
case types.MultiLineString:
return types.ConvertTypesMultiLineStringToSQLMultiLineString(val), nil
case types.MultiPolygon:
return types.ConvertTypesMultiPolygonToSQLMultiPolygon(val), nil
default:
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
}
@@ -87,6 +89,10 @@ func (ti *geometryType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecRea
if val, err = reader.ReadMultiLineString(); err != nil {
return nil, err
}
case types.MultiPolygonKind:
if val, err = reader.ReadMultiPolygon(); err != nil {
return nil, err
}
case types.GeometryKind:
// Note: GeometryKind is no longer written
// included here for backward compatibility
@@ -148,6 +154,8 @@ func (ti *geometryType) FormatValue(v types.Value) (*string, error) {
return MultiPointType.FormatValue(val)
case types.MultiLineString:
return MultiLineStringType.FormatValue(val)
case types.MultiPolygon:
return MultiPolygonType.FormatValue(val)
case types.Geometry:
switch inner := val.Inner.(type) {
case types.Point:
@@ -160,6 +168,8 @@ func (ti *geometryType) FormatValue(v types.Value) (*string, error) {
return MultiPointType.FormatValue(inner)
case types.MultiLineString:
return MultiLineStringType.FormatValue(inner)
case types.MultiPolygon:
return MultiPolygonType.FormatValue(inner)
default:
return nil, fmt.Errorf(`"%v" has unexpectedly encountered a value of type "%T" from embedded type`, ti.String(), v.Kind())
}
@@ -189,7 +199,10 @@ func (ti *geometryType) IsValid(v types.Value) bool {
case types.Geometry,
types.Point,
types.LineString,
types.Polygon:
types.Polygon,
types.MultiPoint,
types.MultiLineString,
types.MultiPolygon:
return true
default:
return false
@@ -249,6 +262,8 @@ func geometryTypeConverter(ctx context.Context, src *geometryType, destTi TypeIn
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -253,6 +253,8 @@ func inlineBlobTypeConverter(ctx context.Context, src *inlineBlobType, destTi Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -291,6 +291,8 @@ func intTypeConverter(ctx context.Context, src *intType, destTi TypeInfo) (tc Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -181,6 +181,8 @@ func jsonTypeConverter(ctx context.Context, src *jsonType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -181,6 +181,8 @@ func linestringTypeConverter(ctx context.Context, src *linestringType, destTi Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -181,6 +181,8 @@ func multilinestringTypeConverter(ctx context.Context, src *multilinestringType,
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -181,6 +181,8 @@ func multipointTypeConverter(ctx context.Context, src *multipointType, destTi Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -0,0 +1,229 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package typeinfo
import (
"context"
"fmt"
"strconv"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/store/types"
)
// This is a dolt implementation of the MySQL type MultiPolygon, thus most of the functionality
// within is directly reliant on the go-mysql-server implementation.
type multipolygonType struct {
sqlMultiPolygonType sql.MultiPolygonType
}
var _ TypeInfo = (*multipolygonType)(nil)
var MultiPolygonType = &multipolygonType{sql.MultiPolygonType{}}
// ConvertNomsValueToValue implements TypeInfo interface.
func (ti *multipolygonType) ConvertNomsValueToValue(v types.Value) (interface{}, error) {
// Check for null
if _, ok := v.(types.Null); ok || v == nil {
return nil, nil
}
// Expect a types.MultiPolygon, return a sql.MultiPolygon
if val, ok := v.(types.MultiPolygon); ok {
return types.ConvertTypesMultiPolygonToSQLMultiPolygon(val), nil
}
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
}
// ReadFrom reads a go value from a noms types.CodecReader directly
func (ti *multipolygonType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecReader) (interface{}, error) {
k := reader.ReadKind()
switch k {
case types.MultiPolygonKind:
p, err := reader.ReadMultiPolygon()
if err != nil {
return nil, err
}
return ti.ConvertNomsValueToValue(p)
case types.NullKind:
return nil, nil
}
return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), k)
}
// ConvertValueToNomsValue implements TypeInfo interface.
func (ti *multipolygonType) ConvertValueToNomsValue(ctx context.Context, vrw types.ValueReadWriter, v interface{}) (types.Value, error) {
// Check for null
if v == nil {
return types.NullValue, nil
}
// Convert to sql.MultiPolygon
mpoly, err := ti.sqlMultiPolygonType.Convert(v)
if err != nil {
return nil, err
}
return types.ConvertSQLMultiPolygonToTypesMultiPolygon(mpoly.(sql.MultiPolygon)), nil
}
// Equals implements TypeInfo interface.
func (ti *multipolygonType) Equals(other TypeInfo) bool {
if other == nil {
return false
}
if o, ok := other.(*multipolygonType); ok {
// if neither ti nor other has a defined SRID they are considered equal; otherwise, compare SRID values
return (!ti.sqlMultiPolygonType.DefinedSRID && !o.sqlMultiPolygonType.DefinedSRID) || ti.sqlMultiPolygonType.SRID == o.sqlMultiPolygonType.SRID
}
return false
}
// FormatValue implements TypeInfo interface.
func (ti *multipolygonType) FormatValue(v types.Value) (*string, error) {
if val, ok := v.(types.MultiPolygon); ok {
resStr := string(types.SerializeMultiPolygon(val))
return &resStr, nil
}
if _, ok := v.(types.Null); ok || v == nil {
return nil, nil
}
return nil, fmt.Errorf(`"%v" has unexpectedly encountered a value of type "%T" from embedded type`, ti.String(), v.Kind())
}
// GetTypeIdentifier implements TypeInfo interface.
func (ti *multipolygonType) GetTypeIdentifier() Identifier {
return MultiPolygonTypeIdentifier
}
// GetTypeParams implements TypeInfo interface.
func (ti *multipolygonType) GetTypeParams() map[string]string {
return map[string]string{"SRID": strconv.FormatUint(uint64(ti.sqlMultiPolygonType.SRID), 10),
"DefinedSRID": strconv.FormatBool(ti.sqlMultiPolygonType.DefinedSRID)}
}
// IsValid implements TypeInfo interface.
func (ti *multipolygonType) IsValid(v types.Value) bool {
if _, ok := v.(types.MultiPolygon); ok {
return true
}
if _, ok := v.(types.Null); ok || v == nil {
return true
}
return false
}
// NomsKind implements TypeInfo interface.
func (ti *multipolygonType) NomsKind() types.NomsKind {
return types.MultiPolygonKind
}
// Promote implements TypeInfo interface.
func (ti *multipolygonType) Promote() TypeInfo {
return &multipolygonType{ti.sqlMultiPolygonType.Promote().(sql.MultiPolygonType)}
}
// String implements TypeInfo interface.
func (ti *multipolygonType) String() string {
return "MultiPolygon"
}
// ToSqlType implements TypeInfo interface.
func (ti *multipolygonType) ToSqlType() sql.Type {
return ti.sqlMultiPolygonType
}
// multipolygonTypeConverter is an internal function for GetTypeConverter that handles the specific type as the source TypeInfo.
func multipolygonTypeConverter(ctx context.Context, src *multipolygonType, destTi TypeInfo) (tc TypeConverter, needsConversion bool, err error) {
switch dest := destTi.(type) {
case *bitType:
return func(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (types.Value, error) {
return types.Uint(0), nil
}, true, nil
case *blobStringType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *boolType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *datetimeType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *decimalType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *enumType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *floatType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *geometryType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *inlineBlobType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *intType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *jsonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *linestringType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multilinestringType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *setType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *timeType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *uintType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *uuidType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *varBinaryType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *varStringType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *yearType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
default:
return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
}
}
func CreateMultiPolygonTypeFromParams(params map[string]string) (TypeInfo, error) {
var (
err error
sridVal uint64
def bool
)
if s, ok := params["SRID"]; ok {
sridVal, err = strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
}
if d, ok := params["DefinedSRID"]; ok {
def, err = strconv.ParseBool(d)
if err != nil {
return nil, err
}
}
return &multipolygonType{sqlMultiPolygonType: sql.MultiPolygonType{SRID: uint32(sridVal), DefinedSRID: def}}, nil
}
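A minimal usage sketch for the constructor above; hypothetical and not part of this commit. The SRID value is an arbitrary example, and the import path assumes the typeinfo package location used elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
)

func main() {
	// Rebuild the type from serialized params, as FromTypeParams does for
	// MultiPolygonTypeIdentifier, then read the same params back.
	ti, err := typeinfo.CreateMultiPolygonTypeFromParams(map[string]string{
		"SRID":        "4326",
		"DefinedSRID": "true",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ti.String())        // MultiPolygon
	fmt.Println(ti.GetTypeParams()) // map[DefinedSRID:true SRID:4326]
}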


@@ -181,6 +181,8 @@ func pointTypeConverter(ctx context.Context, src *pointType, destTi TypeInfo) (t
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -181,6 +181,8 @@ func polygonTypeConverter(ctx context.Context, src *polygonType, destTi TypeInfo
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -222,6 +222,8 @@ func setTypeConverter(ctx context.Context, src *setType, destTi TypeInfo) (tc Ty
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -164,6 +164,8 @@ func timeTypeConverter(ctx context.Context, src *timeType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -79,6 +79,8 @@ func GetTypeConverter(ctx context.Context, srcTi TypeInfo, destTi TypeInfo) (tc
return multilinestringTypeConverter(ctx, src, destTi)
case *multipointType:
return multipointTypeConverter(ctx, src, destTi)
case *multipolygonType:
return multipolygonTypeConverter(ctx, src, destTi)
case *pointType:
return pointTypeConverter(ctx, src, destTi)
case *polygonType:
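From the caller's side, the new dispatch case works like any other source type; a hedged sketch, not from this commit, assuming a caller that already holds a context, a types.ValueReadWriter, and a noms value.

package example

import (
	"context"

	"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
	"github.com/dolthub/dolt/go/store/types"
)

// convertMultiPolygon routes through GetTypeConverter, which now selects
// multipolygonTypeConverter whenever the source TypeInfo is a MultiPolygon.
func convertMultiPolygon(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (types.Value, error) {
	tc, needsConversion, err := typeinfo.GetTypeConverter(ctx, typeinfo.MultiPolygonType, typeinfo.GeometryType)
	if err != nil {
		return nil, err
	}
	if !needsConversion {
		return v, nil // already in the destination representation
	}
	return tc(ctx, vrw, v)
}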


@@ -53,6 +53,7 @@ const (
PolygonTypeIdentifier Identifier = "polygon"
MultiPointTypeIdentifier Identifier = "multipoint"
MultiLineStringTypeIdentifier Identifier = "multilinestring"
MultiPolygonTypeIdentifier Identifier = "multipolygon"
)
var Identifiers = map[Identifier]struct{}{
@@ -81,6 +82,7 @@ var Identifiers = map[Identifier]struct{}{
PolygonTypeIdentifier: {},
MultiPointTypeIdentifier: {},
MultiLineStringTypeIdentifier: {},
MultiPolygonTypeIdentifier: {},
}
// TypeInfo is an interface used for encoding type information.
@@ -179,6 +181,8 @@ func FromSqlType(sqlType sql.Type) (TypeInfo, error) {
return &multipointType{}, nil
case sql.MultiLineStringType{}.String():
return &multilinestringType{}, nil
case sql.MultiPolygonType{}.String():
return &multipolygonType{}, nil
case sql.GeometryType{}.String():
return &geometryType{sqlGeometryType: sqlType.(sql.GeometryType)}, nil
default:
@@ -290,6 +294,8 @@ func FromTypeParams(id Identifier, params map[string]string) (TypeInfo, error) {
return CreateMultiPointTypeFromParams(params)
case MultiLineStringTypeIdentifier:
return CreateMultiLineStringTypeFromParams(params)
case MultiPolygonTypeIdentifier:
return CreateMultiPolygonTypeFromParams(params)
case SetTypeIdentifier:
return CreateSetTypeFromParams(params)
case TimeTypeIdentifier:


@@ -359,6 +359,7 @@ func generateTypeInfoArrays(t *testing.T) ([][]TypeInfo, [][]types.Value) {
{PolygonType},
{MultiPointType},
{MultiLineStringType},
{MultiPolygonType},
{GeometryType},
generateSetTypes(t, 16),
{TimeType},
@@ -394,9 +395,10 @@ func generateTypeInfoArrays(t *testing.T) ([][]TypeInfo, [][]types.Value) {
json.MustTypesJSON(`false`), json.MustTypesJSON(`{"a": 1, "b": []}`)}, //JSON
{types.LineString{SRID: 0, Points: []types.Point{{SRID: 0, X: 1, Y: 2}, {SRID: 0, X: 3, Y: 4}}}}, // LineString
{types.Point{SRID: 0, X: 1, Y: 2}}, // Point
{types.Polygon{SRID: 0, Lines: []types.LineString{{SRID: 0, Points: []types.Point{{SRID: 0, X: 0, Y: 0}, {SRID: 0, X: 0, Y: 1}, {SRID: 0, X: 1, Y: 1}, {SRID: 0, X: 0, Y: 0}}}}}}, // Polygon
{types.MultiPoint{SRID: 0, Points: []types.Point{{SRID: 0, X: 1, Y: 2}, {SRID: 0, X: 3, Y: 4}}}}, // MultiPoint
{types.MultiLineString{SRID: 0, Lines: []types.LineString{{SRID: 0, Points: []types.Point{{SRID: 0, X: 0, Y: 0}, {SRID: 0, X: 0, Y: 1}, {SRID: 0, X: 1, Y: 1}, {SRID: 0, X: 0, Y: 0}}}}}}, // MultiLineString
{types.MultiPolygon{SRID: 0, Polygons: []types.Polygon{{SRID: 0, Lines: []types.LineString{{SRID: 0, Points: []types.Point{{SRID: 0, X: 0, Y: 0}, {SRID: 0, X: 0, Y: 1}, {SRID: 0, X: 1, Y: 1}, {SRID: 0, X: 0, Y: 0}}}}}}}}, // MultiPolygon
{types.Geometry{Inner: types.Point{SRID: 0, X: 1, Y: 2}}}, // Geometry holding a Point
{types.Uint(1), types.Uint(5), types.Uint(64), types.Uint(42), types.Uint(192)}, //Set
{types.Int(0), types.Int(1000000 /*"00:00:01"*/), types.Int(113000000 /*"00:01:53"*/), types.Int(247019000000 /*"68:36:59"*/), types.Int(458830485214 /*"127:27:10.485214"*/)}, //Time


@@ -291,6 +291,8 @@ func uintTypeConverter(ctx context.Context, src *uintType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -169,6 +169,8 @@ func uuidTypeConverter(ctx context.Context, src *uuidType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -281,6 +281,8 @@ func varBinaryTypeConverter(ctx context.Context, src *varBinaryType, destTi Type
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -304,6 +304,8 @@ func varStringTypeConverter(ctx context.Context, src *varStringType, destTi Type
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -180,6 +180,8 @@ func yearTypeConverter(ctx context.Context, src *yearType, destTi TypeInfo) (tc
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *multipolygonType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *pointType:
return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
case *polygonType:


@@ -430,7 +430,7 @@ func ConfigureReplicationDatabaseHook(ctx *sql.Context, p DoltDatabaseProvider,
}
err = newEnv.AddRemote(r)
if err != nil {
if err != env.ErrRemoteAlreadyExists && err != nil {
return err
}
@@ -448,7 +448,11 @@ func ConfigureReplicationDatabaseHook(ctx *sql.Context, p DoltDatabaseProvider,
}
// CloneDatabaseFromRemote implements DoltDatabaseProvider interface
func (p DoltDatabaseProvider) CloneDatabaseFromRemote(ctx *sql.Context, dbName, branch, remoteName, remoteUrl string, remoteParams map[string]string) error {
func (p DoltDatabaseProvider) CloneDatabaseFromRemote(
ctx *sql.Context,
dbName, branch, remoteName, remoteUrl string,
remoteParams map[string]string,
) error {
p.mu.Lock()
defer p.mu.Unlock()
@@ -459,7 +463,7 @@ func (p DoltDatabaseProvider) CloneDatabaseFromRemote(ctx *sql.Context, dbName,
return fmt.Errorf("cannot create DB, file exists at %s", dbName)
}
err := p.cloneDatabaseFromRemote(ctx, dbName, remoteName, branch, remoteUrl, remoteParams)
dEnv, err := p.cloneDatabaseFromRemote(ctx, dbName, remoteName, branch, remoteUrl, remoteParams)
if err != nil {
// Make a best effort to clean up any artifacts on disk from a failed clone
// before we return the error
@@ -473,7 +477,7 @@ func (p DoltDatabaseProvider) CloneDatabaseFromRemote(ctx *sql.Context, dbName,
return err
}
return nil
return ConfigureReplicationDatabaseHook(ctx, p, dbName, dEnv)
}
// cloneDatabaseFromRemote encapsulates the inner logic for cloning a database so that if any error
@@ -484,26 +488,26 @@ func (p DoltDatabaseProvider) cloneDatabaseFromRemote(
ctx *sql.Context,
dbName, remoteName, branch, remoteUrl string,
remoteParams map[string]string,
) error {
) (*env.DoltEnv, error) {
if p.remoteDialer == nil {
return fmt.Errorf("unable to clone remote database; no remote dialer configured")
return nil, fmt.Errorf("unable to clone remote database; no remote dialer configured")
}
// TODO: params for AWS, others that need them
r := env.NewRemote(remoteName, remoteUrl, nil)
srcDB, err := r.GetRemoteDB(ctx, types.Format_Default, p.remoteDialer)
if err != nil {
return err
return nil, err
}
dEnv, err := actions.EnvForClone(ctx, srcDB.ValueReadWriter().Format(), r, dbName, p.fs, "VERSION", env.GetCurrentUserHomeDir)
if err != nil {
return err
return nil, err
}
err = actions.CloneRemote(ctx, srcDB, remoteName, branch, dEnv)
if err != nil {
return err
return nil, err
}
err = dEnv.RepoStateWriter().UpdateBranch(dEnv.RepoState.CWBHeadRef().GetPath(), env.BranchConfig{
@@ -514,7 +518,7 @@ func (p DoltDatabaseProvider) cloneDatabaseFromRemote(
sess := dsess.DSessFromSess(ctx.Session)
fkChecks, err := ctx.GetSessionVariable(ctx, "foreign_key_checks")
if err != nil {
return err
return nil, err
}
opts := editor.Options{
@@ -525,17 +529,22 @@ func (p DoltDatabaseProvider) cloneDatabaseFromRemote(
db, err := NewDatabase(ctx, dbName, dEnv.DbData(), opts)
if err != nil {
return err
return nil, err
}
p.databases[formatDbMapKeyName(db.Name())] = db
dbstate, err := GetInitialDBState(ctx, db)
if err != nil {
return err
return nil, err
}
return sess.AddDB(ctx, dbstate)
err = sess.AddDB(ctx, dbstate)
if err != nil {
return nil, err
}
return dEnv, nil
}
// DropDatabase implements the sql.MutableDatabaseProvider interface


@@ -240,7 +240,7 @@ func ordinalMappingsForSecondaryIndex(sch schema.Schema, def schema.Index) (ord
ord = make(val.OrdinalMapping, secondary.Size())
for i := range ord {
name := secondary.GetAtIndex(i).Name
name := secondary.GetByIndex(i).Name
ord[i] = -1
pks := sch.GetPKCols().GetColumns()


@@ -288,7 +288,7 @@ func (s *durableIndexState) coversAllColumns(i *doltIndex) bool {
}
covers := true
for i := 0; i < cols.Size(); i++ {
col := cols.GetAtIndex(i)
col := cols.GetByIndex(i)
if _, ok := idxCols.GetByNameCaseInsensitive(col.Name); !ok {
covers = false
break


@@ -286,6 +286,8 @@ func deserializeGeometry(buf []byte) (v interface{}) {
v, _ = sql.DeserializeMPoint(buf, false, srid)
case sql.WKBMultiLineID:
v, _ = sql.DeserializeMLine(buf, false, srid)
case sql.WKBMultiPolyID:
v, _ = sql.DeserializeMPoly(buf, false, srid)
default:
panic(fmt.Sprintf("unknown geometry type %d", typ))
}


@@ -334,7 +334,7 @@ func SqlRowAsTupleString(r sql.Row, tableSch schema.Schema) (string, error) {
if seenOne {
b.WriteRune(',')
}
col := tableSch.GetAllCols().GetAtIndex(i)
col := tableSch.GetAllCols().GetByIndex(i)
str := "NULL"
if val != nil {
str, err = interfaceValueAsSqlString(col.TypeInfo, val)
@@ -534,7 +534,7 @@ func interfaceValueAsSqlString(ti typeinfo.TypeInfo, value interface{}) (string,
return "", fmt.Errorf("typeinfo.VarStringTypeIdentifier is not types.String")
}
return quoteAndEscapeString(string(s)), nil
case typeinfo.GeometryTypeIdentifier, typeinfo.PointTypeIdentifier, typeinfo.LineStringTypeIdentifier, typeinfo.PolygonTypeIdentifier, typeinfo.MultiPointTypeIdentifier, typeinfo.MultiLineStringTypeIdentifier:
case typeinfo.GeometryTypeIdentifier, typeinfo.PointTypeIdentifier, typeinfo.LineStringTypeIdentifier, typeinfo.PolygonTypeIdentifier, typeinfo.MultiPointTypeIdentifier, typeinfo.MultiLineStringTypeIdentifier, typeinfo.MultiPolygonTypeIdentifier:
return singleQuote + str + singleQuote, nil
default:
return str, nil


@@ -223,9 +223,9 @@ func AlterTableAddPrimaryKeys(tableName string, pks *schema.ColCollection) strin
for i := 0; i < pks.Size(); i++ {
if i == 0 {
b.WriteString(pks.GetAtIndex(i).Name)
b.WriteString(pks.GetByIndex(i).Name)
} else {
b.WriteString("," + pks.GetAtIndex(i).Name)
b.WriteString("," + pks.GetByIndex(i).Name)
}
}
b.WriteRune(')')


@@ -142,7 +142,7 @@ func DoltKeyAndMappingFromSqlRow(ctx context.Context, vrw types.ValueReadWriter,
}
for i := 0; i < numCols; i++ {
schCol := allCols.GetAtIndex(i)
schCol := allCols.GetByIndex(i)
val := r[i]
if val == nil {
continue


@@ -2110,7 +2110,7 @@ func (t *AlterableDoltTable) AddForeignKey(ctx *sql.Context, sqlFk sql.ForeignKe
//TODO: use the primary key as-is
var refPkTags []uint64
for _, i := range refSch.GetPkOrdinals() {
refPkTags = append(refPkTags, refSch.GetAllCols().GetAtIndex(i).Tag)
refPkTags = append(refPkTags, refSch.GetAllCols().GetByIndex(i).Tag)
}
var colNames []string
@@ -2328,7 +2328,7 @@ func (t *AlterableDoltTable) UpdateForeignKey(ctx *sql.Context, fkName string, s
//TODO: use the primary key as-is
var refPkTags []uint64
for _, i := range refSch.GetPkOrdinals() {
refPkTags = append(refPkTags, refSch.GetAllCols().GetAtIndex(i).Tag)
refPkTags = append(refPkTags, refSch.GetAllCols().GetByIndex(i).Tag)
}
var colNames []string


@@ -368,7 +368,7 @@ func ordinalMappingsFromSchema(from sql.Schema, to schema.Schema) (km, vm val.Or
func makeOrdinalMapping(from sql.Schema, to *schema.ColCollection) (m val.OrdinalMapping) {
m = make(val.OrdinalMapping, len(to.GetColumns()))
for i := range m {
name := to.GetAtIndex(i).Name
name := to.GetByIndex(i).Name
for j, col := range from {
if col.Name == name {
m[i] = j


@@ -1,92 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"io"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
)
// CompositeTableReader is a TableReader implementation which will concatenate the results
// of multiple TableReader instances into a single set of results.
type CompositeTableReader struct {
sch schema.Schema
readers []ReadCloser
idx int
}
// NewCompositeTableReader creates a new CompositeTableReader instance from a slice of TableReadClosers.
func NewCompositeTableReader(readers []ReadCloser) (*CompositeTableReader, error) {
if len(readers) == 0 {
panic("nothing to iterate")
}
sch := readers[0].GetSchema()
for i := 1; i < len(readers); i++ {
otherSch := readers[i].GetSchema()
if !schema.SchemasAreEqual(sch, otherSch) {
panic("readers must have the same schema")
}
}
return &CompositeTableReader{sch: sch, readers: readers, idx: 0}, nil
}
// GetSchema gets the schema of the rows that this reader will return
func (rd *CompositeTableReader) GetSchema() schema.Schema {
return rd.sch
}
// ReadRow reads a row from a table. If there is a bad row the returned error will be non-nil, and calling
// IsBadRow(err) will return true. This is a potentially non-fatal error and callers can decide if they want to
// continue on a bad row, or fail.
func (rd *CompositeTableReader) ReadRow(ctx context.Context) (row.Row, error) {
for rd.idx < len(rd.readers) {
r, err := rd.readers[rd.idx].ReadRow(ctx)
if err == io.EOF {
rd.idx++
continue
} else if err != nil {
return nil, err
}
return r, nil
}
return nil, io.EOF
}
// VerifySchema checks that the incoming schema matches the schema from the existing table
func (rd *CompositeTableReader) VerifySchema(outSch schema.Schema) (bool, error) {
return schema.VerifyInSchema(rd.sch, outSch)
}
// Close should release resources being held
func (rd *CompositeTableReader) Close(ctx context.Context) error {
var firstErr error
for _, rdr := range rd.readers {
err := rdr.Close(ctx)
if firstErr == nil {
firstErr = err
}
}
return firstErr
}

View File

@@ -1,93 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
)
func TestCompositeTableReader(t *testing.T) {
const (
numReaders = 7
numItemsPerReader = 7
)
ctx := context.Background()
coll := schema.NewColCollection(
schema.NewColumn("id", 0, types.UintKind, true, schema.NotNullConstraint{}),
schema.NewColumn("val", 1, types.IntKind, false),
)
sch, err := schema.SchemaFromCols(coll)
require.NoError(t, err)
var readers []ReadCloser
var expectedKeys []uint64
var expectedVals []int64
for i := 0; i < numReaders; i++ {
var rows []row.Row
for j := 0; j < numItemsPerReader; j++ {
idx := j + (i * numItemsPerReader)
expectedKeys = append(expectedKeys, uint64(idx))
expectedVals = append(expectedVals, int64(idx))
r, err := row.New(types.Format_Default, sch, row.TaggedValues{
0: types.Uint(uint64(idx)),
1: types.Int(idx),
})
require.NoError(t, err)
rows = append(rows, r)
}
imt := NewInMemTableWithData(sch, rows)
rd := NewInMemTableReader(imt)
readers = append(readers, rd)
}
compositeRd, err := NewCompositeTableReader(readers)
require.NoError(t, err)
var keys []uint64
var vals []int64
for {
r, err := compositeRd.ReadRow(ctx)
if err == io.EOF {
break
}
assert.NoError(t, err)
val0, ok := r.GetColVal(0)
assert.True(t, ok)
val1, ok := r.GetColVal(1)
assert.True(t, ok)
keys = append(keys, uint64(val0.(types.Uint)))
vals = append(vals, int64(val1.(types.Int)))
}
assert.Equal(t, expectedKeys, keys)
assert.Equal(t, expectedVals, vals)
err = compositeRd.Close(ctx)
assert.NoError(t, err)
}

View File

@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
@@ -437,14 +436,14 @@ func (kte *keylessTableEditor) flush(ctx context.Context) error {
}
kte.eg.Go(func() (err error) {
kte.tbl, err = applyEdits(ctx, tbl, acc, kte.indexEds, nil)
kte.tbl, err = applyEdits(ctx, tbl, acc, kte.indexEds)
return err
})
return nil
}
func applyEdits(ctx context.Context, tbl *doltdb.Table, acc keylessEditAcc, indexEds []*IndexEditor, errFunc PKDuplicateCb) (_ *doltdb.Table, retErr error) {
func applyEdits(ctx context.Context, tbl *doltdb.Table, acc keylessEditAcc, indexEds []*IndexEditor) (_ *doltdb.Table, retErr error) {
rowData, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
@@ -463,7 +462,7 @@ func applyEdits(ctx context.Context, tbl *doltdb.Table, acc keylessEditAcc, inde
}
ed := rowData.Edit()
iter := table.NewMapPointReader(rowData, keys...)
iter := newMapPointReader(rowData, keys)
var ok bool
for {
@@ -568,6 +567,44 @@ func applyEdits(ctx context.Context, tbl *doltdb.Table, acc keylessEditAcc, inde
return tbl.UpdateNomsRows(ctx, rowData)
}
type pointReader struct {
m types.Map
keys []types.Tuple
emptyTuple types.Tuple
idx int
}
func newMapPointReader(m types.Map, keys []types.Tuple) *pointReader {
return &pointReader{
m: m,
keys: keys,
emptyTuple: types.EmptyTuple(m.Format()),
}
}
// NextTuple implements types.MapIterator.
func (pr *pointReader) NextTuple(ctx context.Context) (k, v types.Tuple, err error) {
if pr.idx >= len(pr.keys) {
return types.Tuple{}, types.Tuple{}, io.EOF
}
k = pr.keys[pr.idx]
v = pr.emptyTuple
var ok bool
// TODO: optimize by implementing MapIterator.Seek()
v, ok, err = pr.m.MaybeGetTuple(ctx, k)
pr.idx++
if err != nil {
return types.Tuple{}, types.Tuple{}, err
} else if !ok {
return k, pr.emptyTuple, nil
}
return k, v, nil
}
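The unexported pointReader above replaces table.NewMapPointReader with a type-local copy. The pattern — iterate a fixed key set, probing the map and yielding an empty value for misses — is small enough to show standalone; this sketch uses a plain Go map under illustrative names, not the noms types:

package main

import (
	"fmt"
	"io"
)

// pointReader probes a map for a fixed set of keys in order,
// returning io.EOF once every key has been visited.
type pointReader struct {
	m    map[string]int
	keys []string
	idx  int
}

// next returns the key, the value (zero-valued when absent, mirroring
// the emptyTuple convention above), and whether the key was present.
func (pr *pointReader) next() (string, int, bool, error) {
	if pr.idx >= len(pr.keys) {
		return "", 0, false, io.EOF
	}
	k := pr.keys[pr.idx]
	pr.idx++
	v, ok := pr.m[k]
	return k, v, ok, nil
}

func main() {
	pr := &pointReader{m: map[string]int{"a": 1, "c": 3}, keys: []string{"a", "b", "c"}}
	for {
		k, v, ok, err := pr.next()
		if err == io.EOF {
			break
		}
		fmt.Println(k, v, ok) // "b" comes back with ok=false
	}
}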
// for deletes (cardinality < 1): |ok| is set false
func initializeCardinality(val types.Tuple, card int64) (v types.Tuple, ok bool, err error) {
if card < 1 {

View File

@@ -162,28 +162,3 @@ func (rd *InMemTableReader) GetSchema() schema.Schema {
func (rd *InMemTableReader) VerifySchema(outSch schema.Schema) (bool, error) {
return schema.VerifyInSchema(rd.tt.sch, outSch)
}
// InMemTableWriter is an implementation of a RowWriter for an InMemTable
type InMemTableWriter struct {
tt *InMemTable
}
// NewInMemTableWriter creates an instance of a RowWriter from an InMemTable
func NewInMemTableWriter(imt *InMemTable) *InMemTableWriter {
return &InMemTableWriter{imt}
}
// WriteRow will write a row to a table
func (w *InMemTableWriter) WriteRow(ctx context.Context, r row.Row) error {
return w.tt.AppendRow(r)
}
// Close should flush all writes, release resources being held
func (w *InMemTableWriter) Close(ctx context.Context) error {
return nil
}
// GetSchema gets the schema of the rows that this writer writes
func (w *InMemTableWriter) GetSchema() schema.Schema {
return w.tt.sch
}

View File

@@ -76,12 +76,8 @@ func TestInMemTable(t *testing.T) {
imt := NewInMemTable(rowSch)
func() {
var wr TableWriteCloser
wr = NewInMemTableWriter(imt)
defer wr.Close(context.Background())
for _, row := range rows {
err := wr.WriteRow(context.Background(), row)
for _, r := range rows {
err := imt.AppendRow(r)
if err != nil {
t.Fatal("Failed to write row")
@@ -111,114 +107,3 @@ func TestInMemTable(t *testing.T) {
}
}()
}
func TestPipeRows(t *testing.T) {
imt := NewInMemTableWithData(rowSch, rows)
imtt2 := NewInMemTable(rowSch)
var err error
func() {
rd := NewInMemTableReader(imt)
defer rd.Close(context.Background())
wr := NewInMemTableWriter(imtt2)
defer wr.Close(context.Background())
_, _, err = PipeRows(context.Background(), rd, wr, false)
}()
if err != nil {
t.Error("Error piping rows from reader to writer", err)
}
if imt.NumRows() != imtt2.NumRows() {
t.Error("Row counts should match")
}
for i := 0; i < imt.NumRows(); i++ {
r1, err1 := imt.GetRow(i)
r2, err2 := imtt2.GetRow(i)
if err1 != nil || err2 != nil {
t.Fatal("Couldn't Get row.")
}
if !row.AreEqual(r1, r2, rowSch) {
t.Error("Rows should be the same.", row.Fmt(context.Background(), r1, rowSch), "!=", row.Fmt(context.Background(), r2, rowSch))
}
}
}
func TestReadAllRows(t *testing.T) {
imt := NewInMemTableWithData(rowSch, rows)
var err error
var numBad int
var results []row.Row
func() {
rd := NewInMemTableReader(imt)
defer rd.Close(context.Background())
results, numBad, err = ReadAllRows(context.Background(), rd, true)
}()
if err != nil {
t.Fatal("Error reading rows")
}
if len(rows) != len(results) {
t.Error("Unexpected count.")
}
if numBad != 0 {
t.Error("Unexpected BadRow Count")
}
for i := 0; i < len(rows); i++ {
if !row.AreEqual(rows[i], results[i], rowSch) {
t.Error(row.Fmt(context.Background(), rows[i], rowSch), "!=", row.Fmt(context.Background(), results[i], rowSch))
}
}
}
/*
func TestReadAllRowsToMap(t *testing.T) {
imt := NewInMemTableWithData(rowSch, rows)
greatIndex := rowSch.GetFieldIndex("is_great")
var err error
var numBad int
var results map[types.Value][]row.Row
func() {
rd := NewInMemTableReader(imt)
defer rd.Close()
results, numBad, err = ReadAllRowsToMap(rd, greatIndex, true)
}()
if err != nil {
t.Fatal("Error reading rows")
}
if numBad != 0 {
t.Error("Unexpected BadRow Count")
}
if len(results) != 2 {
t.Error("Unexpected count.")
}
if len(results[types.Bool(true)]) != 2 || len(results[types.Bool(false)]) != 1 {
t.Error("Unexpected count for one or more values of is_great")
}
for _, great := range []types.Bool{types.Bool(true), types.Bool(false)} {
for i, j := 0, 0; i < len(rows); i++ {
rowIsGreat, _ := rows[i].CurrData().GetField(greatIndex)
if rowIsGreat == great {
if !RowsEqualIgnoringSchema(rows[i], results[great][j]) {
t.Error(RowFmt(rows[i]), "!=", RowFmt(results[great][j]))
}
j++
}
}
}
}
*/

View File

@@ -1,94 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"errors"
"io"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
)
// PipeRows will read a row from given TableReader and write it to the provided RowWriter. It will do this
// for every row until the TableReader's ReadRow method returns io.EOF or encounters an error in either reading
// or writing. The caller will need to handle closing the tables as necessary. If contOnBadRow is true, errors reading
// or writing will be ignored and the pipe operation will continue.
//
// Returns a tuple: (number of rows written, number of errors ignored, error). In the case that err is non-nil, the
// row counter fields in the tuple will be set to -1.
func PipeRows(ctx context.Context, rd Reader, wr RowWriter, contOnBadRow bool) (int, int, error) {
var numBad, numGood int
for {
r, err := rd.ReadRow(ctx)
if err != nil && err != io.EOF {
if IsBadRow(err) && contOnBadRow {
numBad++
continue
}
return -1, -1, err
} else if err == io.EOF && r == nil {
break
} else if r == nil {
// a nil row with a nil error indicates a misbehaving reader
return -1, -1, errors.New("reader returned nil row with err==nil")
}
err = wr.WriteRow(ctx, r)
if err != nil {
return -1, -1, err
} else {
numGood++
}
}
return numGood, numBad, nil
}
// ReadAllRows reads all rows from a TableReader and returns a slice containing those rows. Usually this is used
// for testing, or with very small data sets.
func ReadAllRows(ctx context.Context, rd Reader, contOnBadRow bool) ([]row.Row, int, error) {
var rows []row.Row
var err error
badRowCount := 0
for {
var r row.Row
r, err = rd.ReadRow(ctx)
if err != nil && err != io.EOF || r == nil {
if IsBadRow(err) {
badRowCount++
if contOnBadRow {
continue
}
}
break
}
rows = append(rows, r)
}
if err == nil || err == io.EOF {
return rows, badRowCount, nil
}
return nil, badRowCount, err
}

View File

@@ -1,157 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"io"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/types"
)
type keylessTableReader struct {
iter types.MapIterator
sch schema.Schema
row row.Row
remainingCopies uint64
bounded bool
remainingEntries uint64
}
var _ SqlTableReader = &keylessTableReader{}
var _ ReadCloser = &keylessTableReader{}
// GetSchema implements the TableReader interface.
func (rdr *keylessTableReader) GetSchema() schema.Schema {
return rdr.sch
}
// ReadRow implements the TableReader interface.
func (rdr *keylessTableReader) ReadRow(ctx context.Context) (row.Row, error) {
if rdr.remainingCopies <= 0 {
if rdr.bounded && rdr.remainingEntries == 0 {
return nil, io.EOF
}
key, val, err := rdr.iter.Next(ctx)
if err != nil {
return nil, err
} else if key == nil {
return nil, io.EOF
}
rdr.row, rdr.remainingCopies, err = row.KeylessRowsFromTuples(key.(types.Tuple), val.(types.Tuple))
if err != nil {
return nil, err
}
if rdr.remainingEntries > 0 {
rdr.remainingEntries -= 1
}
if rdr.remainingCopies == 0 {
return nil, row.ErrZeroCardinality
}
}
rdr.remainingCopies -= 1
return rdr.row, nil
}
// ReadSqlRow implements the SqlTableReader interface.
func (rdr *keylessTableReader) ReadSqlRow(ctx context.Context) (sql.Row, error) {
r, err := rdr.ReadRow(ctx)
if err != nil {
return nil, err
}
return sqlutil.DoltRowToSqlRow(r, rdr.sch)
}
// Close implements the TableReadCloser interface.
func (rdr *keylessTableReader) Close(_ context.Context) error {
return nil
}
func newKeylessTableReader(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, buffered bool) (*keylessTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
}
return newKeylessTableReaderForRows(ctx, rows, sch, buffered)
}
func newKeylessTableReaderForRows(ctx context.Context, rows types.Map, sch schema.Schema, buffered bool) (*keylessTableReader, error) {
var err error
var iter types.MapIterator
if buffered {
iter, err = rows.Iterator(ctx)
} else {
iter, err = rows.BufferedIterator(ctx)
}
if err != nil {
return nil, err
}
return &keylessTableReader{
iter: iter,
sch: sch,
}, nil
}
func newKeylessTableReaderForPartition(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, start, end uint64) (SqlTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
}
iter, err := rows.BufferedIteratorAt(ctx, start)
if err != nil {
return nil, err
}
return &keylessTableReader{
iter: iter,
sch: sch,
remainingEntries: end - start,
bounded: true,
}, nil
}
func newKeylessTableReaderFrom(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, val types.Value) (SqlTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
}
iter, err := rows.IteratorFrom(ctx, val)
if err != nil {
return nil, err
}
return &keylessTableReader{
iter: iter,
sch: sch,
}, nil
}

View File

@@ -1,82 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"io"
"github.com/dolthub/dolt/go/store/types"
)
type PointReader struct {
m types.Map
emptyTuple types.Tuple
keys []types.Tuple
idx int
}
var _ types.MapIterator = &PointReader{}
// read the map values for a set of map keys
func NewMapPointReader(m types.Map, keys ...types.Tuple) types.MapIterator {
return &PointReader{
m: m,
emptyTuple: types.EmptyTuple(m.Format()),
keys: keys,
}
}
// Next implements types.MapIterator.
func (pr *PointReader) Next(ctx context.Context) (k, v types.Value, err error) {
kt, vt, err := pr.NextTuple(ctx)
if err != nil {
return nil, nil, err
}
if !kt.Empty() {
k = kt
}
if !vt.Empty() {
v = vt
}
return k, v, nil
}
// NextTuple implements types.MapIterator.
func (pr *PointReader) NextTuple(ctx context.Context) (k, v types.Tuple, err error) {
if pr.idx >= len(pr.keys) {
return types.Tuple{}, types.Tuple{}, io.EOF
}
k = pr.keys[pr.idx]
v = pr.emptyTuple
var ok bool
// todo: optimize by implementing MapIterator.Seek()
v, ok, err = pr.m.MaybeGetTuple(ctx, k)
pr.idx++
if err != nil {
return types.Tuple{}, types.Tuple{}, err
} else if !ok {
return k, pr.emptyTuple, nil
}
return k, v, nil
}

View File

@@ -1,153 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"io"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/store/types"
)
type pkTableReader struct {
iter types.MapIterator
sch schema.Schema
}
var _ SqlTableReader = pkTableReader{}
var _ ReadCloser = pkTableReader{}
// GetSchema implements the TableReader interface.
func (rdr pkTableReader) GetSchema() schema.Schema {
return rdr.sch
}
// ReadRow implements the TableReader interface.
func (rdr pkTableReader) ReadRow(ctx context.Context) (row.Row, error) {
key, val, err := rdr.iter.Next(ctx)
if err != nil {
return nil, err
} else if key == nil {
return nil, io.EOF
}
return row.FromNoms(rdr.sch, key.(types.Tuple), val.(types.Tuple))
}
// ReadSqlRow implements the SqlTableReader interface.
func (rdr pkTableReader) ReadSqlRow(ctx context.Context) (sql.Row, error) {
key, val, err := rdr.iter.Next(ctx)
if err != nil {
return nil, err
} else if key == nil {
return nil, io.EOF
}
return noms.SqlRowFromTuples(rdr.sch, key.(types.Tuple), val.(types.Tuple))
}
// Close implements the TableReadCloser interface.
func (rdr pkTableReader) Close(_ context.Context) error {
return nil
}
func newPkTableReader(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, buffered bool) (pkTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return pkTableReader{}, err
}
return newPkTableReaderForRows(ctx, rows, sch, buffered)
}
func newPkTableReaderForRows(ctx context.Context, rows types.Map, sch schema.Schema, buffered bool) (pkTableReader, error) {
var err error
var iter types.MapIterator
if buffered {
iter, err = rows.Iterator(ctx)
} else {
iter, err = rows.BufferedIterator(ctx)
}
if err != nil {
return pkTableReader{}, err
}
return pkTableReader{
iter: iter,
sch: sch,
}, nil
}
func newPkTableReaderFrom(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, val types.Value) (SqlTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
}
iter, err := rows.IteratorFrom(ctx, val)
if err != nil {
return nil, err
}
return pkTableReader{
iter: iter,
sch: sch,
}, nil
}
type partitionTableReader struct {
SqlTableReader
remaining uint64
}
var _ SqlTableReader = &partitionTableReader{}
func newPkTableReaderForPartition(ctx context.Context, tbl *doltdb.Table, sch schema.Schema, start, end uint64) (SqlTableReader, error) {
rows, err := tbl.GetNomsRowData(ctx)
if err != nil {
return nil, err
}
iter, err := rows.BufferedIteratorAt(ctx, start)
if err != nil {
return nil, err
}
return &partitionTableReader{
SqlTableReader: pkTableReader{
iter: iter,
sch: sch,
},
remaining: end - start,
}, nil
}
// ReadSqlRow implements the SqlTableReader interface.
func (rdr *partitionTableReader) ReadSqlRow(ctx context.Context) (sql.Row, error) {
if rdr.remaining == 0 {
return nil, io.EOF
}
rdr.remaining -= 1
return rdr.SqlTableReader.ReadSqlRow(ctx)
}

View File

@@ -1,71 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
"context"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/async"
)
var _ ReadCloser = (*AsyncReadAheadTableReader)(nil)
// AsyncReadAheadTableReader is a TableReadCloser implementation that spins up a go routine to keep reading data into
// a buffered channel so that it is ready when the caller wants it.
type AsyncReadAheadTableReader struct {
backingReader ReadCloser
reader *async.AsyncReader
}
// NewAsyncReadAheadTableReader creates a new AsyncReadAheadTableReader
func NewAsyncReadAheadTableReader(tr ReadCloser, bufferSize int) *AsyncReadAheadTableReader {
read := func(ctx context.Context) (interface{}, error) {
return tr.ReadRow(ctx)
}
reader := async.NewAsyncReader(read, bufferSize)
return &AsyncReadAheadTableReader{tr, reader}
}
// Start the worker routine reading rows to the channel
func (tr *AsyncReadAheadTableReader) Start(ctx context.Context) error {
return tr.reader.Start(ctx)
}
// GetSchema gets the schema of the rows that this reader will return
func (tr *AsyncReadAheadTableReader) GetSchema() schema.Schema {
return tr.backingReader.GetSchema()
}
// ReadRow reads a row from a table. If there is a bad row the returned error will be non-nil, and calling
// IsBadRow(err) will return true. This is a potentially non-fatal error and callers can decide if they want to
// continue on a bad row, or fail.
func (tr *AsyncReadAheadTableReader) ReadRow(ctx context.Context) (row.Row, error) {
obj, err := tr.reader.Read()
if err != nil {
return nil, err
}
return obj.(row.Row), err
}
// Close releases resources being held
func (tr *AsyncReadAheadTableReader) Close(ctx context.Context) error {
_ = tr.reader.Close()
return tr.backingReader.Close(ctx)
}

View File

@@ -61,7 +61,6 @@ func testIterator(t *testing.T, iter RowIter, expected []sql.Row) {
require.Equal(t, io.EOF, err)
}
var colSqlType = sch.GetAllCols().GetAtIndex(0).TypeInfo.ToSqlType()
var sch = schema.MustSchemaFromCols(schema.NewColCollection(
schema.NewColumn("pk", 0, types.UintKind, true),
schema.NewColumn("col1", 1, types.UintKind, false)))

View File

@@ -16,6 +16,8 @@ package table
import (
"context"
"errors"
"io"
"github.com/dolthub/go-mysql-server/sql"
@@ -60,3 +62,41 @@ type SqlTableReader interface {
// ReadSqlRow reads a row from a table as go-mysql-server sql.Row.
ReadSqlRow(ctx context.Context) (sql.Row, error)
}
// PipeRows will read a row from given TableReader and write it to the provided RowWriter. It will do this
// for every row until the TableReader's ReadRow method returns io.EOF or encounters an error in either reading
or writing. The caller will need to handle closing the reader and writer as necessary. If contOnBadRow is true, errors reading
// or writing will be ignored and the pipe operation will continue.
//
// Returns a tuple: (number of rows written, number of errors ignored, error). In the case that err is non-nil, the
// row counter fields in the tuple will be set to -1.
func PipeRows(ctx context.Context, rd SqlRowReader, wr SqlRowWriter, contOnBadRow bool) (int, int, error) {
var numBad, numGood int
for {
r, err := rd.ReadSqlRow(ctx)
if err != nil && err != io.EOF {
if IsBadRow(err) && contOnBadRow {
numBad++
continue
}
return -1, -1, err
} else if err == io.EOF && r == nil {
break
} else if r == nil {
// a nil row with a nil error indicates a misbehaving reader
return -1, -1, errors.New("reader returned nil row with err==nil")
}
err = wr.WriteSqlRow(ctx, r)
if err != nil {
return -1, -1, err
} else {
numGood++
}
}
return numGood, numBad, nil
}
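PipeRows is a generic copy loop between a SqlRowReader and a SqlRowWriter; the control flow (EOF terminates, bad rows are optionally counted and skipped, any other error aborts with -1 counters) is easiest to see in a self-contained sketch with the dolt interfaces stubbed as plain funcs — illustrative only, not the real API:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errBadRow = errors.New("bad row")

// pipe copies rows from read to write until io.EOF, skipping and
// counting bad rows when contOnBad is set, mirroring PipeRows above.
func pipe(read func() (string, error), write func(string) error, contOnBad bool) (int, int, error) {
	var good, bad int
	for {
		r, err := read()
		if errors.Is(err, io.EOF) {
			return good, bad, nil
		} else if err != nil {
			if errors.Is(err, errBadRow) && contOnBad {
				bad++
				continue
			}
			return -1, -1, err
		}
		if err := write(r); err != nil {
			return -1, -1, err
		}
		good++
	}
}

func main() {
	rows := []string{"a", "b"}
	i := 0
	read := func() (string, error) {
		if i >= len(rows) {
			return "", io.EOF
		}
		i++
		return rows[i-1], nil
	}
	good, bad, _ := pipe(read, func(s string) error { fmt.Println(s); return nil }, true)
	fmt.Println(good, bad) // 2 0
}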

View File

@@ -18,23 +18,9 @@ import (
"context"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
)
// RowWriter knows how to write table rows to some destination
type RowWriter interface {
// WriteRow writes a row to the destination of this writer
WriteRow(ctx context.Context, r row.Row) error
}
// TableWriteCloser is an interface for writing rows to a table, that can be closed
type TableWriteCloser interface {
RowWriter
Closer
}
type SqlRowWriter interface {
TableWriteCloser
Closer
WriteSqlRow(ctx context.Context, r sql.Row) error
}

View File

@@ -24,12 +24,10 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/sqltypes"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/store/types"
)
const jsonHeader = `{"rows": [`
@@ -72,77 +70,6 @@ func (j *RowWriter) GetSchema() schema.Schema {
return j.sch
}
// WriteRow encodes the row given into JSON format and writes it, returning any error
func (j *RowWriter) WriteRow(ctx context.Context, r row.Row) error {
if j.rowsWritten == 0 {
err := iohelp.WriteAll(j.bWr, []byte(j.header))
if err != nil {
return err
}
}
allCols := j.sch.GetAllCols()
colValMap := make(map[string]interface{}, allCols.Size())
if err := allCols.Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
val, ok := r.GetColVal(tag)
if !ok || types.IsNull(val) {
return false, nil
}
switch col.TypeInfo.GetTypeIdentifier() {
case typeinfo.DatetimeTypeIdentifier,
typeinfo.DecimalTypeIdentifier,
typeinfo.EnumTypeIdentifier,
typeinfo.InlineBlobTypeIdentifier,
typeinfo.SetTypeIdentifier,
typeinfo.TimeTypeIdentifier,
typeinfo.TupleTypeIdentifier,
typeinfo.UuidTypeIdentifier,
typeinfo.VarBinaryTypeIdentifier,
typeinfo.YearTypeIdentifier:
v, err := col.TypeInfo.FormatValue(val)
if err != nil {
return true, err
}
val = types.String(*v)
case typeinfo.BitTypeIdentifier,
typeinfo.BoolTypeIdentifier,
typeinfo.VarStringTypeIdentifier,
typeinfo.UintTypeIdentifier,
typeinfo.IntTypeIdentifier,
typeinfo.FloatTypeIdentifier:
// use primitive type
}
colValMap[col.Name] = val
return false, nil
}); err != nil {
return err
}
data, err := marshalToJson(colValMap)
if err != nil {
return errors.New("marshaling did not work")
}
if j.rowsWritten != 0 {
_, err := j.bWr.WriteString(j.separator)
if err != nil {
return err
}
}
newErr := iohelp.WriteAll(j.bWr, data)
if newErr != nil {
return newErr
}
j.rowsWritten++
return nil
}
func (j *RowWriter) WriteSqlRow(ctx context.Context, row sql.Row) error {
// The Type.SQL() call takes in a SQL context to determine the output character set for types that use a collation.
// The context given is not a SQL context, so we force the `utf8mb4` character set to be used, as it is the most

View File

@@ -24,7 +24,6 @@ import (
"github.com/xitongsys/parquet-go/source"
"github.com/xitongsys/parquet-go/writer"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
@@ -96,15 +95,6 @@ func (pwr *ParquetWriter) GetSchema() schema.Schema {
return pwr.sch
}
// WriteRow will write a row to a table
func (pwr *ParquetWriter) WriteRow(ctx context.Context, r row.Row) error {
sqlRow, err := sqlutil.DoltRowToSqlRow(r, pwr.GetSchema())
if err != nil {
return err
}
return pwr.WriteSqlRow(ctx, sqlRow)
}
func (pwr *ParquetWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
colValStrs := make([]*string, pwr.sch.GetAllCols().Size())

View File

@@ -21,11 +21,11 @@ import (
"os"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/xitongsys/parquet-go-source/local"
"github.com/xitongsys/parquet-go/reader"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/types"
@@ -54,41 +54,21 @@ type Person struct {
Title string `parquet:"name=title, type=BYTE_ARRAY, convertedtype=UTF8, repetitiontype=OPTIONAL"`
}
func mustRow(r row.Row, err error) row.Row {
if err != nil {
panic(err)
}
return r
}
func getSampleRows() (rows []row.Row) {
return []row.Row{
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Bill Billerson"),
ageColTag: types.Uint(32),
titleColTag: types.String("Senior Dufus")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Rob Robertson"),
ageColTag: types.Uint(25),
titleColTag: types.String("Dufus")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("John Johnson"),
ageColTag: types.Uint(21),
titleColTag: types.String("")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Andy Anderson"),
ageColTag: types.Uint(27),
/* title = NULL */})),
func getSampleRows() []sql.Row {
return []sql.Row{
{"Bill Billerson", 32, "Senior Dufus"},
{"Rob Robertson", 25, "Dufus"},
{"John Johnson", 21, ""},
{"Andy Anderson", 27, nil},
}
}
func writeToParquet(pWr *ParquetWriter, rows []row.Row, t *testing.T) {
func writeToParquet(pWr *ParquetWriter, rows []sql.Row, t *testing.T) {
func() {
defer pWr.Close(context.Background())
for _, row := range rows {
err := pWr.WriteRow(context.Background(), row)
err := pWr.WriteSqlRow(context.Background(), row)
if err != nil {
t.Fatal("Failed to write row")
}

View File

@@ -20,10 +20,6 @@ import (
"strings"
)
func csvSplitLineRuneDelim(str string, delim rune, escapedQuotes bool) ([]*string, error) {
return csvSplitLine(str, string(delim), escapedQuotes)
}
func csvSplitLine(str string, delim string, escapedQuotes bool) ([]*string, error) {
if strings.IndexRune(delim, '"') != -1 {
panic("delims cannot contain quotes")

View File

@@ -26,7 +26,6 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
@@ -81,31 +80,6 @@ func NewCSVWriter(wr io.WriteCloser, outSch schema.Schema, info *CSVFileInfo) (*
return csvw, nil
}
// WriteRow will write a row to a table
func (csvw *CSVWriter) WriteRow(ctx context.Context, r row.Row) error {
allCols := csvw.sch.GetAllCols()
colValStrs := make([]*string, allCols.Size())
sqlRow, err := sqlutil.DoltRowToSqlRow(r, csvw.sch)
if err != nil {
return err
}
for i, val := range sqlRow {
if val == nil {
colValStrs[i] = nil
} else {
v, err := sqlutil.SqlColToStr(csvw.sch.GetAllCols().GetAtIndex(i).TypeInfo.ToSqlType(), val)
if err != nil {
return err
}
colValStrs[i] = &v
}
}
return csvw.write(colValStrs)
}
func (csvw *CSVWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
colValStrs := make([]*string, csvw.sch.GetAllCols().Size())
for i, val := range r {
@@ -114,7 +88,7 @@ func (csvw *CSVWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
} else {
var v string
var err error
colType := csvw.sch.GetAllCols().GetAtIndex(i).TypeInfo.ToSqlType()
colType := csvw.sch.GetAllCols().GetByIndex(i).TypeInfo.ToSqlType()
// Due to BIT's unique output, we special-case writing the integer specifically for CSV
if _, ok := colType.(sql.BitType); ok {
v = strconv.FormatUint(val.(uint64), 10)
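The BIT special case exists because the generic SQL-to-string conversion renders a BIT value as raw bytes, which is unreadable in a CSV cell; writing the decimal form of the backing uint64 sidesteps that. A tiny standalone illustration of the difference (plain Go, no dolt types):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var bitVal uint64 = 0b1011 // BIT columns carry their value as a uint64
	fmt.Printf("%q\n", string([]byte{byte(bitVal)})) // "\v" — the raw-byte form a generic conversion would produce
	fmt.Println(strconv.FormatUint(bitVal, 10))      // "11" — the decimal form CSVWriter emits instead
}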

View File

@@ -19,7 +19,8 @@ import (
"os"
"testing"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
@@ -43,33 +44,21 @@ var inCols = []schema.Column{
var colColl = schema.NewColCollection(inCols...)
var rowSch = schema.MustSchemaFromCols(colColl)
func getSampleRows() (rows []row.Row) {
return []row.Row{
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Bill Billerson"),
ageColTag: types.Uint(32),
titleColTag: types.String("Senior Dufus")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Rob Robertson"),
ageColTag: types.Uint(25),
titleColTag: types.String("Dufus")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("John Johnson"),
ageColTag: types.Uint(21),
titleColTag: types.String("")})),
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameColTag: types.String("Andy Anderson"),
ageColTag: types.Uint(27),
/* title = NULL */})),
func getSampleRows() []sql.Row {
return []sql.Row{
{"Bill Billerson", 32, "Senior Dufus"},
{"Rob Robertson", 25, "Dufus"},
{"John Johnson", 21, ""},
{"Andy Anderson", 27, nil},
}
}
func writeToCSV(csvWr *CSVWriter, rows []row.Row, t *testing.T) {
func writeToCSV(csvWr *CSVWriter, rows []sql.Row, t *testing.T) {
func() {
defer csvWr.Close(context.Background())
for _, row := range rows {
err := csvWr.WriteRow(context.Background(), row)
for _, r := range rows {
err := csvWr.WriteSqlRow(context.Background(), r)
if err != nil {
t.Fatal("Failed to write row")

View File

@@ -23,7 +23,6 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
@@ -81,64 +80,6 @@ func (w *BatchSqlExportWriter) GetSchema() schema.Schema {
return w.sch
}
// WriteRow will write a row to a table
func (w *BatchSqlExportWriter) WriteRow(ctx context.Context, r row.Row) error {
if err := w.maybeWriteDropCreate(ctx); err != nil {
return err
}
// Previous write was last insert
if w.numInserts > 0 && r == nil {
return iohelp.WriteLine(w.wr, ";")
}
// Reached max number of inserts on one line
if w.numInserts == batchSize {
// Reset count
w.numInserts = 0
// End line
err := iohelp.WriteLine(w.wr, ";")
if err != nil {
return err
}
}
// Append insert values as tuples
var stmt string
if w.numInserts == 0 {
// Get insert prefix string
prefix, err := sqlfmt.InsertStatementPrefix(w.tableName, w.sch)
if err != nil {
return nil
}
// Write prefix
err = iohelp.WriteWithoutNewLine(w.wr, prefix)
if err != nil {
return nil
}
} else {
stmt = ", "
}
// Get insert tuple string
tuple, err := sqlfmt.RowAsTupleString(r, w.sch)
if err != nil {
return err
}
// Write insert tuple
err = iohelp.WriteWithoutNewLine(w.wr, stmt+tuple)
if err != nil {
return nil
}
// Increase count of inserts written on this line
w.numInserts++
return err
}
func (w *BatchSqlExportWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
if err := w.maybeWriteDropCreate(ctx); err != nil {
return err

View File

@@ -23,7 +23,6 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
@@ -77,21 +76,6 @@ func (w *SqlExportWriter) GetSchema() schema.Schema {
return w.sch
}
// WriteRow will write a row to a table
func (w *SqlExportWriter) WriteRow(ctx context.Context, r row.Row) error {
if err := w.maybeWriteDropCreate(ctx); err != nil {
return err
}
stmt, err := sqlfmt.RowAsInsertStmt(r, w.tableName, w.sch)
if err != nil {
return err
}
return iohelp.WriteLine(w.wr, stmt)
}
func (w *SqlExportWriter) WriteSqlRow(ctx context.Context, r sql.Row) error {
if r == nil {
return nil

View File

@@ -19,7 +19,7 @@ import (
"strings"
"testing"
"github.com/google/uuid"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -40,9 +40,7 @@ func (*StringBuilderCloser) Close() error {
}
func TestEndToEnd(t *testing.T) {
id := uuid.MustParse("00000000-0000-0000-0000-000000000000")
tableName := "people"
dropCreateStatement := sqlfmt.DropTableIfExistsStmt(tableName) + "\n" +
"CREATE TABLE `people` (\n" +
" `id` varchar(16383) NOT NULL,\n" +
@@ -57,7 +55,7 @@ func TestEndToEnd(t *testing.T) {
type test struct {
name string
rows []row.Row
rows []sql.Row
sch schema.Schema
expectedOutput string
}
@@ -65,9 +63,10 @@ func TestEndToEnd(t *testing.T) {
tests := []test{
{
name: "two rows",
rows: rs(
dtestutils.NewTypedRow(id, "some guy", 100, false, strPointer("normie")),
dtestutils.NewTypedRow(id, "guy personson", 0, true, strPointer("officially a person"))),
rows: []sql.Row{
{"00000000-0000-0000-0000-000000000000", "some guy", 100, 0, "normie"},
{"00000000-0000-0000-0000-000000000000", "guy personson", 0, 1, "officially a person"},
},
sch: dtestutils.TypedSchema,
expectedOutput: dropCreateStatement + "\n" +
"INSERT INTO `people` (`id`,`name`,`age`,`is_married`,`title`) " +
@@ -112,7 +111,7 @@ func TestEndToEnd(t *testing.T) {
}
for _, r := range tt.rows {
assert.NoError(t, w.WriteRow(ctx, r))
assert.NoError(t, w.WriteSqlRow(ctx, r))
}
assert.NoError(t, w.Close(ctx))

View File

@@ -25,7 +25,6 @@ import (
"github.com/fatih/color"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
@@ -168,10 +167,6 @@ func (w *FixedWidthTableWriter) WriteColoredRow(ctx context.Context, r sql.Row,
return nil
}
func (w *FixedWidthTableWriter) WriteRow(ctx context.Context, r row.Row) error {
panic("unimplemented")
}
func (w *FixedWidthTableWriter) sampleRow(r sql.Row, colors []*color.Color) (tableRow, error) {
row := tableRow{
columns: make([]string, len(r)),

View File

@@ -66,16 +66,6 @@ func NewRowFromStrings(nbf *types.NomsBinFormat, sch schema.Schema, valStrs []st
return row.New(nbf, sch, taggedVals)
}
// NewRowFromTaggedStrings takes an untyped schema and a map of column tag to string value and returns a row
func NewRowFromTaggedStrings(nbf *types.NomsBinFormat, sch schema.Schema, taggedStrs map[uint64]string) (row.Row, error) {
taggedVals := make(row.TaggedValues)
for tag, valStr := range taggedStrs {
taggedVals[tag] = types.String(valStr)
}
return row.New(nbf, sch, taggedVals)
}
// UntypeSchema takes a schema and returns a schema with the same columns, but with the types of each of those columns
// as types.StringKind
func UntypeSchema(sch schema.Schema) (schema.Schema, error) {
@@ -102,48 +92,6 @@ func UntypeSchema(sch schema.Schema) (schema.Schema, error) {
return newSch, nil
}
// UnkeySchema takes a schema and returns a schema with the same columns and types, but stripped of constraints and
// primary keys. Meant for use in result sets.
func UnkeySchema(sch schema.Schema) (schema.Schema, error) {
var cols []schema.Column
err := sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
col.IsPartOfPK = false
col.Constraints = nil
cols = append(cols, col)
return false, nil
})
if err != nil {
return nil, err
}
colColl := schema.NewColCollection(cols...)
return schema.UnkeyedSchemaFromCols(colColl), nil
}
// UntypeUnkeySchema takes a schema and returns a schema with the same columns, but stripped of constraints and primary
// keys and using only string types. Meant for displaying output and tests.
func UntypeUnkeySchema(sch schema.Schema) (schema.Schema, error) {
var cols []schema.Column
err := sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
col.Kind = types.StringKind
col.IsPartOfPK = false
col.Constraints = nil
col.TypeInfo = typeinfo.StringDefaultType
cols = append(cols, col)
return false, nil
})
if err != nil {
return nil, err
}
colColl := schema.NewColCollection(cols...)
return schema.UnkeyedSchemaFromCols(colColl), nil
}
// UntypedSchemaUnion takes an arbitrary number of schemas and provides the union of all of their key and non-key columns.
The columns will all be of type types.StringKind and IsPartOfPK will be false for every column, and all of the
// columns will be in the schemas non-key ColumnCollection. Columns that share tags must have compatible types.

View File

@@ -16,7 +16,6 @@ package edits
import (
"context"
"os"
"github.com/dolthub/dolt/go/store/types"
)
@@ -111,10 +110,3 @@ func (dbea *DiskBackedEditAcc) Close(ctx context.Context) error {
return nil
}
// best effort deletion ignores errors
func tryDeleteFiles(paths []string) {
for _, path := range paths {
_ = os.Remove(path)
}
}

View File

@@ -0,0 +1,161 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"github.com/dolthub/dolt/go/store/hash"
)
// MultiPolygon is a Noms Value representing a collection of Polygons with a spatial reference ID.
type MultiPolygon struct {
SRID uint32
Polygons []Polygon
}
// Value interface
func (v MultiPolygon) Value(ctx context.Context) (Value, error) {
return v, nil
}
func (v MultiPolygon) Equals(other Value) bool {
// Compare types
v2, ok := other.(MultiPolygon)
if !ok {
return false
}
// Compare SRID
if v.SRID != v2.SRID {
return false
}
// Compare lengths of polygons
if len(v.Polygons) != len(v2.Polygons) {
return false
}
// Compare each polygon
for i := 0; i < len(v.Polygons); i++ {
if !v.Polygons[i].Equals(v2.Polygons[i]) {
return false
}
}
return true
}
func (v MultiPolygon) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(MultiPolygon)
if !ok {
return MultiPolygonKind < other.Kind(), nil
}
// Compare SRID
if v.SRID != v2.SRID {
return v.SRID < v2.SRID, nil
}
// Get shorter length
var n int
len1 := len(v.Polygons)
len2 := len(v2.Polygons)
if len1 < len2 {
n = len1
} else {
n = len2
}
// Compare each polygon until there is one that is less
for i := 0; i < n; i++ {
if !v.Polygons[i].Equals(v2.Polygons[i]) {
return v.Polygons[i].Less(nbf, v2.Polygons[i])
}
}
// Determine based off length
return len1 < len2, nil
}
func (v MultiPolygon) Hash(nbf *NomsBinFormat) (hash.Hash, error) {
return getHash(v, nbf)
}
func (v MultiPolygon) isPrimitive() bool {
return true
}
func (v MultiPolygon) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
return nil
}
func (v MultiPolygon) typeOf() (*Type, error) {
return PrimitiveTypeMap[MultiPolygonKind], nil
}
func (v MultiPolygon) Kind() NomsKind {
return MultiPolygonKind
}
func (v MultiPolygon) valueReadWriter() ValueReadWriter {
return nil
}
func (v MultiPolygon) writeTo(w nomsWriter, nbf *NomsBinFormat) error {
err := MultiPolygonKind.writeTo(w, nbf)
if err != nil {
return err
}
w.writeString(string(SerializeMultiPolygon(v)))
return nil
}
func readMultiPolygon(nbf *NomsBinFormat, b *valueDecoder) (MultiPolygon, error) {
buf := []byte(b.ReadString())
srid, _, geomType, err := DeserializeEWKBHeader(buf)
if err != nil {
return MultiPolygon{}, err
}
if geomType != WKBMultiPolyID {
return MultiPolygon{}, errors.New("not a multipolygon")
}
buf = buf[EWKBHeaderSize:]
return DeserializeTypesMPoly(buf, false, srid), nil
}
func (v MultiPolygon) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, error) {
buf := []byte(b.ReadString())
srid, _, geomType, err := DeserializeEWKBHeader(buf)
if err != nil {
return MultiPolygon{}, err
}
if geomType != WKBMultiPolyID {
return MultiPolygon{}, errors.New("not a multipolygon")
}
buf = buf[EWKBHeaderSize:]
return DeserializeTypesMPoly(buf, false, srid), nil
}
func (v MultiPolygon) skip(nbf *NomsBinFormat, b *binaryNomsReader) {
b.skipString()
}
func (v MultiPolygon) HumanReadableString() string {
polys := make([]string, len(v.Polygons))
for i, l := range v.Polygons {
polys[i] = l.HumanReadableString()
}
s := fmt.Sprintf("SRID: %d MULTIPOLYGON(%s)", v.SRID, strings.Join(polys, ","))
return strconv.Quote(s)
}
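The ordering defined by Less above is lexicographic: SRID first, then polygon-by-polygon over the shared prefix, with length as the tie-breaker. The rule is easier to see over plain int slices; a self-contained sketch of the same comparison (illustrative, not the noms code):

package main

import "fmt"

// less compares two sequences the way MultiPolygon.Less compares
// polygon slices: element-wise over the shared prefix, then by length.
func less(a, b []int) bool {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return a[i] < b[i]
		}
	}
	return len(a) < len(b)
}

func main() {
	fmt.Println(less([]int{1, 2}, []int{1, 2, 3})) // true: equal prefix, shorter sorts first
	fmt.Println(less([]int{1, 3}, []int{1, 2, 3})) // false: differs at index 1
}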

View File

@@ -105,6 +105,7 @@ func init() {
KindToType[SerialMessageKind] = SerialMessage{}
KindToType[MultiPointKind] = MultiPoint{}
KindToType[MultiLineStringKind] = MultiLineString{}
KindToType[MultiPolygonKind] = MultiPolygon{}
SupportedKinds[BlobKind] = true
SupportedKinds[BoolKind] = true
@@ -135,6 +136,7 @@ func init() {
SupportedKinds[SerialMessageKind] = true
SupportedKinds[MultiPointKind] = true
SupportedKinds[MultiLineStringKind] = true
SupportedKinds[MultiPolygonKind] = true
if serial.MessageTypesKind != int(SerialMessageKind) {
panic("internal error: serial.MessageTypesKind != SerialMessageKind")
@@ -174,6 +176,7 @@ var KindToString = map[NomsKind]string{
SerialMessageKind: "SerialMessage",
MultiPointKind: "MultiPoint",
MultiLineStringKind: "MultiLineString",
MultiPolygonKind: "MultiPolygon",
}
// String returns the name of the kind.

View File

@@ -67,6 +67,14 @@ func ConvertTypesMultiLineStringToSQLMultiLineString(l MultiLineString) sql.Mult
return sql.MultiLineString{SRID: l.SRID, Lines: lines}
}
func ConvertTypesMultiPolygonToSQLMultiPolygon(p MultiPolygon) sql.MultiPolygon {
polys := make([]sql.Polygon, len(p.Polygons))
for i, p := range p.Polygons {
polys[i] = ConvertTypesPolygonToSQLPolygon(p)
}
return sql.MultiPolygon{SRID: p.SRID, Polygons: polys}
}
func ConvertSQLGeometryToTypesGeometry(p interface{}) Value {
switch inner := p.(type) {
case sql.Point:
@@ -79,6 +87,8 @@ func ConvertSQLGeometryToTypesGeometry(p interface{}) Value {
return ConvertSQLMultiPointToTypesMultiPoint(inner)
case sql.MultiLineString:
return ConvertSQLMultiLineStringToTypesMultiLineString(inner)
case sql.MultiPolygon:
return ConvertSQLMultiPolygonToTypesMultiPolygon(inner)
default:
panic("used an invalid type sql.Geometry.Inner")
}
@@ -120,6 +130,14 @@ func ConvertSQLMultiLineStringToTypesMultiLineString(p sql.MultiLineString) Mult
return MultiLineString{SRID: p.SRID, Lines: lines}
}
func ConvertSQLMultiPolygonToTypesMultiPolygon(p sql.MultiPolygon) MultiPolygon {
polys := make([]Polygon, len(p.Polygons))
for i, p := range p.Polygons {
polys[i] = ConvertSQLPolygonToTypesPolygon(p)
}
return MultiPolygon{SRID: p.SRID, Polygons: polys}
}
// TODO: all methods here just defer to their SQL equivalents, and assume we always receive good data
func DeserializeEWKBHeader(buf []byte) (uint32, bool, uint32, error) {
@@ -170,6 +188,14 @@ func DeserializeMLine(buf []byte, isBig bool, srid uint32) sql.MultiLineString {
return p
}
func DeserializeMPoly(buf []byte, isBig bool, srid uint32) sql.MultiPolygon {
p, err := sql.DeserializeMPoly(buf, isBig, srid)
if err != nil {
panic(err)
}
return p
}
// TODO: noms needs results to be in types
func DeserializeTypesPoint(buf []byte, isBig bool, srid uint32) Point {
@@ -191,3 +217,7 @@ func DeserializeTypesMPoint(buf []byte, isBig bool, srid uint32) MultiPoint {
func DeserializeTypesMLine(buf []byte, isBig bool, srid uint32) MultiLineString {
return ConvertSQLMultiLineStringToTypesMultiLineString(DeserializeMLine(buf, isBig, srid))
}
func DeserializeTypesMPoly(buf []byte, isBig bool, srid uint32) MultiPolygon {
return ConvertSQLMultiPolygonToTypesMultiPolygon(DeserializeMPoly(buf, isBig, srid))
}

View File

@@ -52,6 +52,7 @@ type CodecReader interface {
ReadPolygon() (Polygon, error)
ReadMultiPoint() (MultiPoint, error)
ReadMultiLineString() (MultiLineString, error)
ReadMultiPolygon() (MultiPolygon, error)
ReadBlob() (Blob, error)
ReadJSON() (JSON, error)
}
@@ -110,6 +111,10 @@ func (r *valueDecoder) ReadMultiLineString() (MultiLineString, error) {
return readMultiLineString(nil, r)
}
func (r *valueDecoder) ReadMultiPolygon() (MultiPolygon, error) {
return readMultiPolygon(nil, r)
}
func (r *valueDecoder) ReadJSON() (JSON, error) {
return readJSON(r.vrw.Format(), r)
}
@@ -449,11 +454,35 @@ func (r *valueDecoder) readValue(nbf *NomsBinFormat) (Value, error) {
if err != nil {
return nil, err
}
if geomType != WKBPolyID {
if geomType != WKBMultiPointID {
return nil, ErrUnknownType
}
buf = buf[EWKBHeaderSize:]
return DeserializeTypesMPoint(buf, false, srid), nil
case MultiLineStringKind:
r.skipKind()
buf := []byte(r.ReadString())
srid, _, geomType, err := DeserializeEWKBHeader(buf)
if err != nil {
return nil, err
}
if geomType != WKBMultiLineID {
return nil, ErrUnknownType
}
buf = buf[EWKBHeaderSize:]
return DeserializeTypesMLine(buf, false, srid), nil
case MultiPolygonKind:
r.skipKind()
buf := []byte(r.ReadString())
srid, _, geomType, err := DeserializeEWKBHeader(buf)
if err != nil {
return nil, err
}
if geomType != WKBMultiPolyID {
return nil, ErrUnknownType
}
buf = buf[EWKBHeaderSize:]
return DeserializeTypesMPoly(buf, false, srid), nil
case TypeKind:
r.skipKind()
return r.readType()
@@ -515,9 +544,15 @@ func (r *valueDecoder) SkipValue(nbf *NomsBinFormat) error {
case PolygonKind:
r.skipKind()
r.skipString()
case MultiLineStringKind:
r.skipKind()
r.skipString()
case MultiPointKind:
r.skipKind()
r.skipString()
case MultiPolygonKind:
r.skipKind()
r.skipString()
case ListKind:
err := r.skipList(nbf)
if err != nil {

View File

@@ -69,3 +69,7 @@ func SerializeMultiPoint(p MultiPoint) []byte {
func SerializeMultiLineString(p MultiLineString) []byte {
return ConvertTypesMultiLineStringToSQLMultiLineString(p).Serialize()
}
func SerializeMultiPolygon(p MultiPolygon) []byte {
return ConvertTypesMultiPolygonToSQLMultiPolygon(p).Serialize()
}
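Every decoder in this change follows the same read path: strip the EWKB header, verify the geometry-type id (DeserializeEWKBHeader returns the SRID and type id), then pass the remaining bytes to the type-specific deserializer. A hedged standalone sketch of that header check — the 9-byte layout assumed here (4-byte SRID, 1-byte byte-order flag, 4-byte type id) follows MySQL's geometry encoding, and 6 is the standard WKB id for multipolygon:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const headerSize = 9 // 4-byte SRID + 1-byte byte order + 4-byte geometry type (assumed layout)

func deserializeHeader(buf []byte) (srid, geomType uint32, err error) {
	if len(buf) < headerSize {
		return 0, 0, errors.New("buffer too short for EWKB header")
	}
	srid = binary.LittleEndian.Uint32(buf[0:4])
	geomType = binary.LittleEndian.Uint32(buf[5:9]) // buf[4] is the byte-order flag
	return srid, geomType, nil
}

func main() {
	buf := make([]byte, headerSize)
	binary.LittleEndian.PutUint32(buf[0:4], 4326) // SRID
	buf[4] = 1                                    // 1 = little-endian
	binary.LittleEndian.PutUint32(buf[5:9], 6)    // 6 = WKB multipolygon
	srid, gt, _ := deserializeHeader(buf)
	fmt.Println(srid, gt) // 4326 6
}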

View File

@@ -113,7 +113,7 @@ wait_for_connection(port=int(port_str), timeout_ms=int(timeout_ms), database=dat
start_sql_server() {
DEFAULT_DB="$1"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --host 0.0.0.0 --port=$PORT --user dolt &
SERVER_PID=$!
wait_for_connection $PORT 5000
@@ -124,7 +124,7 @@ start_sql_server() {
# this func)
start_sql_server_with_args() {
DEFAULT_DB=""
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server "$@" --port=$PORT &
SERVER_PID=$!
wait_for_connection $PORT 5000
@@ -132,7 +132,7 @@ start_sql_server_with_args() {
start_sql_server_with_config() {
DEFAULT_DB="$1"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
echo "
log_level: debug
@@ -155,7 +155,7 @@ behavior:
start_sql_multi_user_server() {
DEFAULT_DB="$1"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
echo "
log_level: debug
@@ -177,7 +177,7 @@ behavior:
start_multi_db_server() {
DEFAULT_DB="$1"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --host 0.0.0.0 --port=$PORT --user dolt --data-dir ./ &
SERVER_PID=$!
wait_for_connection $PORT 5000
@@ -186,15 +186,19 @@ start_multi_db_server() {
# stop_sql_server stops the SQL server. For cases where it's important
# to wait for the process to exit after the kill signal (e.g. waiting
# for an async replication push), pass 1.
# kill the process if it's still running
stop_sql_server() {
wait=$1
if [ ! -z "$SERVER_PID" ]; then
kill $SERVER_PID
if [ $wait ]; then
while ps -p $SERVER_PID > /dev/null; do
sleep .1;
done
fi;
serverpidinuse=$(lsof -i -P -n | grep LISTEN | grep $SERVER_PID | wc -l)
if [ $serverpidinuse -gt 0 ]; then
kill $SERVER_PID
if [ $wait ]; then
while ps -p $SERVER_PID > /dev/null; do
sleep .1;
done
fi;
fi
fi
SERVER_PID=
}
@@ -233,7 +237,7 @@ stop_sql_server() {
# * param7: Expected exception value of 1. Mutually exclusive with param6.
#
server_query() {
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
server_query_with_port "$PORT" "$@"
}
@@ -246,3 +250,16 @@ server_query_with_port() {
echo Executing server_query
python3 -u -c "$PYTHON_QUERY_SCRIPT" -- "$PYTEST_DIR" "$1" "$PORT" "$2" "$3" "$4" "$5" "$6" "$7"
}
definePORT() {
getPORT=""
for i in {0..9}
do
let getPORT="($$ + $i) % (65536-1024) + 1024"
portinuse=$(lsof -i -P -n | grep LISTEN | grep $getPORT | wc -l)
if [ $portinuse -eq 0 ]; then
echo "$getPORT"
break
fi
done
}

View File

@@ -119,8 +119,8 @@ SQL
dolt clone "file://./rem1/newdb" "dbs2/newdb"
# this is a hack: we have to change our persisted global server
# vars for the sql command to work on the replica TODO: fix this
# mess
# vars for the sql command to work on the replica
# TODO: fix this mess
dolt config --global --unset sqlserver.global.dolt_replicate_to_remote
run dolt sql --data-dir=dbs2 -q "use newdb; show tables" -r csv
@@ -129,6 +129,53 @@ SQL
[[ "$output" =~ "new_table" ]] || false
}
@test "replication-multidb: push newly cloned database" {
pushd .
mkdir -p "${TMPDIRS}/rem2"
# push all the dbs to remote1
for i in {1..3}; do
cd "${TMPDIRS}/dbs1/repo${i}"
dolt push remote1 main
# also create a new remote2 for each DB but don't push to it
dolt remote add remote2 "file://../../rem2/repo${i}"
done
popd
dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote2
dolt sql -q "set @@persist.dolt_replication_remote_url_template = 'file://$TMPDIRS/rem2/{database}'"
mkdir -p "${TMPDIRS}/dbs2"
dolt sql --data-dir=dbs2 <<SQL
call dolt_clone('file://${TMPDIRS}/rem1/repo1', 'repo1');
use repo1;
create table new_table (b int primary key);
call dolt_commit('-Am', 'new table');
call dolt_clone('file://${TMPDIRS}/rem1/repo2', 'repo2');
SQL
mkdir -p "${TMPDIRS}/dbs3"
cd $TMPDIRS
dolt clone "file://./rem2/repo1" "dbs3/repo1"
dolt clone "file://./rem2/repo2" "dbs3/repo2"
# this is a hack: we have to change our persisted global server
# vars for the sql command to work on the replica
# TODO: fix this mess
dolt config --global --unset sqlserver.global.dolt_replicate_to_remote
run dolt sql --data-dir=dbs3 -q "use repo1; show tables" -r csv
[ $status -eq 0 ]
[ "${#lines[@]}" -eq 3 ]
[[ "$output" =~ "new_table" ]] || false
run dolt sql --data-dir=dbs3 -q "use repo2; show tables" -r csv
[ $status -eq 0 ]
[ "${#lines[@]}" -eq 2 ]
[[ ! "$output" =~ "new_table" ]] || false
}
@test "replication-multidb: push newly created database with no commits" {
dolt config --global --add sqlserver.global.dolt_replicate_to_remote remote1
dolt sql -q "set @@persist.dolt_replication_remote_url_template = 'file://$TMPDIRS/rem1/{database}'"

View File

@@ -29,7 +29,7 @@ teardown() {
cp "$BATS_TEST_DIRNAME"/../../go/cmd/dolt/commands/sqlserver/testdata/chain_key.pem .
cp "$BATS_TEST_DIRNAME"/../../go/cmd/dolt/commands/sqlserver/testdata/chain_cert.pem .
cp "$BATS_TEST_DIRNAME"/../../go/libraries/utils/jwtauth/gen_keys/test_jwks.json .
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
TOKEN="`cat $BATS_TEST_DIRNAME/../../go/libraries/utils/jwtauth/gen_keys/token.jwt`"
cat >config.yml <<EOF

View File

@@ -55,7 +55,7 @@ teardown() {
@test "sql-privs: default user is root. create new user destroys default user." {
make_test_repo
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --host 0.0.0.0 --port=$PORT &
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
@@ -68,7 +68,7 @@ teardown() {
rm -f .dolt/sql-server.lock
# restarting server
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --host 0.0.0.0 --port=$PORT &
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
@@ -96,7 +96,7 @@ teardown() {
@test "sql-privs: yaml with no user is replaced with command line user" {
make_test_repo
touch server.yaml
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
echo "log_level: debug
@@ -118,7 +118,7 @@ behavior:
@test "sql-privs: yaml with user is also replaced with command line user" {
make_test_repo
touch server.yaml
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
echo "log_level: debug
user:

View File

@@ -53,7 +53,7 @@ teardown() {
cd repo1
dolt sql -q "create user dolt@'%' identified by '123'"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --port=$PORT --user dolt > log.txt 2>&1 &
SERVER_PID=$!
sleep 5
@@ -684,7 +684,7 @@ SQL
skiponwindows "Missing dependencies"
cd repo1
dolt sql -q 'create table test (id int primary key)'
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
cat >config.yml <<EOF
log_level: debug
behavior:
@@ -731,7 +731,7 @@ END""")
skiponwindows "Missing dependencies"
cd repo1
dolt sql -q 'create table test (id int primary key)'
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
cat >config.yml <<EOF
log_level: debug
user:
@@ -1156,7 +1156,7 @@ databases:
@test "sql-server: sql-server locks database" {
cd repo1
start_sql_server
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
run dolt sql-server -P $PORT
[ "$status" -eq 1 ]
}
@@ -1164,7 +1164,7 @@ databases:
@test "sql-server: multi dir sql-server locks out childen" {
start_sql_server
cd repo2
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
run dolt sql-server -P $PORT
[ "$status" -eq 1 ]
}
@@ -1173,7 +1173,7 @@ databases:
cd repo2
start_sql_server
cd ..
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
run dolt sql-server -P $PORT
[ "$status" -eq 1 ]
}
@@ -1183,7 +1183,7 @@ databases:
start_sql_server
server_query repo1 1 dolt "" "create database newdb" ""
cd newdb
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
run dolt sql-server -P $PORT
[ "$status" -eq 1 ]
}
@@ -1204,7 +1204,7 @@ databases:
skiponwindows "unix socket is not available on Windows"
cd repo2
DEFAULT_DB="repo2"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --port $PORT --user dolt >> log.txt 2>&1 &
SERVER_PID=$!
@@ -1228,7 +1228,7 @@ databases:
skiponwindows "unix socket is not available on Windows"
cd repo2
DEFAULT_DB="repo2"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --port $PORT --user dolt --socket > log.txt 2>&1 &
SERVER_PID=$!
@@ -1249,7 +1249,7 @@ databases:
run pwd
REPO_NAME=$output
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --port=$PORT --socket="$REPO_NAME/mysql.sock" --user dolt > log.txt 2>&1 &
SERVER_PID=$!
run wait_for_connection $PORT 5000
@@ -1264,7 +1264,7 @@ databases:
skiponwindows "unix socket is not available on Windows"
cd repo2
DEFAULT_DB="repo2"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
echo "
log_level: debug
@@ -1382,7 +1382,7 @@ s.close()
run dolt init --new-format
[ $status -eq 0 ]
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
dolt sql-server --host 0.0.0.0 --port=$PORT --user dolt &
SERVER_PID=$! # will get killed by teardown_common
sleep 5 # not using python wait so this works on windows
@@ -1420,6 +1420,11 @@ s.close()
run grep "failed to access 'mydb2' database: can no longer find .dolt dir on disk" server_log.txt
[ "${#lines[@]}" -eq 1 ]
# this test sometimes fails because the server is stopped by the above error,
# but stop_sql_server in teardown tries to kill a process that is no longer running,
# so start the server again here and let teardown stop it
start_sql_server
}
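# Aside (editor's sketch): the restart workaround above generalizes to any
# test that may have killed its own server. Assuming the suite's SERVER_PID
# and start_sql_server conventions (taken from the surrounding hunks):
#
#   if ! kill -0 "$SERVER_PID" 2>/dev/null; then
#       start_sql_server   # teardown's stop_sql_server expects a live process
#   fi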
@test "sql-server: dropping database that the server is running in should drop only the db itself not its nested dbs" {

View File

@@ -19,7 +19,7 @@ setup() {
dolt sql -q "CREATE TABLE warehouse(warehouse_id int primary key, warehouse_name longtext)"
dolt sql -q "INSERT into warehouse VALUES (1, 'UPS'), (2, 'TV'), (3, 'Table');"
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
USER="dolt"
dolt sql-server --host 0.0.0.0 --port=$PORT --user=$USER --loglevel=trace &
SERVER_PID=$!
@@ -167,3 +167,17 @@ EOF" -m "postgres"
@test "R RMariaDB client" {
Rscript $BATS_TEST_DIRNAME/r/rmariadb-test.r $USER $PORT $REPO_NAME
}
definePORT() {
  getPORT=""
  for i in {0..9}
  do
    # candidate port: test PID plus retry offset, mapped into [1024, 65535]
    let getPORT="($$ + $i) % (65536-1024) + 1024"
    # skip the candidate if something is already listening on it
    portinuse=$(lsof -i -P -n | grep LISTEN | grep $getPORT | wc -l)
    if [ $portinuse -eq 0 ]
    then
      echo "$getPORT"
      break
    fi
  done
}
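# definePORT replaces the old inline arithmetic: ($$ + $i) % (65536-1024) + 1024
# maps the test's PID, plus a retry offset, into the unprivileged range
# 1024-65535, and lsof rules out candidates that are already listening.
# Callers capture the result by command substitution, exactly as in the
# hunks above:
#
#   PORT=$( definePORT )
#   dolt sql-server --host 0.0.0.0 --port=$PORT --user dolt &
#
# A check-then-bind race remains possible (another process can take the port
# between the lsof probe and the server start); the ten-candidate loop only
# narrows that window, a pragmatic trade-off for parallel CI runs.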

View File

@@ -7,7 +7,7 @@ setup() {
dolt init
let PORT="$$ % (65536-1024) + 1024"
PORT=$( definePORT )
USER="dolt"
dolt sql-server --host 0.0.0.0 --port=$PORT --user=$USER --loglevel=trace &
SERVER_PID=$!