Merge branch 'main' into james/mysql

This commit is contained in:
James Cor
2022-05-23 13:23:46 -07:00
committed by GitHub
16 changed files with 349 additions and 53 deletions
@@ -56,7 +56,7 @@ func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, oursS
return ConflictIndexFromNomsMap(m, vrw), nil
case types.Format_DOLT_1:
kd, oursVD := prolly.MapDescriptorsFromScheam(oursSch)
kd, oursVD := prolly.MapDescriptorsFromSchema(oursSch)
theirsVD := prolly.ValueDescriptorFromSchema(theirsSch)
baseVD := prolly.ValueDescriptorFromSchema(baseSch)
ns := tree.NewNodeStore(prolly.ChunkStoreFromVRW(vrw))
@@ -127,7 +127,7 @@ func NewEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, sch schema.Sc
return IndexFromNomsMap(m, vrw), nil
case types.Format_DOLT_1:
kd, vd := prolly.MapDescriptorsFromScheam(sch)
kd, vd := prolly.MapDescriptorsFromSchema(sch)
ns := tree.NewNodeStore(prolly.ChunkStoreFromVRW(vrw))
m, err := prolly.NewMapFromTuples(ctx, ns, kd, vd)
if err != nil {
@@ -70,7 +70,7 @@ func NewProllyRowConverter(inSch, outSch schema.Schema) (ProllyRowConverter, err
}
}
kd, vd := prolly.MapDescriptorsFromScheam(inSch)
kd, vd := prolly.MapDescriptorsFromSchema(inSch)
return ProllyRowConverter{
inSchema: inSch,
outSchema: outSch,
@@ -200,7 +200,7 @@ func TestAmbiguousColumnResolution(t *testing.T) {
func TestInsertInto(t *testing.T) {
if types.IsFormat_DOLT_1(types.Format_Default) {
for i := len(queries.InsertScripts) - 1; i >= 0; i-- {
//TODO: test uses keyless foreign key logic which is not yet fully implemented
//TODO: on duplicate key broken for foreign keys in new format
if queries.InsertScripts[i].Name == "Insert on duplicate key" {
queries.InsertScripts = append(queries.InsertScripts[:i], queries.InsertScripts[i+1:]...)
}
@@ -426,6 +426,7 @@ func TestDropDatabase(t *testing.T) {
}
func TestCreateForeignKeys(t *testing.T) {
//TODO: fix table alteration so that foreign keys may work once again
skipNewFormat(t)
enginetest.TestCreateForeignKeys(t, newDoltHarness(t))
}
@@ -435,7 +436,27 @@ func TestDropForeignKeys(t *testing.T) {
}
func TestForeignKeys(t *testing.T) {
skipNewFormat(t)
if types.IsFormat_DOLT_1(types.Format_Default) {
//TODO: fix table alteration so that foreign keys may work once again
skippedQueries := []string{
"ALTER TABLE SET NULL on non-nullable column",
"ALTER TABLE RENAME COLUMN",
"ALTER TABLE MODIFY COLUMN type change not allowed",
"ALTER TABLE MODIFY COLUMN type change allowed when lengthening string",
"ALTER TABLE MODIFY COLUMN type change only cares about foreign key columns",
"DROP COLUMN parent",
"DROP COLUMN child",
"Disallow change column to nullable with ON UPDATE SET NULL",
"Disallow change column to nullable with ON DELETE SET NULL",
}
for i := len(queries.ForeignKeyTests) - 1; i >= 0; i-- {
for _, skippedQuery := range skippedQueries {
if queries.ForeignKeyTests[i].Name == skippedQuery {
queries.ForeignKeyTests = append(queries.ForeignKeyTests[:i], queries.ForeignKeyTests[i+1:]...)
}
}
}
}
enginetest.TestForeignKeys(t, newDoltHarness(t))
}
@@ -829,7 +850,6 @@ func TestPersist(t *testing.T) {
}
func TestKeylessUniqueIndex(t *testing.T) {
skipNewFormat(t)
harness := newDoltHarness(t)
enginetest.TestKeylessUniqueIndex(t, harness)
}
@@ -912,6 +932,14 @@ func TestScriptsPrepared(t *testing.T) {
func TestInsertScriptsPrepared(t *testing.T) {
skipPreparedTests(t)
if types.IsFormat_DOLT_1(types.Format_Default) {
for i := len(queries.InsertScripts) - 1; i >= 0; i-- {
//TODO: on duplicate key broken for foreign keys in new format
if queries.InsertScripts[i].Name == "Insert on duplicate key" {
queries.InsertScripts = append(queries.InsertScripts[:i], queries.InsertScripts[i+1:]...)
}
}
}
enginetest.TestInsertScriptsPrepared(t, newDoltHarness(t))
}
@@ -963,11 +991,9 @@ func TestPrepared(t *testing.T) {
}
func TestPreparedInsert(t *testing.T) {
//TODO: on duplicate key broken for foreign keys in new format
skipNewFormat(t)
skipPreparedTests(t)
if types.IsFormat_DOLT_1(types.Format_Default) {
//TODO: test uses keyless foreign key logic which is not yet fully implemented
t.Skip("test uses keyless foreign key logic which is not yet fully implemented")
}
enginetest.TestPreparedInsert(t, newDoltHarness(t))
}
@@ -75,6 +75,8 @@ func GetField(td val.TupleDesc, i int, tup val.Tuple) (v interface{}, err error)
if ok {
v = deserializeGeometry(buf)
}
case val.Hash128Enc:
v, ok = td.GetHash128(i, tup)
default:
panic("unknown val.encoding")
}
@@ -135,6 +137,8 @@ func PutField(tb *val.TupleBuilder, i int, v interface{}) error {
return err
}
tb.PutJSON(i, buf)
case val.Hash128Enc:
tb.PutHash128(i, v.([]byte))
default:
panic(fmt.Sprintf("unknown encoding %v %v", enc, v))
}
@@ -22,6 +22,7 @@ import (
"golang.org/x/sync/errgroup"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/val"
)
@@ -32,6 +33,7 @@ type prollyIndexIter struct {
idx DoltIndex
indexIter prolly.MapIter
primary prolly.Map
keyless bool
// pkMap transforms indexRows index keys
// into primary index keys
@@ -61,7 +63,8 @@ func newProllyIndexIter(ctx *sql.Context, idx DoltIndex, rng prolly.Range, dprim
kd, _ := primary.Descriptors()
pkBld := val.NewTupleBuilder(kd)
pkMap := ordinalMappingFromIndex(idx)
km, vm := projectionMappings(idx.Schema(), idx.Schema().GetAllCols().GetColumnNames())
sch := idx.Schema()
km, vm := projectionMappings(sch, sch.GetAllCols().GetColumnNames())
eg, c := errgroup.WithContext(ctx)
@@ -69,6 +72,7 @@ func newProllyIndexIter(ctx *sql.Context, idx DoltIndex, rng prolly.Range, dprim
idx: idx,
indexIter: indexIter,
primary: primary,
keyless: schema.IsKeyless(sch),
pkBld: pkBld,
pkMap: pkMap,
eg: eg,
@@ -113,6 +117,12 @@ func (p prollyIndexIter) Next2(ctx *sql.Context, frame *sql.RowFrame) error {
func (p prollyIndexIter) queueRows(ctx context.Context) error {
defer close(p.rowChan)
// Keyless rows have hash and cardinality values which will not be included, but are a part of the keyMap/valMap
rLen := len(p.keyMap) + len(p.valMap)
if p.keyless {
rLen -= 2
}
for {
idxKey, _, err := p.indexIter.Next(ctx)
if err != nil {
@@ -125,7 +135,7 @@ func (p prollyIndexIter) queueRows(ctx context.Context) error {
}
pk := p.pkBld.Build(sharePool)
r := make(sql.Row, len(p.keyMap)+len(p.valMap))
r := make(sql.Row, rLen)
err = p.primary.Get(ctx, pk, func(key, value val.Tuple) error {
return p.rowFromTuples(key, value, r)
})
@@ -182,6 +192,12 @@ func ordinalMappingFromIndex(idx DoltIndex) (m val.OrdinalMapping) {
def := idx.Schema().Indexes().GetByName(idx.ID())
pks := def.PrimaryKeyTags()
if len(pks) == 0 { // keyless index
m = make(val.OrdinalMapping, 1)
m[0] = len(def.AllTags())
return m
}
m = make(val.OrdinalMapping, len(pks))
for i, pk := range pks {
@@ -55,20 +55,16 @@ func (n prollyFkIndexer) Partitions(ctx *sql.Context) (sql.PartitionIter, error)
// PartitionRows implements the interface sql.Table.
func (n prollyFkIndexer) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.RowIter, error) {
var idxWriter prollyIndexWriter
var idxWriter indexWriter
for _, secondaryWriter := range n.writer.secondary {
if secondaryWriter.name == n.index.ID() {
if secondaryWriter.Name() == n.index.ID() {
idxWriter = secondaryWriter
break
}
}
if idxWriter.name == "" {
if idxWriter == nil {
return nil, fmt.Errorf("unable to find writer for index `%s`", n.index.ID())
}
rangeIter, err := idxWriter.mut.IterRange(ctx, n.pRange)
if err != nil {
return nil, err
}
idxToPkMap := make(map[int]int)
pkColToOrdinal := make(map[int]int)
@@ -82,12 +78,20 @@ func (n prollyFkIndexer) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.R
}
if primary, ok := n.writer.primary.(prollyIndexWriter); ok {
rangeIter, err := idxWriter.(prollyIndexWriter).mut.IterRange(ctx, n.pRange)
if err != nil {
return nil, err
}
return &prollyFkPkRowIter{
rangeIter: rangeIter,
idxToPkMap: idxToPkMap,
primary: primary,
}, nil
} else {
rangeIter, err := idxWriter.(prollyKeylessSecondaryWriter).mut.IterRange(ctx, n.pRange)
if err != nil {
return nil, err
}
return &prollyFkKeylessRowIter{
rangeIter: rangeIter,
primary: n.writer.primary.(prollyKeylessWriter),
@@ -161,9 +165,12 @@ func (iter prollyFkKeylessRowIter) Next(ctx *sql.Context) (sql.Row, error) {
if k == nil {
return nil, io.EOF
}
hashId := k.GetField(k.Count() - 1)
iter.primary.valBld.PutHash128(0, hashId)
primaryKey := iter.primary.valBld.Build(sharePool)
nextRow := make(sql.Row, len(iter.primary.valMap))
err = iter.primary.mut.Get(ctx, k, func(tblKey, tblVal val.Tuple) error {
err = iter.primary.mut.Get(ctx, primaryKey, func(tblKey, tblVal val.Tuple) error {
for from := range iter.primary.valMap {
to := iter.primary.valMap.MapOrdinal(from)
if nextRow[to], err = index.GetField(iter.primary.valBld.Desc, from+1, tblVal); err != nil {
@@ -48,10 +48,10 @@ func getPrimaryProllyWriter(ctx context.Context, t *doltdb.Table, sqlSch sql.Sch
}, nil
}
func getKeylessProllyWriter(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) (indexWriter, error) {
func getPrimaryKeylessProllyWriter(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) (prollyKeylessWriter, error) {
idx, err := t.GetRowData(ctx)
if err != nil {
return prollyIndexWriter{}, err
return prollyKeylessWriter{}, err
}
m := durable.ProllyMapFromIndex(idx)
@@ -67,6 +67,7 @@ func getKeylessProllyWriter(ctx context.Context, t *doltdb.Table, sqlSch sql.Sch
}
type indexWriter interface {
Name() string
Map(ctx context.Context) (prolly.Map, error)
Insert(ctx context.Context, sqlRow sql.Row) error
Delete(ctx context.Context, sqlRow sql.Row) error
@@ -90,6 +91,10 @@ type prollyIndexWriter struct {
var _ indexWriter = prollyIndexWriter{}
func (m prollyIndexWriter) Name() string {
return m.name
}
func (m prollyIndexWriter) Map(ctx context.Context) (prolly.Map, error) {
return m.mut.Map(ctx)
}
@@ -239,6 +244,10 @@ type prollyKeylessWriter struct {
var _ indexWriter = prollyKeylessWriter{}
func (k prollyKeylessWriter) Name() string {
return k.name
}
func (k prollyKeylessWriter) Map(ctx context.Context) (prolly.Map, error) {
return k.mut.Map(ctx)
}
@@ -340,3 +349,123 @@ func (k prollyKeylessWriter) tuplesFromRow(sqlRow sql.Row) (hashId, value val.Tu
hashId = val.HashTupleFromValue(sharePool, value)
return
}
// prollyKeylessSecondaryWriter writes entries to a secondary index of a
// keyless table. Index keys are built from mapped row fields plus a trailing
// hash128 of the row, obtained through the shared primary writer.
type prollyKeylessSecondaryWriter struct {
	// name is the name of the secondary index this writer targets.
	name string
	// mut is the mutable secondary index map receiving edits.
	mut prolly.MutableMap
	// primary is the keyless primary-table writer; Insert/Delete use it to
	// derive the row's hash id and (on delete) its stored cardinality.
	primary prollyKeylessWriter
	// unique marks whether this index enforces uniqueness on insert.
	unique bool
	// keyBld builds index key tuples; keyMap maps key-field ordinals to
	// positions in the incoming sql.Row.
	keyBld *val.TupleBuilder
	keyMap val.OrdinalMapping
	// valBld/valMap mirror keyBld/keyMap for the value side of the index.
	valBld *val.TupleBuilder
	valMap val.OrdinalMapping
}

// Compile-time assertion that prollyKeylessSecondaryWriter implements indexWriter.
var _ indexWriter = prollyKeylessSecondaryWriter{}
// Name implements the interface indexWriter. It returns the name of the
// secondary index this writer targets.
func (writer prollyKeylessSecondaryWriter) Name() string {
	return writer.name
}
// Map implements the interface indexWriter by delegating to the underlying
// mutable index map.
func (writer prollyKeylessSecondaryWriter) Map(ctx context.Context) (prolly.Map, error) {
	return writer.mut.Map(ctx)
}
// Insert implements the interface indexWriter. It builds the index key from
// the mapped fields of sqlRow plus the row's hash128, then stores it unless
// an identical key already exists. A duplicate key is an error only for
// unique indexes; otherwise it is a no-op.
func (writer prollyKeylessSecondaryWriter) Insert(ctx context.Context, sqlRow sql.Row) error {
	// Copy the indexed row fields into the key builder.
	for pos := range writer.keyMap {
		srcIdx := writer.keyMap.MapOrdinal(pos)
		if err := index.PutField(writer.keyBld, pos, sqlRow[srcIdx]); err != nil {
			return err
		}
	}

	// Derive the row's hash id via the primary writer and append it as the
	// final key field.
	hashId, _, err := writer.primary.tuplesFromRow(sqlRow)
	if err != nil {
		return err
	}
	lastField := len(writer.keyBld.Desc.Types) - 1
	writer.keyBld.PutHash128(lastField, hashId.GetField(0))
	indexKey := writer.keyBld.Build(sharePool)

	exists, err := writer.mut.Has(ctx, indexKey)
	if err != nil {
		return err
	}
	if !exists {
		return writer.mut.Put(ctx, indexKey, val.EmptyTuple)
	}
	if writer.unique {
		return sql.ErrUniqueKeyViolation.New()
	}
	return nil
}
// Delete implements the interface indexWriter. It removes the index entry
// for sqlRow only when this is the last remaining copy of the row; keyless
// tables track duplicate rows through a cardinality value on the primary.
func (writer prollyKeylessSecondaryWriter) Delete(ctx context.Context, sqlRow sql.Row) error {
	hashId, cardRow, err := writer.primary.tuplesFromRow(sqlRow)
	if err != nil {
		return err
	}

	// Replace cardRow with the stored cardinality tuple for this hash, if one
	// exists in the primary.
	if err = writer.primary.mut.Get(ctx, hashId, func(k, v val.Tuple) error {
		if k != nil {
			cardRow = v
		}
		return nil
	}); err != nil {
		return err
	}

	// Assemble the index key: mapped row fields plus the trailing hash128.
	for pos := range writer.keyMap {
		srcIdx := writer.keyMap.MapOrdinal(pos)
		if err := index.PutField(writer.keyBld, pos, sqlRow[srcIdx]); err != nil {
			return err
		}
	}
	lastField := len(writer.keyBld.Desc.Types) - 1
	writer.keyBld.PutHash128(lastField, hashId.GetField(0))
	indexKey := writer.keyBld.Build(sharePool)

	// Indexes are always updated before the primary table, so we check if the
	// deletion will cause the row to be removed from the primary. If not, then
	// we just return.
	if val.ReadKeylessCardinality(cardRow) > 1 {
		return nil
	}
	return writer.mut.Delete(ctx, indexKey)
}
// Update implements the interface indexWriter by deleting the entry for
// oldRow and then inserting one for newRow.
func (writer prollyKeylessSecondaryWriter) Update(ctx context.Context, oldRow sql.Row, newRow sql.Row) error {
	if err := writer.Delete(ctx, oldRow); err != nil {
		return err
	}
	return writer.Insert(ctx, newRow)
}
// Commit implements the interface indexWriter. It applies this writer's
// pending edits to the underlying index map.
func (writer prollyKeylessSecondaryWriter) Commit(ctx context.Context) error {
	return writer.mut.ApplyPending(ctx)
}
// Discard implements the interface indexWriter. It drops this writer's
// pending edits without applying them; DiscardPending reports no error.
func (writer prollyKeylessSecondaryWriter) Discard(ctx context.Context) error {
	writer.mut.DiscardPending(ctx)
	return nil
}
// HasEdits implements the interface indexWriter by delegating to the
// underlying mutable map.
func (writer prollyKeylessSecondaryWriter) HasEdits(ctx context.Context) bool {
	return writer.mut.HasEdits()
}
// UniqueKeyError implements the interface indexWriter. Rich unique-key
// errors are not yet implemented for keyless indexes; until then, include
// the index name so the failure can at least be attributed.
func (writer prollyKeylessSecondaryWriter) UniqueKeyError(ctx context.Context, sqlRow sql.Row) error {
	//TODO: figure out what should go here
	return fmt.Errorf("keyless index %q does not yet know how to handle unique key errors", writer.name)
}
@@ -26,6 +26,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/val"
)
@@ -36,7 +37,7 @@ type prollyTableWriter struct {
dbName string
primary indexWriter
secondary []prollyIndexWriter
secondary []indexWriter
tbl *doltdb.Table
sch schema.Schema
@@ -52,14 +53,14 @@ type prollyTableWriter struct {
var _ TableWriter = &prollyTableWriter{}
func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) ([]prollyIndexWriter, error) {
func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema) ([]indexWriter, error) {
s, err := t.GetIndexSet(ctx)
if err != nil {
return nil, err
}
definitions := sch.Indexes().AllIndexes()
writers := make([]prollyIndexWriter, len(definitions))
writers := make([]indexWriter, len(definitions))
for i, def := range definitions {
idxRows, err := s.GetIndex(ctx, sch, def.Name())
@@ -84,6 +85,41 @@ func getSecondaryProllyIndexWriters(ctx context.Context, t *doltdb.Table, sqlSch
return writers, nil
}
// getSecondaryKeylessProllyWriters builds one indexWriter per secondary index
// defined on the keyless table t. Every writer shares the given primary
// writer, which it uses to derive row hashes and cardinalities.
func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlSch sql.Schema, sch schema.Schema, primary prollyKeylessWriter) ([]indexWriter, error) {
	indexSet, err := t.GetIndexSet(ctx)
	if err != nil {
		return nil, err
	}

	defs := sch.Indexes().AllIndexes()
	writers := make([]indexWriter, len(defs))
	for i, def := range defs {
		idxRows, err := indexSet.GetIndex(ctx, sch, def.Name())
		if err != nil {
			return nil, err
		}
		// Extend the index map's key with the trailing hash128 column used
		// by keyless indexes.
		idxMap := prolly.ConvertToKeylessIndex(durable.ProllyMapFromIndex(idxRows))

		keyMap, valMap := ordinalMappingsFromSchema(sqlSch, def.Schema())
		keyDesc, valDesc := idxMap.Descriptors()
		writers[i] = prollyKeylessSecondaryWriter{
			name:    def.Name(),
			mut:     idxMap.Mutate(),
			primary: primary,
			unique:  def.IsUnique(),
			keyBld:  val.NewTupleBuilder(keyDesc),
			keyMap:  keyMap,
			valBld:  val.NewTupleBuilder(valDesc),
			valMap:  valMap,
		}
	}
	return writers, nil
}
// Insert implements TableWriter.
func (w *prollyTableWriter) Insert(ctx *sql.Context, sqlRow sql.Row) error {
for _, wr := range w.secondary {
@@ -207,17 +243,25 @@ func (w *prollyTableWriter) Reset(ctx context.Context, sess *prollyWriteSession,
}
aiCol := autoIncrementColFromSchema(sch)
var newPrimary indexWriter
var newSecondaries []indexWriter
if _, ok := w.primary.(prollyKeylessWriter); ok {
newPrimary, err = getKeylessProllyWriter(ctx, tbl, sqlSch.Schema, sch)
newPrimary, err = getPrimaryKeylessProllyWriter(ctx, tbl, sqlSch.Schema, sch)
if err != nil {
return err
}
newSecondaries, err = getSecondaryKeylessProllyWriters(ctx, tbl, sqlSch.Schema, sch, newPrimary.(prollyKeylessWriter))
if err != nil {
return err
}
} else {
newPrimary, err = getPrimaryProllyWriter(ctx, tbl, sqlSch.Schema, sch)
}
if err != nil {
return err
}
newSecondaries, err := getSecondaryProllyIndexWriters(ctx, tbl, sqlSch.Schema, sch)
if err != nil {
return err
if err != nil {
return err
}
newSecondaries, err = getSecondaryProllyIndexWriters(ctx, tbl, sqlSch.Schema, sch)
if err != nil {
return err
}
}
w.tbl = tbl
@@ -233,12 +277,12 @@ func (w *prollyTableWriter) Reset(ctx context.Context, sess *prollyWriteSession,
func (w *prollyTableWriter) table(ctx context.Context) (t *doltdb.Table, err error) {
// flush primary row storage
m, err := w.primary.Map(ctx)
pm, err := w.primary.Map(ctx)
if err != nil {
return nil, err
}
t, err = w.tbl.UpdateRows(ctx, durable.IndexFromProllyMap(m))
t, err = w.tbl.UpdateRows(ctx, durable.IndexFromProllyMap(pm))
if err != nil {
return nil, err
}
@@ -249,14 +293,14 @@ func (w *prollyTableWriter) table(ctx context.Context) (t *doltdb.Table, err err
return nil, err
}
for _, wr := range w.secondary {
m, err := wr.mut.Map(ctx)
for _, wrSecondary := range w.secondary {
sm, err := wrSecondary.Map(ctx)
if err != nil {
return nil, err
}
idx := durable.IndexFromProllyMap(m)
idx := durable.IndexFromProllyMap(sm)
s, err = s.PutIndex(ctx, wr.name, idx)
s, err = s.PutIndex(ctx, wrSecondary.Name(), idx)
if err != nil {
return nil, err
}
@@ -66,18 +66,25 @@ func (s *prollyWriteSession) GetTableWriter(ctx context.Context, table, db strin
autoCol := autoIncrementColFromSchema(sch)
var pw indexWriter
var sws []indexWriter
if schema.IsKeyless(sch) {
pw, err = getKeylessProllyWriter(ctx, t, pkSch.Schema, sch)
pw, err = getPrimaryKeylessProllyWriter(ctx, t, pkSch.Schema, sch)
if err != nil {
return nil, err
}
sws, err = getSecondaryKeylessProllyWriters(ctx, t, pkSch.Schema, sch, pw.(prollyKeylessWriter))
if err != nil {
return nil, err
}
} else {
pw, err = getPrimaryProllyWriter(ctx, t, pkSch.Schema, sch)
}
if err != nil {
return nil, err
}
sws, err := getSecondaryProllyIndexWriters(ctx, t, pkSch.Schema, sch)
if err != nil {
return nil, err
if err != nil {
return nil, err
}
sws, err = getSecondaryProllyIndexWriters(ctx, t, pkSch.Schema, sch)
if err != nil {
return nil, err
}
}
twr := &prollyTableWriter{
+26 -5
View File
@@ -20,6 +20,7 @@ import (
"github.com/dolthub/vitess/go/vt/proto/query"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
@@ -49,7 +50,7 @@ func MapFromValue(v types.Value, sch schema.Schema, vrw types.ValueReadWriter) M
func ConflictMapFromValue(v types.Value, ourSchema, theirSchema, baseSchema schema.Schema, vrw types.ValueReadWriter) ConflictMap {
root := NodeFromValue(v)
kd, ourVD := MapDescriptorsFromScheam(ourSchema)
kd, ourVD := MapDescriptorsFromSchema(ourSchema)
theirVD := ValueDescriptorFromSchema(theirSchema)
baseVD := ValueDescriptorFromSchema(baseSchema)
ns := tree.NewNodeStore(ChunkStoreFromVRW(vrw))
@@ -66,7 +67,7 @@ func ChunkStoreFromVRW(vrw types.ValueReadWriter) chunks.ChunkStore {
panic("unknown ValueReadWriter")
}
func MapDescriptorsFromScheam(sch schema.Schema) (kd, vd val.TupleDesc) {
func MapDescriptorsFromSchema(sch schema.Schema) (kd, vd val.TupleDesc) {
kd = KeyDescriptorFromSchema(sch)
vd = ValueDescriptorFromSchema(sch)
return
@@ -80,7 +81,7 @@ func KeyDescriptorFromSchema(sch schema.Schema) val.TupleDesc {
var tt []val.Type
_ = sch.GetPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
tt = append(tt, val.Type{
Enc: encodingFromSqlType(col.TypeInfo.ToSqlType().Type()),
Enc: encodingFromTypeInfo(col.TypeInfo),
Nullable: columnNullable(col),
})
return
@@ -105,7 +106,7 @@ func ValueDescriptorFromSchema(sch schema.Schema) val.TupleDesc {
_ = sch.GetNonPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
tt = append(tt, val.Type{
Enc: encodingFromSqlType(col.TypeInfo.ToSqlType().Type()),
Enc: encodingFromTypeInfo(col.TypeInfo),
Nullable: col.IsNullable(),
})
return
@@ -113,8 +114,28 @@ func ValueDescriptorFromSchema(sch schema.Schema) val.TupleDesc {
return val.NewTupleDescriptor(tt...)
}
// ConvertToKeylessIndex converts the given map to a keyless index map by
// extending its key descriptor with a trailing hash128 column. The map's
// tuple storage is reused; only the key ordering descriptor changes.
func ConvertToKeylessIndex(m Map) Map {
	types := make([]val.Type, 0, len(m.keyDesc.Types)+1)
	types = append(types, m.keyDesc.Types...)
	types = append(types, val.Type{Enc: val.Hash128Enc})
	keyDesc := val.NewTupleDescriptorWithComparator(m.keyDesc.Comparator(), types...)

	tuples := m.tuples
	tuples.order = keyDesc
	return Map{
		tuples:  tuples,
		keyDesc: keyDesc,
		valDesc: m.valDesc,
	}
}
// todo(andy): move this to typeinfo
func encodingFromSqlType(typ query.Type) val.Encoding {
func encodingFromTypeInfo(ti typeinfo.TypeInfo) val.Encoding {
if ti.GetTypeIdentifier() == typeinfo.UuidTypeIdentifier {
return val.Hash128Enc
}
typ := ti.ToSqlType().Type()
// todo(andy): replace temp encodings
switch typ {
case query.Type_DECIMAL:
+4
View File
@@ -227,6 +227,10 @@ func randomField(tb *val.TupleBuilder, idx int, typ val.Type) {
buf := make([]byte, (testRand.Int63()%40)+10)
testRand.Read(buf)
tb.PutByteString(idx, buf)
case val.Hash128Enc:
buf := make([]byte, 16)
testRand.Read(buf)
tb.PutHash128(idx, buf)
default:
panic("unknown encoding")
}
+16
View File
@@ -127,6 +127,8 @@ func sizeFromType(t Type) (ByteSize, bool) {
return timestampSize, true
case YearEnc:
return int16Size, true
case Hash128Enc:
return hash128Size, true
default:
return 0, false
}
@@ -406,6 +408,20 @@ func compareByteString(l, r []byte) int {
return bytes.Compare(l, r)
}
// readHash128 interprets val as a hash128 field, asserting it is exactly
// hash128Size bytes. The returned slice aliases val; no copy is made.
func readHash128(val []byte) []byte {
	expectSize(val, hash128Size)
	return val
}
// writeHash128 copies val into buf, asserting buf is exactly hash128Size
// bytes. NOTE(review): val itself is not size-checked — a shorter val leaves
// the tail of buf unchanged; confirm callers always pass 16 bytes.
func writeHash128(buf, val []byte) {
	expectSize(buf, hash128Size)
	copy(buf, val)
}
// compareHash128 lexicographically compares two hash128 fields, returning
// -1, 0, or +1 per bytes.Compare.
func compareHash128(l, r []byte) int {
	return bytes.Compare(l, r)
}
func writeRaw(buf, val []byte) {
expectSize(buf, ByteSize(len(val)))
copy(buf, val)
+8
View File
@@ -227,6 +227,14 @@ func (tb *TupleBuilder) PutGeometry(i int, v []byte) {
tb.pos += sz
}
// PutHash128 writes a hash128 to the ith field of the Tuple being built.
// The field is carved out of the builder's buffer at the current position,
// which then advances by hash128Size.
func (tb *TupleBuilder) PutHash128(i int, v []byte) {
	tb.Desc.expectEncoding(i, Hash128Enc)
	start, end := tb.pos, tb.pos+hash128Size
	tb.fields[i] = tb.buf[start:end]
	writeHash128(tb.fields[i], v)
	tb.pos = end
}
// PutRaw writes a []byte to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutRaw(i int, buf []byte) {
if buf == nil {
+2
View File
@@ -103,6 +103,8 @@ func compare(typ Type, left, right []byte) int {
return compareString(readString(left), readString(right))
case ByteStringEnc:
return compareByteString(readByteString(left), readByteString(right))
case Hash128Enc:
return compareHash128(readHash128(left), readHash128(right))
default:
panic("unknown encoding")
}
+12
View File
@@ -331,6 +331,16 @@ func (td TupleDesc) GetGeometry(i int, tup Tuple) (v []byte, ok bool) {
return
}
// GetHash128 reads the ith field of tup as a hash128. The second result
// reports whether the field was present (non-nil).
func (td TupleDesc) GetHash128(i int, tup Tuple) ([]byte, bool) {
	td.expectEncoding(i, Hash128Enc)
	b := td.GetField(i, tup)
	if b == nil {
		return nil, false
	}
	return b, true
}
func (td TupleDesc) expectEncoding(i int, encodings ...Encoding) {
for _, enc := range encodings {
if enc == td.Types[i].Enc {
@@ -402,6 +412,8 @@ func (td TupleDesc) FormatValue(i int, value []byte) string {
return readString(value)
case ByteStringEnc:
return string(value)
case Hash128Enc:
return string(value)
default:
return string(value)
}