dead code

This commit is contained in:
Zach Musgrave
2026-02-12 16:00:31 -08:00
parent 6d263e143c
commit 6765ca756c
10 changed files with 0 additions and 437 deletions

View File

@@ -22,7 +22,6 @@
package types
import (
"fmt"
"math"
)
@@ -34,53 +33,3 @@ func Round(v Value) Value {
return val
}
}
// Increment returns v with its numeric value raised by one. Int, Uint,
// and Float values are incremented in their native representation; any
// other Value kind is returned unchanged.
func Increment(v Value) Value {
	switch x := v.(type) {
	case Int:
		return Int(int64(x) + 1)
	case Uint:
		return Uint(uint64(x) + 1)
	case Float:
		return Float(float64(x) + 1)
	}
	return v
}
// float64IsInt reports whether f carries no fractional part, i.e.
// truncating it toward zero leaves it unchanged.
func float64IsInt(f float64) bool {
	return f == math.Trunc(f)
}
// float64ToIntExp decomposes f into an integer mantissa and a binary
// exponent such that f == frac * 2^exp. It panics on NaN and ±Inf,
// which admit no such decomposition.
func float64ToIntExp(f float64) (int64, int) {
	if math.IsNaN(f) || math.IsInf(f, 0) {
		panic(fmt.Errorf("%v is not a supported number", f))
	}
	if f == 0 {
		return 0, 0
	}
	neg := math.Signbit(f)
	// Frexp yields a mantissa in [0.5, 1); double it (shrinking the
	// exponent to compensate) until no fractional part remains.
	mant, exp := math.Frexp(math.Abs(f))
	for !float64IsInt(mant) {
		mant *= 2
		exp--
	}
	if neg {
		mant = -mant
	}
	return int64(mant), exp
}
// fracExpToFloat returns frac * 2**exp.
//
// math.Ldexp is the exact inverse of the Frexp-style decomposition
// produced by float64ToIntExp: it scales by a power of two directly in
// the float's exponent bits, avoiding the extra rounding step that a
// math.Pow multiplication can introduce.
func fracExpToFloat(frac int64, exp int) float64 {
	return math.Ldexp(float64(frac), exp)
}

View File

@@ -149,11 +149,3 @@ func (v Geometry) skip(nbf *NomsBinFormat, b *binaryNomsReader) {
// HumanReadableString delegates to the wrapped inner geometry value's
// human-readable rendering.
func (v Geometry) HumanReadableString() string {
	inner := v.Inner
	return inner.HumanReadableString()
}
// EncodeGeometryWKB serializes v and returns its WKB payload: the noms
// encoding with the leading NomsKind tag byte stripped.
func EncodeGeometryWKB(v Geometry) ([]byte, error) {
	w := &binaryNomsWriter{make([]byte, 128), 0}
	err := v.writeTo(w, nil)
	if err != nil {
		return nil, err
	}
	// The first encoded byte is the NomsKind tag, not WKB data.
	return w.data()[1:], nil
}

View File

@@ -94,90 +94,3 @@ func TestIncrementalLoadList(t *testing.T) {
assert.Equal(expectedCount+chunkReads[i], cs.Reads())
}
}
// SkipTestIncrementalLoadSet verifies that iterating a Set read back
// from storage only fetches out-of-line chunks as iteration reaches
// them (skipped test).
func SkipTestIncrementalLoadSet(t *testing.T) {
	assert := assert.New(t)
	storage := &chunks.TestStorage{}
	cs := storage.NewView()
	vs := NewValueStore(cs)

	expected, err := NewSet(context.Background(), vs, getTestVals(vs)...)
	require.NoError(t, err)
	r, err := vs.WriteValue(context.Background(), expected)
	require.NoError(t, err)

	readBack, err := vs.ReadValue(context.Background(), r.TargetHash())
	require.NoError(t, err)
	actual := readBack.(Set)

	// Materializing the root set costs exactly one chunk read; each
	// out-of-line element adds a read as iteration touches it.
	reads := cs.Reads()
	assert.Equal(1, reads)
	err = actual.Iter(context.Background(), func(v Value) (bool, error) {
		reads += isEncodedOutOfLine(v)
		assert.Equal(reads, cs.Reads())
		return false, nil
	})
	require.NoError(t, err)
}
// SkipTestIncrementalLoadMap verifies that iterating a Map read back
// from storage only fetches out-of-line chunks as iteration reaches
// each key and value (skipped test).
func SkipTestIncrementalLoadMap(t *testing.T) {
	assert := assert.New(t)
	storage := &chunks.TestStorage{}
	cs := storage.NewView()
	vs := NewValueStore(cs)

	expected, err := NewMap(context.Background(), vs, getTestVals(vs)...)
	require.NoError(t, err)
	r, err := vs.WriteValue(context.Background(), expected)
	require.NoError(t, err)

	readBack, err := vs.ReadValue(context.Background(), r.TargetHash())
	require.NoError(t, err)
	actual := readBack.(Map)

	// One read to materialize the root; each out-of-line key or value
	// costs one more read when the iteration reaches it.
	reads := cs.Reads()
	assert.Equal(1, reads)
	err = actual.Iter(context.Background(), func(k, v Value) (bool, error) {
		reads += isEncodedOutOfLine(k)
		reads += isEncodedOutOfLine(v)
		assert.Equal(reads, cs.Reads())
		return false, nil
	})
	require.NoError(t, err)
}
// SkipTestIncrementalAddRef verifies that a Ref stored inside a List is
// only fetched from the chunk store when it is actually resolved, and
// that a second resolution is served from cache (skipped test).
// NOTE: the cs.Reads() assertions depend on the exact order of
// operations below; do not reorder.
func SkipTestIncrementalAddRef(t *testing.T) {
	assert := assert.New(t)
	ts := &chunks.TestStorage{}
	cs := ts.NewView()
	vs := NewValueStore(cs)
	expectedItem := Float(42)
	ref, err := vs.WriteValue(context.Background(), expectedItem)
	require.NoError(t, err)
	expected, err := NewList(context.Background(), vs, ref)
	require.NoError(t, err)
	// Write the list itself; |ref| now points at the list chunk.
	ref, err = vs.WriteValue(context.Background(), expected)
	require.NoError(t, err)
	actualVar, err := vs.ReadValue(context.Background(), ref.TargetHash())
	require.NoError(t, err)
	// Reading the list costs one chunk read; the inner ref is not yet
	// resolved.
	assert.Equal(1, cs.Reads())
	assert.True(expected.Equals(actualVar))
	actual := actualVar.(List)
	actualItem, err := actual.Get(context.Background(), 0)
	require.NoError(t, err)
	// Resolving the element behind the ref costs a second read.
	assert.Equal(2, cs.Reads())
	assert.True(expectedItem.Equals(actualItem))
	// do it again to make sure caching works.
	actualItem, err = actual.Get(context.Background(), 0)
	require.NoError(t, err)
	// Read count unchanged: the second Get was served from cache.
	assert.Equal(2, cs.Reads())
	assert.True(expectedItem.Equals(actualItem))
}

View File

@@ -42,16 +42,6 @@ func NewJSONDoc(nbf *NomsBinFormat, vrw ValueReadWriter, value Value) (JSON, err
return JSON{valueImpl{vrw, nbf, w.data(), nil}}, nil
}
// NewTestJSONDoc builds a JSON value directly from a raw byte payload,
// writing only the JSONKind tag followed by the bytes as a string.
// Intended for tests.
func NewTestJSONDoc(nbf *NomsBinFormat, vrw ValueReadWriter, buf []byte) (JSON, error) {
	w := newBinaryNomsWriter()
	err := JSONKind.writeTo(&w, nbf)
	if err != nil {
		return emptyJSONDoc(nbf), err
	}
	w.writeString(string(buf))
	return JSON{valueImpl{vrw, nbf, w.data(), nil}}, nil
}
// emptyJSONDoc creates an empty JSON value.
func emptyJSONDoc(nbf *NomsBinFormat) JSON {
w := newBinaryNomsWriter()
@@ -148,12 +138,6 @@ func (t JSON) Kind() NomsKind {
return JSONKind
}
// decoderSkipToFields returns a decoder positioned just past the kind
// tag, along with the field count (always 1 for JSON values).
func (t JSON) decoderSkipToFields() (valueDecoder, uint64) {
	d := t.decoder()
	d.skipKind()
	return d, uint64(1)
}
// Len implements the Value interface.
func (t JSON) Len() uint64 {
// TODO(andy): is this ever 0?

View File

@@ -26,8 +26,6 @@ import (
"errors"
"fmt"
"golang.org/x/sync/errgroup"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -98,98 +96,6 @@ func NewMap(ctx context.Context, vrw ValueReadWriter, kv ...Value) (Map, error)
return newMap(seq.(orderedSequence)), nil
}
// NewStreamingMap takes an input channel of values and returns a value that
// will produce a finished Map when |.Wait()| is called. Values sent to the
// input channel must be alternating keys and values. (e.g. k1, v1, k2,
// v2...). Moreover keys need to be added to the channel in Noms sort order;
// adding key values to the input channel out of order will result in an error.
// Once the input channel is closed by the caller, a finished Map will be
// available from the |Wait| call.
//
// See graph_builder.go for building collections with values that are not in
// order.
func NewStreamingMap(ctx context.Context, vrw ValueReadWriter, kvs <-chan Value) *StreamingMap {
	d.PanicIfTrue(vrw == nil)
	sm := &StreamingMap{}
	// One reader goroutine drains kvs until it is closed or the errgroup
	// context is cancelled; any error it returns surfaces from Wait().
	sm.eg, sm.egCtx = errgroup.WithContext(ctx)
	sm.eg.Go(func() error {
		m, err := readMapInput(sm.egCtx, vrw, kvs)
		sm.m = m
		return err
	})
	return sm
}
// StreamingMap is a handle to a Map being assembled asynchronously from
// a channel of alternating key/value Values; see NewStreamingMap.
type StreamingMap struct {
	eg    *errgroup.Group // runs the single channel-reader goroutine
	egCtx context.Context // done when the reader stops or the parent ctx is cancelled
	m     Map             // result; safe to read only after eg.Wait() returns
}
// Wait blocks until the input channel has been fully consumed, then
// returns the finished Map along with any error that stopped its
// construction.
func (sm *StreamingMap) Wait() (Map, error) {
	// sm.m may only be read after Wait proves the reader goroutine is done.
	waitErr := sm.eg.Wait()
	return sm.m, waitErr
}
// Done returns a signal channel which is closed once the StreamingMap is no
// longer reading from the key/values channel. A send to the key/value channel
// should be in a select with a read from this channel to ensure that the send
// does not deadlock.
func (sm *StreamingMap) Done() <-chan struct{} {
	done := sm.egCtx.Done()
	return done
}
// readMapInput drains kvs — which must deliver alternating key, value
// pairs with keys in strictly ascending Noms order — into a map
// sequence chunker and returns the finished Map. It returns
// ErrKeysNotOrdered if a key is not strictly greater than its
// predecessor, and ctx.Err() if the context is cancelled before the
// channel is closed.
func readMapInput(ctx context.Context, vrw ValueReadWriter, kvs <-chan Value) (Map, error) {
	ch, err := newEmptyMapSequenceChunker(ctx, vrw)
	if err != nil {
		return EmptyMap, err
	}
	var lastK Value
	// nextIsKey alternates: the first value received is a key.
	nextIsKey := true
	var k Value
LOOP:
	for {
		select {
		case v, ok := <-kvs:
			if !ok {
				// Channel closed by the sender: all pairs received.
				break LOOP
			}
			if nextIsKey {
				k = v
				if lastK != nil {
					// Enforce strict ordering between consecutive keys.
					isLess, err := lastK.Less(ctx, vrw.Format(), k)
					if err != nil {
						return EmptyMap, err
					}
					if !isLess {
						return EmptyMap, ErrKeysNotOrdered
					}
				}
				lastK = k
				nextIsKey = false
			} else {
				// v is the value belonging to the key received just before.
				_, err := ch.Append(ctx, mapEntry{key: k, value: v})
				if err != nil {
					return EmptyMap, err
				}
				nextIsKey = true
			}
		case <-ctx.Done():
			return EmptyMap, ctx.Err()
		}
	}
	seq, err := ch.Done(ctx)
	if err != nil {
		return EmptyMap, err
	}
	return newMap(seq.(orderedSequence)), nil
}
// Diff computes the diff from |last| to |m| using the top-down algorithm,
// which completes as fast as possible while taking longer to return early
// results than left-to-right.

View File

@@ -39,16 +39,6 @@ type MapIterator interface {
Next(ctx context.Context) (k, v Value, err error)
}
// EmptyMapIterator is a MapIterator over no entries: Next signals
// exhaustion with all-nil returns, NextTuple with io.EOF.
type EmptyMapIterator struct{}

func (EmptyMapIterator) Next(ctx context.Context) (k, v Value, err error) {
	return nil, nil, nil
}

func (EmptyMapIterator) NextTuple(ctx context.Context) (k, v Tuple, err error) {
	return Tuple{}, Tuple{}, io.EOF
}
// mapIterator can efficiently iterate through a Noms Map.
type mapIterator struct {
sequenceIter sequenceIterator
@@ -136,49 +126,3 @@ func (m Map) RangeIterator(ctx context.Context, startIdx, endIdx uint64) (MapTup
return &mapRangeIter{collItr: collItr}, nil
}
// LimitingMapIterator wraps |iter|, returning at most |limit| results
// before reporting exhaustion.
type LimitingMapIterator struct {
	iter  MapIterator
	limit uint64
	cnt   uint64 // results returned so far; never exceeds limit
}

var _ MapIterator = (*LimitingMapIterator)(nil)

// NewLimitingMapIterator returns a *LimitingMapIterator.
func NewLimitingMapIterator(iter MapIterator, limit uint64) *LimitingMapIterator {
	return &LimitingMapIterator{iter: iter, limit: limit}
}

// Next implements MapIterator. Once |limit| results have been returned
// it yields (nil, nil, nil), matching an exhausted iterator.
func (l *LimitingMapIterator) Next(ctx context.Context) (Value, Value, error) {
	if l.cnt == l.limit {
		return nil, nil, nil
	}
	k, v, err := l.iter.Next(ctx)
	switch {
	case err != nil:
		return nil, nil, err
	case k == nil:
		// Underlying iterator ran out before the limit was reached.
		return nil, nil, nil
	}
	l.cnt++
	return k, v, nil
}

// NextTuple implements MapIterator. Once |limit| results have been
// returned it yields io.EOF.
func (l *LimitingMapIterator) NextTuple(ctx context.Context) (Tuple, Tuple, error) {
	if l.cnt == l.limit {
		return Tuple{}, Tuple{}, io.EOF
	}
	k, v, err := l.iter.NextTuple(ctx)
	if err != nil {
		return Tuple{}, Tuple{}, err
	}
	l.cnt++
	return k, v, nil
}

View File

@@ -27,7 +27,6 @@ import (
"fmt"
"math/rand"
"sort"
"sync"
"testing"
"time"
@@ -322,67 +321,6 @@ func newMapTestSuite(size uint, expectChunkCount int, expectPrependChunkDiff int
}
}
// createStreamingMap pushes every suite entry through a StreamingMap
// and returns the finished Map. The unbuffered sends are safe because
// NewStreamingMap has already started the goroutine draining the channel.
func (suite *mapTestSuite) createStreamingMap(vs *ValueStore) Map {
	in := make(chan Value)
	sm := NewStreamingMap(context.Background(), vs, in)
	for _, e := range suite.elems.entries.entries {
		in <- e.key
		in <- e.value
	}
	close(in)
	m, err := sm.Wait()
	suite.NoError(err)
	return m
}
// TestStreamingMap checks that a map built through the streaming API
// validates against the suite's expected contents.
func (suite *mapTestSuite) TestStreamingMap() {
	vs := newTestValueStore()
	defer vs.Close()
	suite.True(suite.validate(suite.createStreamingMap(vs)), "map not valid")
}
// TestStreamingMapOrder verifies that sending keys out of sort order
// makes the streaming map fail with ErrKeysNotOrdered.
func (suite *mapTestSuite) TestStreamingMapOrder() {
	vs := newTestValueStore()
	defer vs.Close()

	// Copy the sorted entries, then swap the first two out of order.
	shuffled := mapEntrySlice{make([]mapEntry, len(suite.elems.entries.entries))}
	copy(shuffled.entries, suite.elems.entries.entries)
	shuffled.entries[0], shuffled.entries[1] = shuffled.entries[1], shuffled.entries[0]

	// Buffer the channel so every send completes before Wait is called.
	in := make(chan Value, len(shuffled.entries)*2)
	for _, e := range shuffled.entries {
		in <- e.key
		in <- e.value
	}
	close(in)

	sm := NewStreamingMap(context.Background(), vs, in)
	_, err := sm.Wait()
	suite.Assert().EqualError(err, ErrKeysNotOrdered.Error())
}
// TestStreamingMap2 builds two streaming maps concurrently against the
// same ValueStore and validates both results.
func (suite *mapTestSuite) TestStreamingMap2() {
	vs := newTestValueStore()
	defer vs.Close()

	var wg sync.WaitGroup
	var m1, m2 Map
	wg.Add(2)
	go func() {
		defer wg.Done()
		m1 = suite.createStreamingMap(vs)
	}()
	go func() {
		defer wg.Done()
		m2 = suite.createStreamingMap(vs)
	}()
	wg.Wait()

	suite.True(suite.validate(m1), "map 'm1' not valid")
	suite.True(suite.validate(m2), "map 'm2' not valid")
}
// TestMapSuite4K runs the map test suite; the first argument 12 is
// presumably a log2 size (2^12 = 4096 entries, matching the "4K" name)
// and the remaining constants the expected chunk/diff-chunk counts for
// that size — confirm against newMapTestSuite before changing.
func TestMapSuite4K(t *testing.T) {
	suite.Run(t, newMapTestSuite(12, 5, 2, 2, newNumber))
}

View File

@@ -200,10 +200,6 @@ func DeserializeEWKBHeader(buf []byte) (uint32, bool, uint32, error) {
return types.DeserializeEWKBHeader(buf)
}
// DeserializeWKBHeader forwards to types.DeserializeWKBHeader,
// returning the byte-order flag and geometry type parsed from buf.
func DeserializeWKBHeader(buf []byte) (bool, uint32, error) {
	isBig, typ, err := types.DeserializeWKBHeader(buf)
	return isBig, typ, err
}
func DeserializePoint(buf []byte, isBig bool, srid uint32) types.Point {
p, _, err := types.DeserializePoint(buf, isBig, srid)
if err != nil {

View File

@@ -171,60 +171,6 @@ type valueReadWriter interface {
valueReadWriter() ValueReadWriter
}
// TupleSlice is a slice of Tuples with equality and membership helpers.
type TupleSlice []Tuple

// Equals reports whether vs and other have the same length and
// pairwise-equal elements in the same order.
func (vs TupleSlice) Equals(other TupleSlice) bool {
	if len(vs) != len(other) {
		return false
	}
	for i, v := range vs {
		if !v.Equals(other[i]) {
			return false
		}
	}
	return true
}

// Contains reports whether v occurs in vs.
//
// Fix: the loop variable previously shadowed the parameter |v|, so the
// comparison was v.Equals(v) — always true — and Contains returned
// true for any non-empty slice regardless of the argument.
func (vs TupleSlice) Contains(nbf *NomsBinFormat, v Tuple) bool {
	for _, elem := range vs {
		if elem.Equals(v) {
			return true
		}
	}
	return false
}
// TupleSort adapts a slice of Tuples for context-aware comparison
// sorting and set-style checks.
type TupleSort struct {
	Tuples []Tuple
}

// Len returns the number of tuples held.
func (vs TupleSort) Len() int {
	return len(vs.Tuples)
}

// Swap exchanges the tuples at positions i and j.
func (vs TupleSort) Swap(i, j int) {
	ts := vs.Tuples
	ts[i], ts[j] = ts[j], ts[i]
}

// Less reports whether the tuple at i sorts strictly before the tuple
// at j under nbf's comparison rules.
func (vs TupleSort) Less(ctx context.Context, nbf *NomsBinFormat, i, j int) (bool, error) {
	cmp, err := vs.Tuples[i].TupleCompare(ctx, nbf, vs.Tuples[j])
	if err != nil {
		return false, err
	}
	return cmp < 0, nil
}

// Equals compares the underlying tuples via TupleSlice.Equals.
func (vs TupleSort) Equals(other TupleSort) bool {
	return TupleSlice(vs.Tuples).Equals(other.Tuples)
}

// Contains delegates membership testing to TupleSlice.Contains.
func (vs TupleSort) Contains(nbf *NomsBinFormat, v Tuple) bool {
	return TupleSlice(vs.Tuples).Contains(nbf, v)
}
type valueImpl struct {
vrw ValueReadWriter
nbf *NomsBinFormat

View File

@@ -130,11 +130,6 @@ func newTestValueStore() *ValueStore {
return NewValueStore(ts.NewViewWithDefaultFormat())
}
// newTestValueStore_LD_1 builds a ValueStore over fresh test storage
// using NewView (presumably the LD_1 format, per the name — confirm
// against chunks.TestStorage).
func newTestValueStore_LD_1() *ValueStore {
	storage := &chunks.TestStorage{}
	return NewValueStore(storage.NewView())
}
// NewMemoryValueStore creates a simple struct that satisfies ValueReadWriter
// and is backed by a chunks.TestStore. Used for dolt operations outside of noms.
func NewMemoryValueStore() *ValueStore {