[ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh

coffeegoddd
2022-08-05 17:21:26 +00:00
parent 60fd6472f8
commit b68814c796
37 changed files with 400 additions and 337 deletions

View File

@@ -16,15 +16,15 @@
//
// cli provides:
//
// * the interface for creating and managing hierarchical dolt commands. These typically have command lines that look like:
// app command [<options>]
// app command subcommand [<options>]
// app command subcommand1 subcommand2 [<options>]
// etc.
// - the interface for creating and managing hierarchical dolt commands. These typically have command lines that look like:
// app command [<options>]
// app command subcommand [<options>]
// app command subcommand1 subcommand2 [<options>]
// etc.
//
// * Command help and usage printing
// - Command help and usage printing
//
// * The interface for writing output to the user
// - The interface for writing output to the user
//
// * Argument parsing utility methods
// - Argument parsing utility methods
package cli

View File

@@ -32,16 +32,19 @@ var ErrZeroCardinality = fmt.Errorf("read row with zero cardinality")
// keylessRow is a Row without PRIMARY_KEY fields
//
// key: Tuple(
// Uint(schema.KeylessRowIdTag),
// UUID(hash.Of(tag1, val1, ..., tagN, valN))
// )
//
// Uint(schema.KeylessRowIdTag),
// UUID(hash.Of(tag1, val1, ..., tagN, valN))
// )
//
// val: Tuple(
// Uint(schema.KeylessRowCardinalityTag),
// Uint(cardinality),
// Uint(tag1), Value(val1),
// ...
// Uint(tagN), Value(valN)
// )
//
// Uint(schema.KeylessRowCardinalityTag),
// Uint(cardinality),
// Uint(tag1), Value(val1),
// ...
// Uint(tagN), Value(valN)
// )
type keylessRow struct {
key types.Tuple
val types.Tuple
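
For illustration, a minimal sketch of assembling this layout with types.NewTuple; the schema.KeylessRow*Tag constants come from the comment above, while the helper itself is hypothetical (the real encoder lives in the row-writing code):

```go
package example

import (
	"github.com/dolthub/dolt/go/libraries/doltcore/schema"
	"github.com/dolthub/dolt/go/store/types"
)

// buildKeylessRow assembles the key/value layout described above:
//
//	key = Tuple(KeylessRowIdTag, rowID)
//	val = Tuple(KeylessRowCardinalityTag, cardinality, tag1, val1, ..., tagN, valN)
//
// Callers pass taggedVals as alternating types.Uint(tag), value pairs.
func buildKeylessRow(nbf *types.NomsBinFormat, rowID types.UUID, cardinality uint64, taggedVals ...types.Value) (types.Tuple, types.Tuple, error) {
	key, err := types.NewTuple(nbf, types.Uint(schema.KeylessRowIdTag), rowID)
	if err != nil {
		return types.Tuple{}, types.Tuple{}, err
	}
	fields := append([]types.Value{
		types.Uint(schema.KeylessRowCardinalityTag),
		types.Uint(cardinality),
	}, taggedVals...)
	val, err := types.NewTuple(nbf, fields...)
	if err != nil {
		return types.Tuple{}, types.Tuple{}, err
	}
	return key, val, nil
}
```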

View File

@@ -400,7 +400,7 @@ func (p DoltDatabaseProvider) DropDatabase(ctx *sql.Context, name string) error
return nil
}
//TODO: databaseForRevision should call checkout on the given branch/commit, returning a non-mutable session
// TODO: databaseForRevision should call checkout on the given branch/commit, returning a non-mutable session
// only if a non-branch revspec was indicated.
func (p DoltDatabaseProvider) databaseForRevision(ctx *sql.Context, revDB string) (sql.Database, dsess.InitialDbState, bool, error) {
if !strings.Contains(revDB, dbRevisionDelimiter) {
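
For context, a hedged sketch of the names this method parses; the delimiter value is an assumption based on dolt's `USE mydb/branch` revision syntax:

```go
// Hypothetical inputs to databaseForRevision, assuming dbRevisionDelimiter is "/":
//
//	"mydb"          // no delimiter: not a revision database
//	"mydb/feature"  // branch revision: resolves to a mutable session on that branch
//	"mydb/ab12cd3"  // commit revision: resolves to a read-only database
```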

View File

@@ -236,19 +236,18 @@ func WriteCSVRow(wr *bufio.Writer, record []*string, delim string, useCRLF bool)
// Below is the method comment from csv.Writer.fieldNeedsQuotes. It is relevant
// to Dolt's quoting logic for NULLs and ""s, and for import/export compatibility
//
// fieldNeedsQuotes reports whether our field must be enclosed in quotes.
// Fields with a Comma, fields with a quote or newline, and
// fields which start with a space must be enclosed in quotes.
// We used to quote empty strings, but we do not anymore (as of Go 1.4).
// The two representations should be equivalent, but Postgres distinguishes
// quoted vs non-quoted empty string during database imports, and it has
// an option to force the quoted behavior for non-quoted CSV but it has
// no option to force the non-quoted behavior for quoted CSV, making
// CSV with quoted empty strings strictly less useful.
// Not quoting the empty string also makes this package match the behavior
// of Microsoft Excel and Google Drive.
// For Postgres, quote the data terminating string `\.`.
//
// fieldNeedsQuotes reports whether our field must be enclosed in quotes.
// Fields with a Comma, fields with a quote or newline, and
// fields which start with a space must be enclosed in quotes.
// We used to quote empty strings, but we do not anymore (as of Go 1.4).
// The two representations should be equivalent, but Postgres distinguishes
// quoted vs non-quoted empty string during database imports, and it has
// an option to force the quoted behavior for non-quoted CSV but it has
// no option to force the non-quoted behavior for quoted CSV, making
// CSV with quoted empty strings strictly less useful.
// Not quoting the empty string also makes this package match the behavior
// of Microsoft Excel and Google Drive.
// For Postgres, quote the data terminating string `\.`.
func fieldNeedsQuotes(field *string, delim string) bool {
if field != nil && *field == "" {
// special Dolt logic
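
To make the special case concrete, a toy sketch of the convention it implements; this is an illustration, not Dolt's actual writer:

```go
package example

// renderField is a toy illustration: a nil *string (SQL NULL) exports as a
// bare empty field, while a non-nil empty string is force-quoted, so NULL
// and "" survive an export/import round trip.
func renderField(field *string) string {
	if field == nil {
		return "" // NULL: no quotes
	}
	if *field == "" {
		return `""` // empty string: forced quotes
	}
	return *field // delimiter/newline/quote escaping omitted for brevity
}
```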

View File

@@ -98,7 +98,7 @@ func newEventsDataDir(fs filesys.Filesys, homeDir string, doltDir string) *event
return &eventsDataDir{fs: fs, path: path}
}
// MakeEventsDir creates a new events data dir in the main dolt dir
// MakeEventsDir creates a new events data dir in the main dolt dir
func (evd *eventsDataDir) MakeEventsDir() error {
if exists, _ := evd.fs.Exists(evd.path); !exists {
if err := evd.fs.MkDirs(evd.path); err != nil {

View File

@@ -24,7 +24,7 @@ import (
"github.com/google/uuid"
)
//OpenCommitEditor allows user to write/edit commit message in temporary file
// OpenCommitEditor allows user to write/edit commit message in temporary file
func OpenCommitEditor(ed string, initialContents string) (string, error) {
filename := filepath.Join(os.TempDir(), uuid.New().String())
err := os.WriteFile(filename, []byte(initialContents), os.ModePerm)
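
A hedged usage sketch; only the OpenCommitEditor signature is taken from the source, and the import path is a guess:

```go
package main

import (
	"fmt"
	"os"

	// hypothetical import path for the package containing OpenCommitEditor
	"github.com/dolthub/dolt/go/libraries/utils/editor"
)

func main() {
	ed := os.Getenv("EDITOR")
	if ed == "" {
		ed = "vi"
	}
	// Seed the temp file with a template, block until the editor exits,
	// then read back the edited message.
	msg, err := editor.OpenCommitEditor(ed, "# Describe your change\n")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("commit message:\n%s", msg)
}
```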

View File

@@ -18,22 +18,29 @@ Package serial defines flatbuffer tables used to persist Dolt data structures.
The StoreRoot is the tip of a database hierarchy, Nodes are the basic recursive
tree type for encoding data, and supportive metadata types like ForeignKey,
TableSchema, Column, etc. are stored inline. In order of decreasing hierarchy:
- StoreRoot is the tip of a database. Contains a map from dataset name to HEAD
rootish in the form of an AddressMap
- ex:
main -> abcdefghij0123456789
feature -> abcdefghij0123456789
- ex:
main -> abcdefghij0123456789
feature -> abcdefghij0123456789
- An AddressMap is itself a prolly tree (see NodeStore above) that can contain
a name->rootish mapping of arbitrary size
- A Rootish is informally a RootValue hash (like a working or staging hash), a
Commit hash (that points to a root value hash), or a Tag (which points to a
commit, and subsequently a root value hash).
- refer to tag.fbs, workingset.fbs, commit.fbs for details
- refer to tag.fbs, workingset.fbs, commit.fbs for details
- A RootValue is a static database version: tables, foreign keys, and a schema.
Refer to rootvalue.fbs for details.
- Schema encodes columns, the primary index, a secondary index, and check
constraints, all inline as metadata.
- Table is currently a wrapper for address references to the clustered index
tree, secondary indexes trees, autoincrement values, and conflict/violations
associated with a table.

View File

@@ -151,8 +151,8 @@ func (r *Resolver) GetChunkStore(ctx context.Context, str string) (chunks.ChunkS
}
// Resolve string to a dataset. If a config is present,
// - if no db prefix is present, assume the default db
// - if the db prefix is an alias, replace it
// - if no db prefix is present, assume the default db
// - if the db prefix is an alias, replace it
func (r *Resolver) GetDataset(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, datas.Dataset, error) {
specStr, dbc := r.ResolvePathSpecAndGetDbConfig(str)
sp, err := spec.ForDatasetOpts(r.verbose(ctx, str, specStr), specOptsForConfig(r.config, dbc))
@@ -163,8 +163,8 @@ func (r *Resolver) GetDataset(ctx context.Context, str string) (datas.Database,
}
// Resolve string to a value path. If a config is present,
// - if no db spec is present, assume the default db
// - if the db spec is an alias, replace it
// - if no db spec is present, assume the default db
// - if the db spec is an alias, replace it
func (r *Resolver) GetPath(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, types.Value, error) {
specStr, dbc := r.ResolvePathSpecAndGetDbConfig(str)
sp, err := spec.ForPathOpts(r.verbose(ctx, str, specStr), specOptsForConfig(r.config, dbc))
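
Some hypothetical spec strings illustrating these rules; the `db::dataset` shape matches the perf-suite examples later in this diff:

```go
// Assumed examples only; exact alias handling lives in the config.
//
//	"::films"                      // no db prefix: the default db is assumed
//	"origin::films"                // "origin" is an alias: replaced from config
//	"http://localhost:8000::films" // explicit db spec, used as-is
//	"::films.value"                // GetPath: a value path within a dataset
```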

View File

@@ -107,13 +107,15 @@ var valueCommitType = nomdl.MustParseType(`Struct Commit {
// A commit has the following type:
//
// ```
// struct Commit {
// meta: M,
// parents: Set<Ref<Cycle<Commit>>>,
// parentsList: List<Ref<Cycle<Commit>>>,
// parentsClosure: Ref<Value>, // Map<Tuple,List<Ref<Value>>>,
// value: T,
// }
//
// struct Commit {
// meta: M,
// parents: Set<Ref<Cycle<Commit>>>,
// parentsList: List<Ref<Cycle<Commit>>>,
// parentsClosure: Ref<Value>, // Map<Tuple,List<Ref<Value>>>,
// value: T,
// }
//
// ```
// where M is a struct type and T is any type.
func newCommit(ctx context.Context, value types.Value, parentsList types.List, parentsClosure types.Ref, includeParentsClosure bool, meta types.Struct) (types.Struct, error) {

View File

@@ -27,6 +27,5 @@ the new storage format, this is the NodeStore.
// TODO commit closures
// TODO refwalks
*/
package datas

View File

@@ -177,9 +177,10 @@ func (pt *progressTracker) Validate(suite *PullSuite) {
}
// Source:
// -3-> C(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -3-> C(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// Sink: Nada
func (suite *PullSuite) TestPullEverything() {
@@ -202,26 +203,28 @@ func (suite *PullSuite) TestPullEverything() {
}
// Source:
// -6-> C3(L5) -1-> N
// . \ -5-> L4 -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// 5 \ -2-> L1 -1-> N
// . \ -1-> L0
// C2(L4) -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// . \ -2-> L1 -1-> N
// 3 \ -1-> L0
// .
// C1(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -6-> C3(L5) -1-> N
// . \ -5-> L4 -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// 5 \ -2-> L1 -1-> N
// . \ -1-> L0
// C2(L4) -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// . \ -2-> L1 -1-> N
// 3 \ -1-> L0
// .
// C1(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// Sink:
// -3-> C1(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -3-> C1(L2) -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
func (suite *PullSuite) TestPullMultiGeneration() {
sinkL := buildListOfHeight(2, suite.sinkVRW)
suite.commitToSink(sinkL, nil)
@@ -251,29 +254,31 @@ func (suite *PullSuite) TestPullMultiGeneration() {
}
// Source:
// -6-> C2(L5) -1-> N
// . \ -5-> L4 -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// 4 \ -2-> L1 -1-> N
// . \ -1-> L0
// C1(L3) -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -6-> C2(L5) -1-> N
// . \ -5-> L4 -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// 4 \ -2-> L1 -1-> N
// . \ -1-> L0
// C1(L3) -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// Sink:
// -5-> C3(L3') -1-> N
// . \ -3-> L2 -1-> N
// . \ \ -2-> L1 -1-> N
// . \ \ -1-> L0
// . \ - "oy!"
// 4
// .
// C1(L3) -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -5-> C3(L3') -1-> N
// . \ -3-> L2 -1-> N
// . \ \ -2-> L1 -1-> N
// . \ \ -1-> L0
// . \ - "oy!"
// 4
// .
// C1(L3) -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
func (suite *PullSuite) TestPullDivergentHistory() {
sinkL := buildListOfHeight(3, suite.sinkVRW)
sinkAddr := suite.commitToSink(sinkL, nil)
@@ -306,23 +311,26 @@ func (suite *PullSuite) TestPullDivergentHistory() {
}
// Source:
// -6-> C2(L4) -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// . \ - "oy!"
// 5 \ -2-> L1 -1-> N
// . \ -1-> L0
// C1(L4) -1-> N
// \ -4-> L3 -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -6-> C2(L4) -1-> N
// . \ -4-> L3 -1-> N
// . \ -3-> L2 -1-> N
// . \ - "oy!"
// 5 \ -2-> L1 -1-> N
// . \ -1-> L0
// C1(L4) -1-> N
// \ -4-> L3 -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// Sink:
// -5-> C1(L4) -1-> N
// \ -4-> L3 -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
//
// -5-> C1(L4) -1-> N
// \ -4-> L3 -1-> N
// \ -3-> L2 -1-> N
// \ -2-> L1 -1-> N
// \ -1-> L0
func (suite *PullSuite) TestPullUpdates() {
sinkL := buildListOfHeight(4, suite.sinkVRW)
suite.commitToSink(sinkL, nil)

View File

@@ -90,12 +90,14 @@ type WorkingSetSpec struct {
// A working set struct has the following type:
//
// ```
// struct WorkingSet {
// meta: M,
// workingRootRef: R,
// stagedRootRef: R,
// mergeState: R,
// }
//
// struct WorkingSet {
// meta: M,
// workingRootRef: R,
// stagedRootRef: R,
// mergeState: R,
// }
//
// ```
// where M is a struct type and R is a ref type.
func newWorkingSet(ctx context.Context, db *database, meta *WorkingSetMeta, workingRef, stagedRef types.Ref, mergeState *MergeState) (hash.Hash, types.Ref, error) {

View File

@@ -31,8 +31,10 @@ import (
// Apply applies a Patch (list of diffs) to a graph. It fulfills the
// following contract:
// Given 2 Noms graphs: a1 and a2:
// ApplyPatch(a1, Diff(a1, a2)) == a2
//
// Given 2 Noms graphs: a1 and a2:
// ApplyPatch(a1, Diff(a1, a2)) == a2
//
// This is useful for IncrementalUpdate() and possibly other problems. See
// updater.go for more information.
//
@@ -378,7 +380,8 @@ func (stack *patchStack) Len() int {
// offset is calculated by keeping a count of each add & remove. Due to the way
// diffs are calculated, no offset is ever needed for 'add' operations. The
// offset for 'remove' operations is calculated as:
// stack.addCnt - stack.rmCnt
//
// stack.addCnt - stack.rmCnt
func (stack *patchStack) adjustIndexOffset(p types.Path, changeType types.DiffChangeType) (res int) {
parentPath := p[:len(p)-1]
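
A toy model of that bookkeeping, under the stated rule; the real patchStack tracks more state:

```go
package example

// offsetTracker mirrors the rule above: 'add' indexes need no adjustment,
// while a 'remove' index shifts by addCnt - rmCnt at the time it is applied.
type offsetTracker struct{ addCnt, rmCnt int }

func (t offsetTracker) removeOffset() int { return t.addCnt - t.rmCnt }
```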

View File

@@ -79,9 +79,9 @@ type differ struct {
// Diff function to stop processing.
// Diff returns the Differences in depth-first order. A 'diff' is defined
// as one of the following conditions:
// * a Value is Added or Removed from a node in the graph
// * the type of a Value has changed in the graph
// * a primitive (i.e. Bool, Float, String, Ref or Blob) Value has changed.
// - a Value is Added or Removed from a node in the graph
// - the type of a Value has changed in the graph
// - a primitive (i.e. Bool, Float, String, Ref or Blob) Value has changed.
//
// A Difference is not returned when a non-primitive value has been modified. For
// example, a struct field has been changed from one Value of type Employee to
@@ -95,15 +95,16 @@ type differ struct {
// been closed to know if it needs to terminate diffing early. To function
// properly it needs to be executed concurrently with code that reads values from
// diffChan. The following is a typical invocation of Diff():
// dChan := make(chan Difference)
// sChan := make(chan struct{})
// go func() {
// d.Diff(s3, s4, dChan, sChan, leftRight)
// close(dChan)
// }()
// for dif := range dChan {
// <some code>
// }
//
// dChan := make(chan Difference)
// sChan := make(chan struct{})
// go func() {
// d.Diff(s3, s4, dChan, sChan, leftRight)
// close(dChan)
// }()
// for dif := range dChan {
// <some code>
// }
func Diff(ctx context.Context, v1, v2 types.Value, dChan chan<- Difference, leftRight bool, descFunc ShouldDescFunc) error {
f := func(ctx context.Context, d differ, v1, v2 types.Value) error {
return d.diff(ctx, nil, v1, v2)

View File

@@ -61,23 +61,23 @@ import (
// Go map keys corresponding to the set values and assigns each key a value of struct{}{}.
//
// When unmarshalling onto interface{} the following rules are used:
// - types.Bool -> bool
// - types.List -> []T, where T is determined recursively using the same rules.
// - types.Set -> depends on `noms:",set"` annotation and field type:
// - without the annotation, same as types.List
// - with the annotation, same as types.Map for map[T]struct{} fields and same as types.List for slice fields
// - types.Map -> map[T]V, where T and V is determined recursively using the
// same rules.
// - types.Float -> float64
// - types.String -> string
// - *types.Type -> *types.Type
// - types.Union -> interface
// - Everything else an error
// - types.Bool -> bool
// - types.List -> []T, where T is determined recursively using the same rules.
// - types.Set -> depends on `noms:",set"` annotation and field type:
// - without the annotation, same as types.List
// - with the annotation, same as types.Map for map[T]struct{} fields and same as types.List for slice fields
// - types.Map -> map[T]V, where T and V is determined recursively using the
// same rules.
// - types.Float -> float64
// - types.String -> string
// - *types.Type -> *types.Type
// - types.Union -> interface
// - Everything else an error
//
// Unmarshal returns an UnmarshalTypeMismatchError if:
// - a Noms value is not appropriate for a given target type
// - a Noms number overflows the target type
// - a Noms list is decoded into a Go array of a different length
// - a Noms value is not appropriate for a given target type
// - a Noms number overflows the target type
// - a Noms list is decoded into a Go array of a different length
func Unmarshal(ctx context.Context, nbf *types.NomsBinFormat, v types.Value, out interface{}) (err error) {
return UnmarshalOpt(ctx, nbf, v, Opt{}, out)
}
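
A hedged usage sketch of these rules with primitive values; types.Format_Default and the store/marshal import path are assumed to be current:

```go
package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/marshal"
	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()

	// types.String -> string, per the rules above.
	var s string
	if err := marshal.Unmarshal(ctx, types.Format_Default, types.String("hello"), &s); err != nil {
		panic(err)
	}

	// types.Float -> float64.
	var f float64
	if err := marshal.Unmarshal(ctx, types.Format_Default, types.Float(3.14), &f); err != nil {
		panic(err)
	}
	fmt.Println(s, f)
}
```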

View File

@@ -75,22 +75,22 @@ import (
// tag value. The "noms" key in the Go struct field's tag value is the field
// name. Examples:
//
// // Field is ignored.
// Field int `noms:"-"`
// // Field is ignored.
// Field int `noms:"-"`
//
// // Field appears in a Noms struct as field "myName".
// MyName int
// // Field appears in a Noms struct as field "myName".
// MyName int
//
// // Field appears in a Noms struct as key "myName".
// Field int `noms:"myName"`
// // Field appears in a Noms struct as key "myName".
// Field int `noms:"myName"`
//
// // Field appears in a Noms struct as key "myName" and the field is
// // omitted from the object if its value is empty, as defined above.
// Field int `noms:"myName,omitempty"
// // Field appears in a Noms struct as key "myName" and the field is
// // omitted from the object if its value is empty, as defined above.
// Field int `noms:"myName,omitempty"
//
// // Field appears in a Noms struct as key "field" and the field is
// // omitted from the object if its value is empty, as defined above.
// Field int `noms:",omitempty"
// // Field appears in a Noms struct as key "field" and the field is
// // omitted from the object if its value is empty, as defined above.
// Field int `noms:",omitempty"
//
// The name of the Noms struct is the name of the Go struct where the first
// character is changed to upper case. You can also implement the

View File

@@ -122,7 +122,6 @@ func TestMarshalTypeType(tt *testing.T) {
})), ts)
}
//
func assertMarshalTypeErrorMessage(t *testing.T, v interface{}, expectedMessage string) {
_, err := MarshalType(types.Format_Default, v)
assert.Error(t, err)

View File

@@ -95,15 +95,18 @@ func NewThreeWay(resolve ResolveFunc) Policy {
// - If the values are primitives or Blob: conflict
// - If the values are maps:
// - if the same key was inserted or updated in both candidates:
// - first run this same algorithm on those two values to attempt to merge them
// - if the two merged values are still different: conflict
// - first run this same algorithm on those two values to attempt to merge them
// - if the two merged values are still different: conflict
// - if a key was inserted in one candidate and removed in the other: conflict
//
// - If the values are structs:
// - Same as map, except using field names instead of map keys
// - Same as map, except using field names instead of map keys
//
// - If the values are sets:
// - Apply the changes from both candidates to the parent to get the result. No conflicts are possible.
// - Apply the changes from both candidates to the parent to get the result. No conflicts are possible.
//
// - If the values are list:
// - Apply list-merge (see below)
// - Apply list-merge (see below)
//
// Merge rules for List are a bit more complex than Map, Struct, and Set due
// to a wider array of potential use patterns. A List might be a de-facto Map

View File

@@ -350,55 +350,68 @@ func (p *Parser) parseMapType() (*types.Type, error) {
}
// Value :
// Type
// Bool
// Float
// String
// List
// Set
// Map
// Struct
// Tuple
//
// Type
// Bool
// Float
// String
// List
// Set
// Map
// Struct
// Tuple
//
// Bool :
// `true`
// `false`
//
// `true`
// `false`
//
// Float :
// ...
//
// ...
//
// String :
// ...
//
// ...
//
// List :
// `[` Values? `]`
//
// `[` Values? `]`
//
// Values :
// Value
// Value `,` Values?
//
// Value
// Value `,` Values?
//
// Set :
// `set` `{` Values? `}`
//
// `set` `{` Values? `}`
//
// Map :
// `map` `{` MapEntries? `}`
//
// `map` `{` MapEntries? `}`
//
// MapEntries :
// MapEntry
// MapEntry `,` MapEntries?
//
// MapEntry
// MapEntry `,` MapEntries?
//
// MapEntry :
// Value `:` Value
//
// Value `:` Value
//
// Struct :
// `struct` StructName? `{` StructFields? `}`
//
// `struct` StructName? `{` StructFields? `}`
//
// StructFields :
// StructField
// StructField `,` StructFields?
//
// StructField
// StructField `,` StructFields?
//
// StructField :
// StructFieldName `:` Value
//
// StructFieldName `:` Value
func (p *Parser) parseValue(ctx context.Context) (types.Value, error) {
tok := p.lex.next()
switch tok {

View File

@@ -30,59 +30,61 @@
// 4. Run go test with the -perf <path to noms db> flag.
//
// Flags:
// -perf.mem Backs the database by a memory store, instead of nbs.
// -perf.prefix Gives the dataset IDs for test results a prefix.
// -perf.repeat Sets how many times tests are repeated ("reps").
// -perf.run Only run tests that match a regex (case insensitive).
// -perf.testdata Sets a custom path to the Noms testdata directory.
//
// -perf.mem Backs the database by a memory store, instead of nbs.
// -perf.prefix Gives the dataset IDs for test results a prefix.
// -perf.repeat Sets how many times tests are repeated ("reps").
// -perf.run Only run tests that match a regex (case insensitive).
// -perf.testdata Sets a custom path to the Noms testdata directory.
//
// PerfSuite also supports testify/suite style Setup/TearDown methods:
// Setup/TearDownSuite is called exactly once.
// Setup/TearDownRep is called for each repetition of the test runs, i.e. -perf.repeat times.
// Setup/TearDownTest is called for every test.
//
// Setup/TearDownSuite is called exactly once.
// Setup/TearDownRep is called for each repetition of the test runs, i.e. -perf.repeat times.
// Setup/TearDownTest is called for every test.
//
// Test results are written to Noms, along with a dump of the environment they were recorded in.
//
// Test names are derived from that "non-empty capitalized string": "Test" is omitted because it's
// redundant, and leading digits are omitted to allow for manual test ordering. For example:
//
// > cat ./samples/go/csv/csv-import/perf_test.go
// type perfSuite struct {
// suite.PerfSuite
// }
// > cat ./samples/go/csv/csv-import/perf_test.go
// type perfSuite struct {
// suite.PerfSuite
// }
//
// func (s *perfSuite) TestFoo() { ... }
// func (s *perfSuite) TestZoo() { ... }
// func (s *perfSuite) Test01Qux() { ... }
// func (s *perfSuite) Test02Bar() { ... }
// func (s *perfSuite) TestFoo() { ... }
// func (s *perfSuite) TestZoo() { ... }
// func (s *perfSuite) Test01Qux() { ... }
// func (s *perfSuite) Test02Bar() { ... }
//
// func TestPerf(t *testing.T) {
// suite.Run("csv-import", t, &perfSuite{})
// }
// func TestPerf(t *testing.T) {
// suite.Run("csv-import", t, &perfSuite{})
// }
//
// > noms serve &
// > go test -v ./samples/go/csv/... -perf http://localhost:8000 -perf.repeat 3
// (perf) RUN(1/3) Test01Qux (recorded as "Qux")
// (perf) PASS: Test01Qux (5s, paused 15s, total 20s)
// (perf) RUN(1/3) Test02Bar (recorded as "Bar")
// (perf) PASS: Test02Bar (15s, paused 2s, total 17s)
// (perf) RUN(1/3) TestFoo (recorded as "Foo")
// (perf) PASS: TestFoo (10s, paused 1s, total 11s)
// (perf) RUN(1/3) TestZoo (recorded as "Zoo")
// (perf) PASS: TestZoo (1s, paused 42s, total 43s)
// ...
// > noms serve &
// > go test -v ./samples/go/csv/... -perf http://localhost:8000 -perf.repeat 3
// (perf) RUN(1/3) Test01Qux (recorded as "Qux")
// (perf) PASS: Test01Qux (5s, paused 15s, total 20s)
// (perf) RUN(1/3) Test02Bar (recorded as "Bar")
// (perf) PASS: Test02Bar (15s, paused 2s, total 17s)
// (perf) RUN(1/3) TestFoo (recorded as "Foo")
// (perf) PASS: TestFoo (10s, paused 1s, total 11s)
// (perf) RUN(1/3) TestZoo (recorded as "Zoo")
// (perf) PASS: TestZoo (1s, paused 42s, total 43s)
// ...
//
// > noms show http://localhost:8000::csv-import
// {
// environment: ...
// tests: [{
// "Bar": {elapsed: 15s, paused: 2s, total: 17s},
// "Foo": {elapsed: 10s, paused: 1s, total: 11s},
// "Qux": {elapsed: 5s, paused: 15s, total: 20s},
// "Zoo": {elapsed: 1s, paused: 42s, total: 43s},
// }, ...]
// ...
// }
// > noms show http://localhost:8000::csv-import
// {
// environment: ...
// tests: [{
// "Bar": {elapsed: 15s, paused: 2s, total: 17s},
// "Foo": {elapsed: 10s, paused: 1s, total: 11s},
// "Qux": {elapsed: 5s, paused: 15s, total: 20s},
// "Zoo": {elapsed: 1s, paused: 42s, total: 43s},
// }, ...]
// ...
// }
package suite
import (

View File

@@ -14,9 +14,9 @@
/*
Package prolly includes:
1) Serialize to and from the flatbuffer messages defined in go/serial
2) Build trees of messages using a NodeStore abstraction
3) Traverse and search NodeStore and related data structures
1. Serialize to and from the flatbuffer messages defined in go/serial
2. Build trees of messages using a NodeStore abstraction
3. Traverse and search NodeStore and related data structures
NodeStore is the primary interface for building/reading tree data structures:
- AddressMap, ProllyTreeNode, CommitClosure are the current Node flatbuffer
@@ -28,11 +28,11 @@ NodeStore is the primary interface for building/reading tree data structures:
that reference other nodes, and 2) leaf nodes, whose values are the main storage
motivation
- Leaf nodes' values can be addresses.
- For example, blobs are stored in ProllyTreeNode leaves as value address.
The value address reference is the root hash of a tree stored separately. In
these cases, it is important to distinguish between 1) self-contained trees
of a single type; and 2) the datastore as a whole, comprised of several types
of trees.
- For example, blobs are stored in ProllyTreeNode leaves as value address.
The value address reference is the root hash of a tree stored separately. In
these cases, it is important to distinguish between 1) self-contained trees
of a single type; and 2) the datastore as a whole, comprised of several types
of trees.
// TODO ProllyTreeNode

View File

@@ -143,24 +143,24 @@ func (tc *chunker[S]) DeletePair(ctx context.Context, _, _ Item) error {
// The method proceeds from the deepest chunker recursively into its
// linked list parents:
//
// (1) If the current cursor and all of its parents are aligned with |next|,
// we are done.
// (1) If the current cursor and all of its parents are aligned with |next|,
// we are done.
//
// (2) In lockstep, a) append to the chunker and b) increment the cursor until
// we either meet condition (1) and return, or we synchronize and progress to
// (3) or (4). Synchronizing means that the current tree being built has
// reached a chunk boundary that aligns with a chunk boundary in the old tree
// being mutated. Synchronization means chunks between this boundary and
// |next| at the current cursor level will be unchanged and can be skipped.
// (2) In lockstep, a) append to the chunker and b) increment the cursor until
// we either meet condition (1) and return, or we synchronize and progress to
// (3) or (4). Synchronizing means that the current tree being built has
// reached a chunk boundary that aligns with a chunk boundary in the old tree
// being mutated. Synchronization means chunks between this boundary and
// |next| at the current cursor level will be unchanged and can be skipped.
//
// (3) All parent cursors are (1) current or (2) synchronized, or there are no
// parents, and we are done.
// (3) All parent cursors are (1) current or (2) synchronized, or there are no
// parents, and we are done.
//
// (4) The parent cursors are not aligned. Recurse into the parent. After
// parents are aligned, we need to reprocess the prefix of the current node in
// anticipation of impending edits that may edit the current chunk. Note that
// processPrefix is only necessary for the "fast forward" case where we
// synchronized the tree level before reaching |next|.
// (4) The parent cursors are not aligned. Recurse into the parent. After
// parents are aligned, we need to reprocess the prefix of the current node in
// anticipation of impending edits that may edit the current chunk. Note that
// processPrefix is only necessary for the "fast forward" case where we
// synchronized the tree level before reaching |next|.
func (tc *chunker[S]) AdvanceTo(ctx context.Context, next *Cursor) error {
cmp := tc.cur.Compare(next)
if cmp == 0 { // step (1)

View File

@@ -31,27 +31,31 @@ type MutationIter interface {
//
// The algorithm is structured as follows:
//
// - Create a new chunker, the main interface for building a new
// tree.
// - Create two cursors into the previous tree. Both cursors
// track key indexes in the old keyspace. The first tracks where
// a new edit will be applied relative to the old keyspace.
// The second indicates the most recent edit in the new tree
// relative to the old keyspace. The second cursor is embedded in
// the chunker, maintained by the chunker, and necessarily precedes
// the first.
// - Create a new chunker, the main interface for building a new
// tree.
//
// - For every edit, first identify the key index in the old keyspace
// where the edit will be applied, and move the tracking cursor to
// that index.
// - Advance the chunker and the second cursor to the new edit point.
// Refer to the chunker.AdvanceTo docstring for details.
// - Add the edit to the chunker. This applies the edit to the in-progress
// NodeStore. The new NodeStore may expand or shrink relative to the
// old tree, but these details are internal to the chunker.
// - Repeat for every edit.
// - Create two cursors into the previous tree. Both cursors
// track key indexes in the old keyspace. The first tracks where
// a new edit will be applied relative to the old keyspace.
// The second indicates the most recent edit in the new tree
// relative to the old keyspace. The second cursor is embedded in
// the chunker, maintained by the chunker, and necessarily precedes
// the first.
//
// - Finalize the chunker and resolve the tree's new root Node.
// - For every edit, first identify the key index in the old keyspace
// where the edit will be applied, and move the tracking cursor to
// that index.
//
// - Advance the chunker and the second cursor to the new edit point.
// Refer to the chunker.AdvanceTo docstring for details.
//
// - Add the edit to the chunker. This applies the edit to the in-progress
// NodeStore. The new NodeStore may expand or shrink relative to the
// old tree, but these details are internal to the chunker.
//
// - Repeat for every edit.
//
// - Finalize the chunker and resolve the tree's new root Node.
func ApplyMutations[S message.Serializer](
ctx context.Context,
ns NodeStore,

View File

@@ -444,11 +444,13 @@ func (cur *Cursor) fetchNode(ctx context.Context) (err error) {
//
// cur: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 2
// other: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 4
// res => -2 (from level 0)
//
// res => -2 (from level 0)
//
// cur: L3 -> 4, L2 -> 2, L1 -> 5, L0 -> 2
// other: L3 -> 4, L2 -> 3, L1 -> 5, L0 -> 4
// res => +1 (from level 2)
//
// res => +1 (from level 2)
func (cur *Cursor) Compare(other *Cursor) int {
return compareCursors(cur, other)
}

View File

@@ -53,7 +53,7 @@ var levelSalt = [...]uint64{
saltFromLevel(15),
}
// splitterFactory makes a nodeSplitter.
// splitterFactory makes a nodeSplitter.
type splitterFactory func(level uint8) nodeSplitter
var defaultSplitterFactory splitterFactory = newKeySplitter

View File

@@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/assert"
)
//var src = rand.New(rand.NewSource(time.Now().Unix()))
// var src = rand.New(rand.NewSource(time.Now().Unix()))
var src = rand.New(rand.NewSource(0))
func TestSkipList(t *testing.T) {

View File

@@ -23,15 +23,15 @@ import "github.com/dolthub/dolt/go/store/d"
// from one object to produce another object.
//
// The rules for determining whether |a| and |b| intersect are:
// - if either type is Value, return true
// - if either type is Union, return true iff at least one variant of |a| intersects with one variant of |b|
// - if |a| & |b| are not the same kind, return false
// - else
// - if both are structs, return true iff their names are equal or one name is "", they share a field name
// and the type of that field intersects
// - if both are refs, sets or lists, return true iff the element type intersects
// - if both are maps, return true iff they have a key with the same type and value types that intersect
// - else return true
// - if either type is Value, return true
// - if either type is Union, return true iff at least one variant of |a| intersects with one variant of |b|
// - if |a| & |b| are not the same kind, return false
// - else
// - if both are structs, return true iff their names are equal or one name is "", they share a field name
// and the type of that field intersects
// - if both are refs, sets or lists, return true iff the element type intersects
// - if both are maps, return true iff they have a key with the same type and value types that intersect
// - else return true
func ContainCommonSupertype(nbf *NomsBinFormat, a, b *Type) bool {
// Avoid cycles internally.
return containCommonSupertypeImpl(nbf, a, b, nil, nil)

View File

@@ -59,13 +59,14 @@ var (
)
// RegisterHRSCommenter is called with three arguments:
// typename: the name of the struct this function will be applied to
// unique: an arbitrary string to differentiate functions that should be applied
// to different structs that have the same name (e.g. two implementations of
// the "Employee" type.
// commenter: an interface with a 'Comment()' function that gets called for all
// Values with this name. The function should verify the type of the Value
// and, if appropriate, return a non-empty string to be appended as the comment
//
// typename: the name of the struct this function will be applied to
// unique: an arbitrary string to differentiate functions that should be applied
// to different structs that have the same name (e.g. two implementations of
// the "Employee" type.
// commenter: an interface with a 'Comment()' function that gets called for all
// Values with this name. The function should verify the type of the Value
// and, if appropriate, return a non-empty string to be appended as the comment
func RegisterHRSCommenter(typename, unique string, commenter HRSCommenter) {
registryLock.Lock()
defer registryLock.Unlock()
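
A hedged sketch of registering a commenter, assuming HRSCommenter is satisfied by a Comment(context.Context, types.Value) string method and that Struct.MaybeGet returns (Value, bool, error):

```go
package hrexample

import (
	"context"

	"github.com/dolthub/dolt/go/store/types"
)

// employeeCommenter is a hypothetical commenter that annotates "Employee"
// structs with their name field when printing human-readable values.
type employeeCommenter struct{}

func (employeeCommenter) Comment(ctx context.Context, v types.Value) string {
	st, ok := v.(types.Struct)
	if !ok {
		return "" // wrong kind of value: no comment
	}
	name, ok, err := st.MaybeGet("name") // signature assumed
	if err != nil || !ok {
		return ""
	}
	if s, ok := name.(types.String); ok {
		return string(s)
	}
	return ""
}

func init() {
	// "hr-example" is the arbitrary uniqueness string described above.
	types.RegisterHRSCommenter("Employee", "hr-example", employeeCommenter{})
}
```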

View File

@@ -40,11 +40,14 @@ func newBlobMetaSequence(level uint64, tuples []metaTuple, vrw ValueReadWriter)
// advanceCursorToOffset advances the cursor as close as possible to idx
//
// If the cursor references a leaf sequence,
// advance to idx,
// and return the number of values preceding the idx
//
// advance to idx,
// and return the number of values preceding the idx
//
// If it references a meta-sequence,
// advance to the tuple containing idx,
// and return the number of leaf values preceding this tuple
//
// advance to the tuple containing idx,
// and return the number of leaf values preceding this tuple
func advanceCursorToOffset(cur *sequenceCursor, idx uint64) (uint64, error) {
seq := cur.seq

View File

@@ -37,9 +37,9 @@ var EmptyList List
// or more types. The type of the list will reflect the type of the elements in the list. For
// example:
//
// l := NewList(Float(1), Bool(true))
// fmt.Println(l.Type().Describe())
// // outputs List<Bool | Float>
// l := NewList(Float(1), Bool(true))
// fmt.Println(l.Type().Describe())
// // outputs List<Bool | Float>
//
// Lists, like all Noms values are immutable so the "mutation" methods return a new list.
type List struct {

View File

@@ -32,7 +32,7 @@ import (
"github.com/dolthub/dolt/go/store/hash"
)
//type ValueInRange func(Value) (bool, error)
// type ValueInRange func(Value) (bool, error)
type ValueInRange func(context.Context, Value) (bool, bool, error)
var ErrKeysNotOrdered = errors.New("streaming map keys not ordered")

View File

@@ -33,11 +33,11 @@ type CreateEditAcc func(nbf *NomsBinFormat) EditAccumulator
// CreateEditAccForMapEdits allows users to define the EditAccumulator that should be used when creating a MapEditor via
// the Map.Edit method. In most cases you should call:
//
// func init() {
// types.CreateEditAccForMapEdits = func() EditAccumulator {
// return edits.NewAsyncSortedEdits(10000, 4, 2) // configure your own constants
// }
// }
// func init() {
// types.CreateEditAccForMapEdits = func() EditAccumulator {
// return edits.NewAsyncSortedEdits(10000, 4, 2) // configure your own constants
// }
// }
var CreateEditAccForMapEdits CreateEditAcc = NewDumbEditAccumulator
// EditAccumulator is an interface for a datastructure that can have edits added to it. Once all edits are

View File

@@ -53,7 +53,7 @@ func hashValueBytes(item sequenceItem, sp sequenceSplitter) error {
})
}
// newSplitterFn makes a sequenceSplitter.
// newSplitterFn makes a sequenceSplitter.
type newSplitterFn func(fmt *NomsBinFormat, salt byte) sequenceSplitter
// hashValueBytesFn translates |item| into a byte stream to provide to |sp|.

View File

@@ -28,10 +28,13 @@ import (
// a. be a super type of the input type
// b. have all unions flattened (no union inside a union)
// c. have all unions folded, which means the union
// 1. have at most one element each of kind Ref, Set, List, and Map
// 2. have at most one struct element with a given name
// 1. have at most one element each of kind Ref, Set, List, and Map
// 2. have at most one struct element with a given name
//
// e. all named unions are pointing at the same simplified struct, which means
// that all named unions with the same name form cycles.
//
// that all named unions with the same name form cycles.
//
// f. all cycle types that can be resolved have been resolved.
// g. all types reachable from it also fulfill b-f
//
@@ -40,18 +43,18 @@ import (
// - The input types are deduplicated
// - Any unions in the input set are "flattened" into the input set
// - The inputs are grouped into categories:
// - ref
// - list
// - set
// - map
// - struct, by name (each unique struct name will have its own group)
// - The ref, set, and list groups are collapsed like so:
// - ref
// - list
// - set
// - map
// - struct, by name (each unique struct name will have its own group)
// - The ref, set, and list groups are collapsed like so:
// {Ref<A>,Ref<B>,...} -> Ref<A|B|...>
// - The map group is collapsed like so:
// - The map group is collapsed like so:
// {Map<K1,V1>|Map<K2,V2>...} -> Map<K1|K2,V1|V2>
// - Each struct group is collapsed like so:
// - Each struct group is collapsed like so:
// {struct{foo:number,bar:string}, struct{bar:blob, baz:bool}} ->
// struct{foo?:number,bar:string|blob,baz?:bool}
// struct{foo?:number,bar:string|blob,baz?:bool}
//
// All the above rules are applied recursively.
func simplifyType(t *Type, intersectStructs bool) (*Type, error) {

View File

@@ -44,8 +44,10 @@ func isSubtypeTopLevel(nbf *NomsBinFormat, requiredType, concreteType *Type) (is
}
// IsSubtypeDetails returns two values:
// isSub - which indicates whether concreteType is a subtype of requiredType.
// hasExtra - which indicates whether concreteType has additional fields.
//
// isSub - which indicates whether concreteType is a subtype of requiredType.
// hasExtra - which indicates whether concreteType has additional fields.
//
// See comment below on isValueSubtypeOfDetails
func isSubtypeDetails(nbf *NomsBinFormat, requiredType, concreteType *Type, hasExtra bool, parentStructTypes []*Type) (bool, bool) {
if requiredType.Equals(concreteType) {
@@ -184,23 +186,28 @@ func IsValueSubtypeOf(nbf *NomsBinFormat, v Value, t *Type) (bool, error) {
}
// IsValueSubtypeOfDetails returns two values:
// isSub - which indicates whether v is a subtype of t.
// hasExtra - which indicates whether v has additional fields. This field has
// no meaning if IsSub is false.
//
// isSub - which indicates whether v is a subtype of t.
// hasExtra - which indicates whether v has additional fields. This field has
// no meaning if IsSub is false.
//
// For example, given the following data:
// type1 := struct S { v := Struct S1 {
// a Float | string a: "hello"
// b ?int b: 2
// } }
//
// type1 := struct S { v := Struct S1 {
// a Float | string a: "hello"
// b ?int b: 2
// } }
//
// IsValueSubtypeOfDetails(v, type1) would return isSub == true, and hasExtra == false
//
// And given these types:
// type2 := struct S { v := Struct S1 {
// a Float | string a: "hello"
// b ?int b: 2
// } c: "hello again"
// }
//
// type2 := struct S { v := Struct S1 {
// a Float | string a: "hello"
// b ?int b: 2
// } c: "hello again"
// }
//
// IsValueSubtypeOfDetails(v, type1) would return isSub == true, and hasExtra == true
func IsValueSubtypeOfDetails(nbf *NomsBinFormat, v Value, t *Type) (bool, bool, error) {
return isValueSubtypeOfDetails(nbf, v, t, false)

View File

@@ -376,11 +376,11 @@ func (lvs *ValueStore) WriteValue(ctx context.Context, v Value) (Ref, error) {
// ChunkStore in a way which attempts to locate children and grandchildren
// sequentially together. The following invariants are retained:
//
// 1. For any given chunk currently in the buffer, only direct children of the
// chunk may also be presently buffered (any grandchildren will have been
// flushed).
// 2. The total data occupied by buffered chunks does not exceed
// lvs.bufferedChunksMax
// 1. For any given chunk currently in the buffer, only direct children of the
// chunk may also be presently buffered (any grandchildren will have been
// flushed).
// 2. The total data occupied by buffered chunks does not exceed
// lvs.bufferedChunksMax
func (lvs *ValueStore) bufferChunk(ctx context.Context, v Value, c chunks.Chunk, height uint64) error {
lvs.bufferMu.Lock()
defer lvs.bufferMu.Unlock()

View File

@@ -112,11 +112,13 @@ func WriteToWriter(ctx context.Context, wr io.Writer, store *FileValueStore, val
// uint32 num chunks
//
// for each chunk:
// hash of chunk
// len of chunk
//
// hash of chunk
// len of chunk
//
// for each chunk
// chunk bytes
//
// chunk bytes
func write(wr io.Writer, h hash.Hash, store *FileValueStore) error {
// The Write*IfNoErr functions makes the error handling code less annoying
err := iohelp.WritePrimIfNoErr(wr, uint32(len(store.nbf.VersionString())), nil)
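
A hypothetical inverse of the chunk section described above; big-endian encoding and the 20-byte hash.ByteLen are assumptions:

```go
package example

import (
	"encoding/binary"
	"io"

	"github.com/dolthub/dolt/go/store/hash"
)

// readChunks reads the layout above: a uint32 count, then per-chunk
// (hash, length) headers, then the concatenated chunk bytes.
// Version-string and root-hash framing are omitted.
func readChunks(r io.Reader) (map[hash.Hash][]byte, error) {
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, err
	}
	hashes := make([]hash.Hash, n)
	sizes := make([]uint32, n)
	for i := 0; i < int(n); i++ {
		var hb [hash.ByteLen]byte
		if _, err := io.ReadFull(r, hb[:]); err != nil {
			return nil, err
		}
		hashes[i] = hash.New(hb[:])
		if err := binary.Read(r, binary.BigEndian, &sizes[i]); err != nil {
			return nil, err
		}
	}
	out := make(map[hash.Hash][]byte, n)
	for i, h := range hashes {
		buf := make([]byte, sizes[i])
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, err
		}
		out[h] = buf
	}
	return out, nil
}
```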