diff --git a/go/Godeps/LICENSES b/go/Godeps/LICENSES index 31466a6af2..528bdaa36c 100644 --- a/go/Godeps/LICENSES +++ b/go/Godeps/LICENSES @@ -309,7 +309,7 @@ Isaac Dunham Jaydeep Patil Jens Gustedt Jeremy Huntwork -Jo-Philipp Wich +Jo-Philipp Wich Joakim Sindholt John Spencer Julien Ramseier @@ -322,7 +322,7 @@ Luca Barbato Luka Perkov M Farkas-Dyck (Strake) Mahesh Bodapati -Markus Wichmann +Markus Wichmann Masanori Ogino Michael Clark Michael Forney @@ -409,7 +409,7 @@ under the standard MIT terms. All other files which have no copyright comments are original works produced specifically for use as part of this library, written either by Rich Felker, the main author of the library, or by one or more -contibutors listed above. Details on authorship of individual files +contributors listed above. Details on authorship of individual files can be found in the git version control history of the project. The omission of copyright and license comments in each file is in the interest of source tree size. @@ -1749,7 +1749,7 @@ Codegen template in t_html_generator.h --------------------------------------------------- For t_cl_generator.cc - * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2008- Patrick Collison * Copyright (c) 2006- Facebook --------------------------------------------------- @@ -9921,7 +9921,7 @@ For more information, please refer to = github.com/zeebo/xxh3 licensed under: = xxHash Library -Copyright (c) 2012-2014, Yann Collet +Copyright (c) 2012-2014, Yann Collet Copyright (c) 2019, Jeff Wendling All rights reserved. diff --git a/go/cmd/dolt/cli/messages.go b/go/cmd/dolt/cli/messages.go index b2eabf6af1..58f57668db 100644 --- a/go/cmd/dolt/cli/messages.go +++ b/go/cmd/dolt/cli/messages.go @@ -17,7 +17,7 @@ package cli // This is a starting point for storing common messages. Doing this correctly would probably mean using language files // but that is overkill for the moment. const ( - // Single variable - the name of the command. 
`dolt ` is how the commandString is formated in calls to the Exec method + // Single variable - the name of the command. `dolt ` is how the commandString is formatted in calls to the Exec method // for dolt commands. RemoteUnsupportedMsg = "%s can not currently be used when there is a local server running. Please stop your dolt sql-server and try again." ) diff --git a/go/cmd/dolt/commands/assist.go b/go/cmd/dolt/commands/assist.go index 0d12137676..c406696b14 100644 --- a/go/cmd/dolt/commands/assist.go +++ b/go/cmd/dolt/commands/assist.go @@ -252,7 +252,7 @@ func extractJsonResponse(content string) map[string]interface{} { } func sqlQuery(ctx context.Context, query string) (string, bool, error) { - cli.Println(fmt.Sprintf("Runnning query \"%s\"...", query)) + cli.Println(fmt.Sprintf("Running query \"%s\"...", query)) output, _, err := doltExec(ctx, fmt.Sprintf("dolt sql -q \"%s\"", query), false) if err != nil { diff --git a/go/cmd/dolt/commands/branch.go b/go/cmd/dolt/commands/branch.go index bf12881f6d..a9b2852ceb 100644 --- a/go/cmd/dolt/commands/branch.go +++ b/go/cmd/dolt/commands/branch.go @@ -176,7 +176,7 @@ func getBranches(sqlCtx *sql.Context, queryEngine cli.Queryist, remote bool) ([] return nil, err } if len(row) != 2 { - return nil, fmt.Errorf("unexpectedly received multiple columns in '%s': %s", command, row) + return nil, fmt.Errorf("unexpectedly received multiple columns in '%s': %s", command, row) } rowStrings, err := sqlfmt.SqlRowAsStrings(row, schema) @@ -479,7 +479,7 @@ func generateForceDeleteMessage(args []string) string { return newArgs } -// callStoredProcedure generates and exectures the SQL query for calling the DOLT_BRANCH stored procedure. +// callStoredProcedure generates and executes the SQL query for calling the DOLT_BRANCH stored procedure. // All actions that modify branches delegate to this after they validate their arguments. 
// Actions that don't modify branches, such as `dolt branch --list` and `dolt branch --show-current`, don't call // this method. diff --git a/go/cmd/dolt/commands/clone.go b/go/cmd/dolt/commands/clone.go index 92ca626cb4..d1ebd34bde 100644 --- a/go/cmd/dolt/commands/clone.go +++ b/go/cmd/dolt/commands/clone.go @@ -231,7 +231,7 @@ func validateAndParseDolthubUrl(urlStr string) (string, bool) { } if u.Scheme == dbfactory.HTTPSScheme && u.Host == "www.dolthub.com" { - // Get the actual repo name and covert the remote + // Get the actual repo name and convert the remote split := strings.Split(u.Path, "/") if len(split) > 2 { diff --git a/go/cmd/dolt/commands/diff_output.go b/go/cmd/dolt/commands/diff_output.go index e93dd78e7c..042725c5f7 100644 --- a/go/cmd/dolt/commands/diff_output.go +++ b/go/cmd/dolt/commands/diff_output.go @@ -69,7 +69,7 @@ func newDiffWriter(diffOutput diffOutput) (diffWriter, error) { case JsonDiffOutput: return newJsonDiffWriter(iohelp.NopWrCloser(cli.CliOut)) default: - panic(fmt.Sprintf("unexpected diff output: %v", diffOutput)) + panic(fmt.Sprintf("unexpected diff output: %v", diffOutput)) } } @@ -310,7 +310,7 @@ func (s sqlDiffWriter) RowWriter(fromTableInfo, toTableInfo *diff.TableInfo, tds targetSch = fromTableInfo.Sch } - // TOOD: schema names + // TODO: schema names return sqlexport.NewSqlDiffWriter(tds.ToTableName.Name, targetSch, iohelp.NopWrCloser(cli.CliOut)), nil } diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go index d2ea79a0c8..5915ea3c50 100644 --- a/go/cmd/dolt/commands/sqlserver/server.go +++ b/go/cmd/dolt/commands/sqlserver/server.go @@ -310,7 +310,7 @@ func ConfigureServices( primaryController := sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.BinlogPrimaryController doltBinlogPrimaryController, ok := primaryController.(*binlogreplication.DoltBinlogPrimaryController) if !ok { - return fmt.Errorf("unexpected type of binlog controller: %T", primaryController) + return 
fmt.Errorf("unexpected type of binlog controller: %T", primaryController) } _, logBinValue, ok := sql.SystemVariables.GetGlobal("log_bin") @@ -319,7 +319,7 @@ func ConfigureServices( } logBin, ok := logBinValue.(int8) if !ok { - return fmt.Errorf("unexpected type for @@log_bin system variable: %T", logBinValue) + return fmt.Errorf("unexpected type for @@log_bin system variable: %T", logBinValue) } _, logBinBranchValue, ok := sql.SystemVariables.GetGlobal("log_bin_branch") @@ -328,7 +328,7 @@ func ConfigureServices( } logBinBranch, ok := logBinBranchValue.(string) if !ok { - return fmt.Errorf("unexpected type for @@log_bin_branch system variable: %T", logBinBranchValue) + return fmt.Errorf("unexpected type for @@log_bin_branch system variable: %T", logBinBranchValue) } if logBinBranch != "" { // If an invalid branch has been configured, let the server start up so that it's @@ -800,7 +800,7 @@ func persistServerLocalCreds(port int, dEnv *env.DoltEnv) (*LocalCreds, error) { // remotesapiAuth facilitates the implementation remotesrv.AccessControl for the remotesapi server. type remotesapiAuth struct { - // ctxFactory is a function that returns a new sql.Context. This will create a new conext every time it is called, + // ctxFactory is a function that returns a new sql.Context. This will create a new context every time it is called, // so it should be called once per API request. 
ctxFactory func(context.Context) (*sql.Context, error) rawDb *mysql_db.MySQLDb @@ -827,7 +827,7 @@ func (r *remotesapiAuth) ApiAuthenticate(ctx context.Context) (context.Context, if strings.Index(address, ":") > 0 { address, _, err = net.SplitHostPort(creds.Address) if err != nil { - return nil, fmt.Errorf("Invlaid Host string for authentication: %s", creds.Address) + return nil, fmt.Errorf("Invalid Host string for authentication: %s", creds.Address) } } diff --git a/go/cmd/dolt/commands/sqlserver/server_test.go b/go/cmd/dolt/commands/sqlserver/server_test.go index c86f0284a0..ed847132ab 100644 --- a/go/cmd/dolt/commands/sqlserver/server_test.go +++ b/go/cmd/dolt/commands/sqlserver/server_test.go @@ -140,7 +140,7 @@ func TestServerBadArgs(t *testing.T) { tests := [][]string{ {"-H", "127.0.0.0.1"}, - {"-H", "loclahost"}, + {"-H", "loclahost"}, {"-P", "300"}, {"-P", "90000"}, {"-l", "everything"}, diff --git a/go/libraries/doltcore/doltdb/table.go b/go/libraries/doltcore/doltdb/table.go index 093dca0ec0..fbfc44718f 100644 --- a/go/libraries/doltcore/doltdb/table.go +++ b/go/libraries/doltcore/doltdb/table.go @@ -37,7 +37,7 @@ var ErrNoConflictsResolved = errors.New("no conflicts resolved") const dolt_row_hash_tag = 0 -// IsValidTableName checks if name is a valid identifer, and doesn't end with space characters +// IsValidTableName checks if name is a valid identifier, and doesn't end with space characters func IsValidTableName(name string) bool { if len(name) == 0 || unicode.IsSpace(rune(name[len(name)-1])) { return false } @@ -454,7 +454,7 @@ func (t *Table) HashOf() (hash.Hash, error) { // UpdateNomsRows replaces the current row data and returns and updated Table. // Calls to UpdateNomsRows will not be written to the database. The root must -// be updated with the updated table, and the root must be committed or written. +// be updated with the updated table, and the root must be committed or written. // Deprecated: use Table.UpdateRows() instead. 
func (t *Table) UpdateNomsRows(ctx context.Context, updatedRows types.Map) (*Table, error) { table, err := t.table.SetTableRows(ctx, durable.IndexFromNomsMap(updatedRows, t.ValueReadWriter(), t.NodeStore())) @@ -466,7 +466,7 @@ func (t *Table) UpdateNomsRows(ctx context.Context, updatedRows types.Map) (*Tab // UpdateRows replaces the current row data and returns and updated Table. // Calls to UpdateRows will not be written to the database. The root must -// be updated with the updated table, and the root must be committed or written. +// be updated with the updated table, and the root must be committed or written. func (t *Table) UpdateRows(ctx context.Context, updatedRows durable.Index) (*Table, error) { table, err := t.table.SetTableRows(ctx, updatedRows) if err != nil { diff --git a/go/libraries/doltcore/dtestutils/data.go b/go/libraries/doltcore/dtestutils/data.go index b687e6ec77..cb6b11adf6 100644 --- a/go/libraries/doltcore/dtestutils/data.go +++ b/go/libraries/doltcore/dtestutils/data.go @@ -101,7 +101,7 @@ func RowsAndSchema() ([]row.Row, schema.Schema, error) { return rows, sch, err } -// MustTuple contructs a types.Tuple for a slice of types.Values. +// MustTuple constructs a types.Tuple for a slice of types.Values. func MustTuple(vals ...types.Value) types.Tuple { tup, err := types.NewTuple(types.Format_Default, vals...) if err != nil { diff --git a/go/libraries/doltcore/env/multi_repo_env.go b/go/libraries/doltcore/env/multi_repo_env.go index 8beec95635..8055eafd0b 100644 --- a/go/libraries/doltcore/env/multi_repo_env.go +++ b/go/libraries/doltcore/env/multi_repo_env.go @@ -124,7 +124,7 @@ func GetMultiEnvStorageMetadata(dataDirFS filesys.Filesys) (StorageMetadataMap, return sms, nil } -// NewMultiEnv returns a new MultiRepoEnv instance dirived from a root DoltEnv instance. +// NewMultiEnv returns a new MultiRepoEnv instance derived from a root DoltEnv instance. 
func MultiEnvForSingleEnv(ctx context.Context, env *DoltEnv) (*MultiRepoEnv, error) { return MultiEnvForDirectory(ctx, env.Config.WriteableConfig(), env.FS, env.Version, env) } diff --git a/go/libraries/doltcore/merge/merge_prolly_rows.go b/go/libraries/doltcore/merge/merge_prolly_rows.go index 374fef0e0c..485b5d2dce 100644 --- a/go/libraries/doltcore/merge/merge_prolly_rows.go +++ b/go/libraries/doltcore/merge/merge_prolly_rows.go @@ -635,7 +635,7 @@ func (uv uniqValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff) ( // deleteArtifact deletes the unique constraint violation artifact for the row identified by |key| and returns a // boolean that indicates if an artifact was deleted, as well as an error that indicates if there were any -// unexpected errors encountered. +// unexpected errors encountered. func (uv uniqValidator) deleteArtifact(ctx context.Context, key val.Tuple) (bool, error) { artifactKey := uv.edits.BuildArtifactKey(ctx, key, uv.srcHash, prolly.ArtifactTypeUniqueKeyViol) @@ -1121,7 +1121,7 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc // WARNING: In theory, we should only have to call MutableMap::Delete if the key is actually being deleted // from the left branch. However, because of https://github.com/dolthub/dolt/issues/7192, // if the left side of the merge is an empty table and we don't attempt to modify the map, - // the table will have an unexpected root hash. + // the table will have an unexpected root hash. 
return m.mut.Delete(ctx, diff.Key) case tree.DiffOpDivergentModifyResolved: // any generated columns need to be re-resolved because their computed values may have changed as a result of @@ -1177,7 +1177,7 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc } return m.mut.Put(ctx, diff.Key, newTupleValue) default: - return fmt.Errorf("unexpected diffOp for editing primary index: %s", diff.Op) + return fmt.Errorf("unexpected diffOp for editing primary index: %s", diff.Op) } } @@ -1210,7 +1210,7 @@ func resolveDefaults(ctx *sql.Context, tableName string, mergedSchema schema.Sch return nil, err } - // The default expresions always come in the order of the merged schema, but the fields we need to apply them to + // The default expressions always come in the order of the merged schema, but the fields we need to apply them to // might have different column indexes in the case of a schema change if len(exprs) > 0 { for i := range exprs { @@ -1540,7 +1540,7 @@ func writeTupleExpression( // instance that describes how the table is being merged, |from| is the field position in the value tuple from the // previous schema, and |rightSide| indicates whether the previous type info can be found on the right side of the merge // or the left side. If the previous type info is the same as the current type info for the merged schema, then this -// function is a no-op and simply returns |value|. The converted value along with any unexpected error encountered is +// function is a no-op and simply returns |value|. The converted value along with any unexpected error encountered is // returned. 
func convertValueToNewType(value interface{}, newTypeInfo typeinfo.TypeInfo, tm *TableMerger, from int, rightSide bool) (interface{}, error) { var previousTypeInfo typeinfo.TypeInfo diff --git a/go/libraries/doltcore/merge/mutable_secondary_index.go b/go/libraries/doltcore/merge/mutable_secondary_index.go index f4a283c0e7..20ae4478ae 100644 --- a/go/libraries/doltcore/merge/mutable_secondary_index.go +++ b/go/libraries/doltcore/merge/mutable_secondary_index.go @@ -75,7 +75,7 @@ func GetMutableSecondaryIdxsWithPending(ctx *sql.Context, ourSch, sch schema.Sch // If the schema has changed, don't reuse the index. // TODO: This isn't technically required, but correctly handling updating secondary indexes when only some // of the table's rows have been updated is difficult to get right. - // Dropping the index is potentially slower but guarenteed to be correct. + // Dropping the index is potentially slower but guaranteed to be correct. if !m.KeyDesc().Equals(index.Schema().GetKeyDescriptorWithNoConversion()) { continue } diff --git a/go/libraries/doltcore/migrate/validation.go b/go/libraries/doltcore/migrate/validation.go index 23080900cf..59d2b06a66 100644 --- a/go/libraries/doltcore/migrate/validation.go +++ b/go/libraries/doltcore/migrate/validation.go @@ -276,7 +276,7 @@ func nomsKindsFromQueryTypes(qt query.Type) []types.NomsKind { return []types.NomsKind{types.JSONKind} default: - panic(fmt.Sprintf("unexpect query.Type %s", qt.String())) + panic(fmt.Sprintf("unexpected query.Type %s", qt.String())) } } diff --git a/go/libraries/doltcore/rebase/filter_branch_test.go b/go/libraries/doltcore/rebase/filter_branch_test.go index 18f2e472eb..1f0d1c7618 100644 --- a/go/libraries/doltcore/rebase/filter_branch_test.go +++ b/go/libraries/doltcore/rebase/filter_branch_test.go @@ -184,7 +184,7 @@ func filterBranchTests() []filterBranchTest { asserts: []testAssertion{ { setup: []testCommand{ - // expeced error: "table not found: test" + // expected error: "table not found: 
test" {cmd.FilterBranchCmd{}, args{"--continue", "-q", "DELETE FROM test WHERE pk > 1;"}}, }, }, diff --git a/go/libraries/doltcore/remotesrv/http.go b/go/libraries/doltcore/remotesrv/http.go index eedbf92614..92ca3710a0 100644 --- a/go/libraries/doltcore/remotesrv/http.go +++ b/go/libraries/doltcore/remotesrv/http.go @@ -96,7 +96,7 @@ func (fh filehandler) ServeHTTP(respWr http.ResponseWriter, req *http.Request) { } _, ok := hash.MaybeParse(path[i+1:]) if !ok { - logger.WithField("last_path_component", path[i+1:]).Warn("bad request with unparseable last path component") + logger.WithField("last_path_component", path[i+1:]).Warn("bad request with unparsable last path component") respWr.WriteHeader(http.StatusBadRequest) return } diff --git a/go/libraries/doltcore/remotestorage/chunk_fetcher.go b/go/libraries/doltcore/remotestorage/chunk_fetcher.go index 1f025bb8dd..51bd4bac61 100644 --- a/go/libraries/doltcore/remotestorage/chunk_fetcher.go +++ b/go/libraries/doltcore/remotestorage/chunk_fetcher.go @@ -34,7 +34,7 @@ import ( // A remotestorage.ChunkFetcher is a pipelined chunk fetcher for fetching a // large number of chunks where the downloads may benefit from range -// coallescing, hedging, automatic retries, pipelining of download location +// coalescing, hedging, automatic retries, pipelining of download location // retrieval with the fetching of the actual chunk bytes, etc. // // It is expected that one goroutine will be calling `Get()` with batches of @@ -392,7 +392,7 @@ func fetcherDownloadRangesThread(ctx context.Context, locCh chan []*remotesapi.D // |toSend| could have come from a previous iteration // of this loop or the outer loop. If it's |nil|, we // can get the next range to download from - // |downlaods.ranges|. + // |downloads.ranges|. 
if toSend == nil { max := downloads.ranges.DeleteMaxRegion() if len(max) == 0 { @@ -486,9 +486,9 @@ type SizeSetter interface { // This does additive increase, multiplicative decrease on calls to |SetSize|, // reading successes and failures from calls to |RecordSuccess| and -// |RecordFailure|. If there have been any faliures in the last update window, +// |RecordFailure|. If there have been any failures in the last update window, // it will call |SetSize| with a new size that's 1/2 the current size. If there -// have been no faliures in the last update window, but there has been at least +// have been no failures in the last update window, but there has been at least // one success, it will call |SetSize| with a size 1 greater than the current // size. Will not scale size greater than |MaxConcurrency|. func (cc *ConcurrencyControl) Run(ctx context.Context, done <-chan struct{}, ss SizeSetter, sz int) error { diff --git a/go/libraries/doltcore/remotestorage/internal/ranges/ranges.go b/go/libraries/doltcore/remotestorage/internal/ranges/ranges.go index 3dc4109c86..a7db2be1f2 100644 --- a/go/libraries/doltcore/remotestorage/internal/ranges/ranges.go +++ b/go/libraries/doltcore/remotestorage/internal/ranges/ranges.go @@ -38,8 +38,8 @@ type GetRange struct { // A |Region| represents a continuous range of bytes within in a Url. // |ranges.Tree| maintains |Region| instances that cover every |GetRange| // within the tree. As entries are inserted into the Tree, their Regions can -// coallesce with Regions which come before or after them in the same Url, -// based on the |coallesceLimit|. +// coalesce with Regions which come before or after them in the same Url, +// based on the |coalesceLimit|. 
// // |Region|s are maintained in a |RegionHeap| so that the |Tree| can quickly // return a large download to get started on when a download worker is @@ -103,11 +103,11 @@ func (rh *RegionHeap) Pop() any { } // A ranges.Tree is a tree data structure designed to support efficient -// coallescing of non-overlapping ranges inserted into it. +// coalescing of non-overlapping ranges inserted into it. type Tree struct { t *btree.BTreeG[*GetRange] regions *RegionHeap - coallesceLimit int + coalesceLimit int } func GetRangeLess(a, b *GetRange) bool { @@ -118,11 +118,11 @@ func GetRangeLess(a, b *GetRange) bool { } } -func NewTree(coallesceLimit int) *Tree { +func NewTree(coalesceLimit int) *Tree { return &Tree{ t: btree.NewG[*GetRange](64, GetRangeLess), regions: &RegionHeap{}, - coallesceLimit: coallesceLimit, + coalesceLimit: coalesceLimit, } } @@ -154,12 +154,12 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) { } t.t.ReplaceOrInsert(ins) - // Check for coallesce with the range of the entry before the new one... + // Check for coalesce with the range of the entry before the new one... t.t.DescendLessOrEqual(ins, func(gr *GetRange) bool { if gr == ins { return true } - // If we coallesce... + // If we coalesce... if ins.Url == gr.Url { regionEnd := gr.Region.EndOffset if regionEnd > ins.Offset { @@ -167,8 +167,8 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) { ins.Region = gr.Region ins.Region.MatchedBytes += uint64(ins.Length) heap.Fix(t.regions, ins.Region.HeapIndex) - } else if (ins.Offset - regionEnd) < uint64(t.coallesceLimit) { - // Inserted entry is within the limit to coallesce with the prior one. + } else if (ins.Offset - regionEnd) < uint64(t.coalesceLimit) { + // Inserted entry is within the limit to coalesce with the prior one. 
ins.Region = gr.Region ins.Region.MatchedBytes += uint64(ins.Length) ins.Region.EndOffset = ins.Offset + uint64(ins.Length) @@ -183,10 +183,10 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) { if gr == ins { return true } - // If we coallesce... + // If we coalesce... if ins.Url == gr.Url && gr.Region != ins.Region { regionStart := gr.Region.StartOffset - if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coallesceLimit)) { + if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coalesceLimit)) { if ins.Region == nil { ins.Region = gr.Region ins.Region.MatchedBytes += uint64(ins.Length) @@ -216,7 +216,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) { return false }) - // We didn't coallesce with any existing Regions. Insert a new Region + // We didn't coalesce with any existing Regions. Insert a new Region // covering just this GetRange. if ins.Region == nil { ins.Region = &Region{ @@ -233,7 +233,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) { // Returns all the |*GetRange| entries in the tree that are encompassed by the // current top entry in our |RegionHeap|. For |HeapStrategy_largest|, this will // be the largest possible download we can currently start, given our -// |coallesceLimit|. +// |coalesceLimit|. func (t *Tree) DeleteMaxRegion() []*GetRange { if t.regions.Len() == 0 { return nil diff --git a/go/libraries/doltcore/remotestorage/internal/reliable/chan.go b/go/libraries/doltcore/remotestorage/internal/reliable/chan.go index ec290a180c..8beeb5ea61 100644 --- a/go/libraries/doltcore/remotestorage/internal/reliable/chan.go +++ b/go/libraries/doltcore/remotestorage/internal/reliable/chan.go @@ -36,7 +36,7 @@ import ( // // Close should always be called on an reliable.Chan to ensure resource cleanup. type Chan[T any] struct { - // All unack'd |T|s are stored in |buff|. As they get Ackd, they get poped from here. 
+ // All unack'd |T|s are stored in |buff|. As they get Ackd, they get popped from here. buff *circular.Buff[T] // We return new |T|s from here and they go into |buff| to be delivered diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_streamer.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_streamer.go index 4b25821bba..5a0b30afd1 100644 --- a/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_streamer.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_primary_streamer.go @@ -47,7 +47,7 @@ func newBinlogStreamer() *binlogStreamer { } // startStream listens for new binlog events sent to this streamer over its binlog event -// channel and sends them over |conn|. It also listens for ticker ticks to send hearbeats +// channel and sends them over |conn|. It also listens for ticker ticks to send heartbeats // over |conn|. The specified |binlogFormat| is used to define the format of binlog events // and |binlogEventMeta| records the position of the stream. This method blocks until an error // is received over the stream (e.g. 
the connection closing) or the streamer is closed, diff --git a/go/libraries/doltcore/sqle/cluster/branch_control_replica.go b/go/libraries/doltcore/sqle/cluster/branch_control_replica.go index 2f00cfdb8b..7f9e80af04 100644 --- a/go/libraries/doltcore/sqle/cluster/branch_control_replica.go +++ b/go/libraries/doltcore/sqle/cluster/branch_control_replica.go @@ -142,7 +142,7 @@ func (r *branchControlReplica) Run() { r.progressNotifier.RecordSuccess(attempt) r.fastFailReplicationWait = false r.backoff.Reset() - r.lgr.Debugf("branchControlReplica[%s]: sucessfully replicated branch control permissions.", r.client.remote) + r.lgr.Debugf("branchControlReplica[%s]: successfully replicated branch control permissions.", r.client.remote) r.replicatedVersion = version } } diff --git a/go/libraries/doltcore/sqle/cluster/mysqldb_persister.go b/go/libraries/doltcore/sqle/cluster/mysqldb_persister.go index bd8ea7f727..b84a44d1d5 100644 --- a/go/libraries/doltcore/sqle/cluster/mysqldb_persister.go +++ b/go/libraries/doltcore/sqle/cluster/mysqldb_persister.go @@ -160,7 +160,7 @@ func (r *mysqlDbReplica) Run() { r.progressNotifier.RecordSuccess(attempt) r.fastFailReplicationWait = false r.backoff.Reset() - r.lgr.Debugf("mysqlDbReplica[%s]: sucessfully replicated users and grants at version %d.", r.client.remote, version) + r.lgr.Debugf("mysqlDbReplica[%s]: successfully replicated users and grants at version %d.", r.client.remote, version) r.replicatedVersion = version } else { r.lgr.Debugf("mysqlDbReplica[%s]: not replicating empty users and grants at version %d.", r.client.remote, r.version) diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_rebase.go b/go/libraries/doltcore/sqle/dprocedures/dolt_rebase.go index e1ba6265ad..55f032a354 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_rebase.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_rebase.go @@ -77,9 +77,9 @@ var DoltRebaseSystemTableSchema = []*sql.Column{ }, } -// ErrRebaseUncommittedChanges is used when a 
rebase is started, but there are uncommitted (and not +// ErrRebaseUncommittedChanges is used when a rebase is started, but there are uncommitted (and not // ignored) changes in the working set. -var ErrRebaseUncommittedChanges = fmt.Errorf("cannot start a rebase with uncommitted changes") +var ErrRebaseUncommittedChanges = fmt.Errorf("cannot start a rebase with uncommitted changes") // ErrRebaseConflict is used when a merge conflict is detected while rebasing a commit. var ErrRebaseConflict = goerrors.NewKind( @@ -203,7 +203,7 @@ func startRebase(ctx *sql.Context, upstreamPoint string) error { } // rebaseWorkingBranch is the name of the temporary branch used when performing a rebase. In Git, a rebase - // happens with a detatched HEAD, but Dolt doesn't support that, we use a temporary branch. + // happens with a detached HEAD, but Dolt doesn't support that, we use a temporary branch. rebaseWorkingBranch := "dolt_rebase_" + rebaseBranch var rsc doltdb.ReplicationStatusController err = actions.CreateBranchWithStartPt(ctx, dbData, rebaseWorkingBranch, upstreamPoint, false, &rsc) @@ -319,7 +319,7 @@ func validateWorkingSetCanStartRebase(ctx *sql.Context) error { return fmt.Errorf("unable to start rebase while another rebase is in progress – abort the current rebase before proceeding") } - // Make sure the working set doesn't contain any uncommitted changes + // Make sure the working set doesn't contain any uncommitted changes roots, ok := doltSession.GetRoots(ctx, ctx.GetCurrentDatabase()) if !ok { return fmt.Errorf("unable to get roots for database %s", ctx.GetCurrentDatabase()) } @@ -329,7 +329,7 @@ return err } if !wsOnlyHasIgnoredTables { - return ErrRebaseUncommittedChanges + return ErrRebaseUncommittedChanges } return nil @@ -473,7 +473,7 @@ func continueRebase(ctx *sql.Context) (string, error) { func processRebasePlanStep(ctx *sql.Context, planStep *rebase.RebasePlanStep) error { // Make sure we have a 
transaction opened for the session - // NOTE: After our first call to cherry-pick, the tx is committed, so a new tx needs to be started + // NOTE: After our first call to cherry-pick, the tx is committed, so a new tx needs to be started // as we process additional rebase actions. doltSession := dsess.DSessFromSess(ctx.Session) if doltSession.GetTransaction() == nil { diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries.go index 5d77899d32..8caba215c7 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries.go @@ -3587,7 +3587,7 @@ var DoltBranchScripts = []queries.ScriptTest{ var DoltResetTestScripts = []queries.ScriptTest{ { - Name: "CALL DOLT_RESET('--hard') should reset the merge state after uncommitted merge", + Name: "CALL DOLT_RESET('--hard') should reset the merge state after uncommitted merge", SetUpScript: []string{ "CREATE TABLE test1 (pk int NOT NULL, c1 int, c2 int, PRIMARY KEY (pk));", "CALL DOLT_ADD('.')", @@ -5703,7 +5703,7 @@ var DoltCherryPickTests = []queries.ScriptTest{ Assertions: []queries.ScriptTestAssertion{ { Query: "CALL Dolt_Cherry_Pick(@commit1);", - ExpectedErrStr: "cannot cherry-pick with uncommitted changes", + ExpectedErrStr: "cannot cherry-pick with uncommitted changes", }, { Query: "call dolt_add('t');", @@ -5711,7 +5711,7 @@ var DoltCherryPickTests = []queries.ScriptTest{ }, { Query: "CALL Dolt_Cherry_Pick(@commit1);", - ExpectedErrStr: "cannot cherry-pick with uncommitted changes", + ExpectedErrStr: "cannot cherry-pick with uncommitted changes", }, }, }, @@ -7035,7 +7035,7 @@ END`, }, }, { - Name: "Database syntax propogates to inner calls", + Name: "Database syntax propagates to inner calls", SetUpScript: []string{ "CALL DOLT_CHECKOUT('main');", `CREATE PROCEDURE p4() diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go 
index bb15ca75ba..1e74361d57 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -581,7 +581,7 @@ var MergeScripts = []queries.ScriptTest{ // TODO: These tests are skipped, because we have temporarily disabled dolt_conflicts_resolve // when there are schema conflicts, since schema conflicts prevent table data from being // merged, and resolving the schema changes, but not completing the data merge will likely - // give customers unexpected results. + // give customers unexpected results. // https://github.com/dolthub/dolt/issues/6616 Name: "CALL DOLT_MERGE with schema conflicts can be correctly resolved using dolt_conflicts_resolve when autocommit is off", SetUpScript: []string{ @@ -3737,7 +3737,7 @@ var SchemaConflictScripts = []queries.ScriptTest{ SetUpScript: []string{ "set @@autocommit=1;", "create table t (pk int primary key, c0 varchar(20))", - "call dolt_commit('-Am', 'added tabele t')", + "call dolt_commit('-Am', 'added table t')", "call dolt_checkout('-b', 'other')", "alter table t modify column c0 int", "call dolt_commit('-am', 'altered t on branch other')", @@ -3765,7 +3765,7 @@ var SchemaConflictScripts = []queries.ScriptTest{ SetUpScript: []string{ "set @@autocommit=0;", "create table t (pk int primary key, c0 varchar(20))", - "call dolt_commit('-Am', 'added tabele t')", + "call dolt_commit('-Am', 'added table t')", "call dolt_checkout('-b', 'other')", "alter table t modify column c0 int", "call dolt_commit('-am', 'altered t on branch other')", @@ -3932,7 +3932,7 @@ var OldFormatMergeConflictsAndCVsScripts = []queries.ScriptTest{ }, { Query: "CALL DOLT_MERGE('branch3');", - Expected: []sql.Row{{"", 0, 1, "conficts found"}}, + Expected: []sql.Row{{"", 0, 1, "conflicts found"}}, }, { Query: "SELECT violation_type, pk, parent_fk from dolt_constraint_violations_child;", diff --git a/go/libraries/doltcore/sqle/globalstate/auto_increment_tracker.go 
b/go/libraries/doltcore/sqle/globalstate/auto_increment_tracker.go index 9687bb8000..0bf07cd491 100644 --- a/go/libraries/doltcore/sqle/globalstate/auto_increment_tracker.go +++ b/go/libraries/doltcore/sqle/globalstate/auto_increment_tracker.go @@ -39,7 +39,7 @@ type AutoIncrementTracker interface { // given, so the new global maximum is computed without regard for its value in that working set. Set(ctx *sql.Context, tableName string, table *doltdb.Table, ws ref.WorkingSetRef, newAutoIncVal uint64) (*doltdb.Table, error) - // AcquireTableLock acquires the auto increment lock on a table, and reutrns a callback function to release the lock. + // AcquireTableLock acquires the auto increment lock on a table, and returns a callback function to release the lock. // Depending on the value of the `innodb_autoinc_lock_mode` system variable, the engine may need to acquire and hold // the lock for the duration of an insert statement. AcquireTableLock(ctx *sql.Context, tableName string) (func(), error) diff --git a/go/libraries/doltcore/sqle/integration_test/stockmarket_test.go b/go/libraries/doltcore/sqle/integration_test/stockmarket_test.go index 9277d81098..4781d9090e 100644 --- a/go/libraries/doltcore/sqle/integration_test/stockmarket_test.go +++ b/go/libraries/doltcore/sqle/integration_test/stockmarket_test.go @@ -11629,13 +11629,13 @@ INSERT INTO symbols VALUES ('KRNY','Kearny Financial','Finance',NULL); INSERT INTO symbols VALUES ('KRO','Kronos Worldwide Inc','Basic Industries',NULL); INSERT INTO symbols VALUES ('KRP','Kimbell Royalty Partners','Energy',2017); INSERT INTO symbols VALUES ('KRYS','Krystal Biotech, Inc.','Health Care',2017); -INSERT INTO symbols VALUES ('KSM','Scudder Strategic Municiple Income Trust',NULL,1989); +INSERT INTO symbols VALUES ('KSM','Scudder Strategic Municipal Income Trust',NULL,1989); INSERT INTO symbols VALUES ('KSS','Kohl's Corporation','Consumer Services',1992); INSERT INTO symbols VALUES ('KSU','Kansas City 
Southern','Transportation',NULL); INSERT INTO symbols VALUES ('KSU^','Kansas City Southern','Transportation',NULL); INSERT INTO symbols VALUES ('KT','KT Corporation','Public Utilities',NULL); INSERT INTO symbols VALUES ('KTCC','Key Tronic Corporation','Technology',1983); -INSERT INTO symbols VALUES ('KTF','Scudder Municiple Income Trust',NULL,1988); +INSERT INTO symbols VALUES ('KTF','Scudder Municipal Income Trust',NULL,1988); INSERT INTO symbols VALUES ('KTH','Lehman ABS Corporation','Finance',NULL); INSERT INTO symbols VALUES ('KTN','Lehman ABS Corporation','Finance',NULL); INSERT INTO symbols VALUES ('KTOS','Kratos Defense & Security Solutions, Inc.','Capital Goods',NULL); @@ -12818,7 +12818,7 @@ INSERT INTO symbols VALUES ('PCI','PIMCO Dynamic Credit and Mortgage Income Fund INSERT INTO symbols VALUES ('PCK','Pimco California Municipal Income Fund II',NULL,2002); INSERT INTO symbols VALUES ('PCM','PIMCO Commercial Mortgage Securities Trust, Inc.',NULL,1993); INSERT INTO symbols VALUES ('PCMI','PCM, Inc.','Consumer Services',NULL); -INSERT INTO symbols VALUES ('PCN','Pimco Corporate & Income Stategy Fund',NULL,2001); +INSERT INTO symbols VALUES ('PCN','Pimco Corporate & Income Strategy Fund',NULL,2001); INSERT INTO symbols VALUES ('PCOM','Points International, Ltd.','Miscellaneous',NULL); INSERT INTO symbols VALUES ('PCQ','PIMCO California Municipal Income Fund',NULL,2001); INSERT INTO symbols VALUES ('PCRX','Pacira BioSciences, Inc.','Health Care',2011); @@ -13502,7 +13502,7 @@ INSERT INTO symbols VALUES ('SBLKZ','Star Bulk Carriers Corp.','Transportation', INSERT INTO symbols VALUES ('SBNA','Scorpio Tankers Inc.','Transportation',2014); INSERT INTO symbols VALUES ('SBNY','Signature Bank','Finance',2004); INSERT INTO symbols VALUES ('SBOT','Stellar Biotechnologies, Inc.','Health Care',NULL); -INSERT INTO symbols VALUES ('SBOW','SilverBow Resorces, Inc.','Energy',NULL); +INSERT INTO symbols VALUES ('SBOW','SilverBow Resources, Inc.','Energy',NULL); INSERT INTO 
symbols VALUES ('SBPH','Spring Bank Pharmaceuticals, Inc.','Health Care',2016); INSERT INTO symbols VALUES ('SBR','Sabine Royalty Trust','Energy',NULL); INSERT INTO symbols VALUES ('SBRA','Sabra Health Care REIT, Inc.','Consumer Services',NULL); @@ -14104,7 +14104,7 @@ INSERT INTO symbols VALUES ('THGA','The Hanover Insurance Group, Inc.','Finance' INSERT INTO symbols VALUES ('THM','International Tower Hill Mines Ltd','Basic Industries',NULL); INSERT INTO symbols VALUES ('THO','Thor Industries, Inc.','Consumer Non-Durables',NULL); INSERT INTO symbols VALUES ('THOR','Synthorx, Inc.','Health Care',2018); -INSERT INTO symbols VALUES ('THQ','Tekla Healthcare Opportunies Fund',NULL,2014); +INSERT INTO symbols VALUES ('THQ','Tekla Healthcare Opportunities Fund',NULL,2014); INSERT INTO symbols VALUES ('THR','Thermon Group Holdings, Inc.','Energy',2011); INSERT INTO symbols VALUES ('THRM','Gentherm Inc','Capital Goods',NULL); INSERT INTO symbols VALUES ('THS','Treehouse Foods, Inc.','Consumer Non-Durables',NULL); @@ -17627,12 +17627,12 @@ INSERT INTO join_result VALUES ('stock','KRNY','us','2017-11-01',15.2,15.3,14.9, INSERT INTO join_result VALUES ('stock','KRO','us','2017-11-01',26.68,26.7558,25.9,26.1,246853,'0','Kronos Worldwide Inc','Basic Industries',NULL); INSERT INTO join_result VALUES ('stock','KRP','us','2017-11-01',16.689,16.807,16.631,16.64,19253,'0','Kimbell Royalty Partners','Energy',2017); INSERT INTO join_result VALUES ('stock','KRYS','us','2017-11-01',9.349,10,9.349,9.55,15987,'0','Krystal Biotech, Inc.','Health Care',2017); -INSERT INTO join_result VALUES ('stock','KSM','us','2017-11-01',11.93,11.96,11.88,11.8953,82733,'0','Scudder Strategic Municiple Income Trust',NULL,1989); +INSERT INTO join_result VALUES ('stock','KSM','us','2017-11-01',11.93,11.96,11.88,11.8953,82733,'0','Scudder Strategic Municipal Income Trust',NULL,1989); INSERT INTO join_result VALUES ('stock','KSS','us','2017-11-01',41.95,42.81,41.52,41.9,2970700,'0','Kohl's 
Corporation','Consumer Services',1992); INSERT INTO join_result VALUES ('stock','KSU','us','2017-11-01',104.77,105.72,104.46,105.46,952870,'0','Kansas City Southern','Transportation',NULL); INSERT INTO join_result VALUES ('stock','KT','us','2017-11-01',14.4,14.41,13.91,14.13,1238720,'0','KT Corporation','Public Utilities',NULL); INSERT INTO join_result VALUES ('stock','KTCC','us','2017-11-01',7.23,7.8,7.07,7.8,112870,'0','Key Tronic Corporation','Technology',1983); -INSERT INTO join_result VALUES ('stock','KTF','us','2017-11-01',11.89,11.94,11.83,11.87,277606,'0','Scudder Municiple Income Trust',NULL,1988); +INSERT INTO join_result VALUES ('stock','KTF','us','2017-11-01',11.89,11.94,11.83,11.87,277606,'0','Scudder Municipal Income Trust',NULL,1988); INSERT INTO join_result VALUES ('stock','KTH','us','2017-11-01',32.41,32.53,32.41,32.53,1371,'0','Lehman ABS Corporation','Finance',NULL); INSERT INTO join_result VALUES ('stock','KTN','us','2017-11-01',33.8,33.8,32.7304,33.73,3746,'0','Lehman ABS Corporation','Finance',NULL); INSERT INTO join_result VALUES ('stock','KTOS','us','2017-11-01',12.16,12.3,11.65,11.9,1603390,'0','Kratos Defense & Security Solutions, Inc.','Capital Goods',NULL); @@ -18542,7 +18542,7 @@ INSERT INTO join_result VALUES ('stock','PCI','us','2017-11-01',22.619,22.639,22 INSERT INTO join_result VALUES ('stock','PCK','us','2017-11-01',9.971,10.01,9.941,9.951,68472,'0','Pimco California Municipal Income Fund II',NULL,2002); INSERT INTO join_result VALUES ('stock','PCM','us','2017-11-01',11.95,11.97,11.821,11.88,57610,'0','PIMCO Commercial Mortgage Securities Trust, Inc.',NULL,1993); INSERT INTO join_result VALUES ('stock','PCMI','us','2017-11-01',14.2,14.5,14,14.25,87457,'0','PCM, Inc.','Consumer Services',NULL); -INSERT INTO join_result VALUES ('stock','PCN','us','2017-11-01',16.881,16.961,16.841,16.871,79197,'0','Pimco Corporate & Income Stategy Fund',NULL,2001); +INSERT INTO join_result VALUES 
('stock','PCN','us','2017-11-01',16.881,16.961,16.841,16.871,79197,'0','Pimco Corporate & Income Strategy Fund',NULL,2001); INSERT INTO join_result VALUES ('stock','PCOM','us','2017-11-01',11.5,11.5,11.01,11.08,45690,'0','Points International, Ltd.','Miscellaneous',NULL); INSERT INTO join_result VALUES ('stock','PCQ','us','2017-11-01',17.08,17.15,16.951,16.971,100898,'0','PIMCO California Municipal Income Fund',NULL,2001); INSERT INTO join_result VALUES ('stock','PCRX','us','2017-11-01',32.4,33.3,32.2,32.6,797680,'0','Pacira BioSciences, Inc.','Health Care',2011); @@ -19055,7 +19055,7 @@ INSERT INTO join_result VALUES ('stock','SBLK','us','2017-11-01',10.85,11,10.5,1 INSERT INTO join_result VALUES ('stock','SBNA','us','2017-11-01',24.4,24.75,24.4,24.7,1334,'0','Scorpio Tankers Inc.','Transportation',2014); INSERT INTO join_result VALUES ('stock','SBNY','us','2017-11-01',131.31,131.96,128.85,129.7,414960,'0','Signature Bank','Finance',2004); INSERT INTO join_result VALUES ('stock','SBOT','us','2017-11-01',1.15,1.16,1.11,1.12,53927,'0','Stellar Biotechnologies, Inc.','Health Care',NULL); -INSERT INTO join_result VALUES ('stock','SBOW','us','2017-11-01',22.83,22.9572,22.26,22.27,28686,'0','SilverBow Resorces, Inc.','Energy',NULL); +INSERT INTO join_result VALUES ('stock','SBOW','us','2017-11-01',22.83,22.9572,22.26,22.27,28686,'0','SilverBow Resources, Inc.','Energy',NULL); INSERT INTO join_result VALUES ('stock','SBPH','us','2017-11-01',15.03,15.41,14.79,15.12,38453,'0','Spring Bank Pharmaceuticals, Inc.','Health Care',2016); INSERT INTO join_result VALUES ('stock','SBR','us','2017-11-01',42.65,42.8,42.3946,42.65,12375,'0','Sabine Royalty Trust','Energy',NULL); INSERT INTO join_result VALUES ('stock','SBRA','us','2017-11-01',19.88,20.14,19.75,20.14,1558500,'0','Sabra Health Care REIT, Inc.','Consumer Services',NULL); @@ -19505,7 +19505,7 @@ INSERT INTO join_result VALUES ('stock','THG','us','2017-11-01',99.05,99.7,98.52 INSERT INTO join_result VALUES 
('stock','THGA','us','2017-11-01',25.51,25.5226,25.4801,25.4801,1471,'0','The Hanover Insurance Group, Inc.','Finance',NULL); INSERT INTO join_result VALUES ('stock','THM','us','2017-11-01',0.434,0.4379,0.4239,0.4252,81904,'0','International Tower Hill Mines Ltd','Basic Industries',NULL); INSERT INTO join_result VALUES ('stock','THO','us','2017-11-01',137.64,138.07,136.07,136.82,575303,'0','Thor Industries, Inc.','Consumer Non-Durables',NULL); -INSERT INTO join_result VALUES ('stock','THQ','us','2017-11-01',17.63,17.63,17.5,17.53,162058,'0','Tekla Healthcare Opportunies Fund',NULL,2014); +INSERT INTO join_result VALUES ('stock','THQ','us','2017-11-01',17.63,17.63,17.5,17.53,162058,'0','Tekla Healthcare Opportunities Fund',NULL,2014); INSERT INTO join_result VALUES ('stock','THR','us','2017-11-01',21.73,21.73,21.16,21.27,164411,'0','Thermon Group Holdings, Inc.','Energy',2011); INSERT INTO join_result VALUES ('stock','THRM','us','2017-11-01',33.7,34.45,33.25,33.65,215357,'0','Gentherm Inc','Capital Goods',NULL); INSERT INTO join_result VALUES ('stock','THS','us','2017-11-01',66.52,67.13,66.05,66.36,1109140,'0','Treehouse Foods, Inc.','Consumer Non-Durables',NULL); diff --git a/go/libraries/doltcore/sqle/json/noms_json_value_test.go b/go/libraries/doltcore/sqle/json/noms_json_value_test.go index e773a7d080..ade8b41bf8 100644 --- a/go/libraries/doltcore/sqle/json/noms_json_value_test.go +++ b/go/libraries/doltcore/sqle/json/noms_json_value_test.go @@ -56,7 +56,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) { doc: gmstypes.MustJSON(`2.71`), }, { - name: "type homogenous object", + name: "type homogeneous object", doc: gmstypes.MustJSON(`{"a": 2, "b": 3, "c": 4}`), }, { @@ -64,7 +64,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) { doc: gmstypes.MustJSON(`{"a": 2, "b": "two", "c": false}`), }, { - name: "homogenous array", + name: "homogeneous array", doc: gmstypes.MustJSON(`[1, 2, 3]`), }, { diff --git 
a/go/libraries/doltcore/sqle/statspro/auto_refresh.go b/go/libraries/doltcore/sqle/statspro/auto_refresh.go index 20619565d2..775a945a87 100644 --- a/go/libraries/doltcore/sqle/statspro/auto_refresh.go +++ b/go/libraries/doltcore/sqle/statspro/auto_refresh.go @@ -196,7 +196,7 @@ func (p *Provider) checkRefresh(ctx *sql.Context, sqlDb sql.Database, dbName, br ctx.GetLogger().Debugf("statistics updating: %s", updateMeta.qual) // mark index for updating idxMetas = append(idxMetas, updateMeta) - // update lastest hash if we haven't already + // update latest hash if we haven't already statDb.SetLatestHash(branch, table, tableHash) } } diff --git a/go/libraries/doltcore/table/editor/table_edit_accumulator_test.go b/go/libraries/doltcore/table/editor/table_edit_accumulator_test.go index 19b5af42ed..a90d2eefac 100644 --- a/go/libraries/doltcore/table/editor/table_edit_accumulator_test.go +++ b/go/libraries/doltcore/table/editor/table_edit_accumulator_test.go @@ -164,7 +164,7 @@ func TestGet(t *testing.T) { key5 := newTuple(t, types.Int(5)) key6 := newTuple(t, types.Int(6)) - // test uncommitted + // test uncomitted requireGet(ctx, t, tea, key1, false) teaInsert(t, tea, key1) requireGet(ctx, t, tea, key1, true) @@ -172,22 +172,22 @@ func TestGet(t *testing.T) { require.NoError(t, err) requireGet(ctx, t, tea, key1, false) - // test uncommitted flushed + // test uncomitted flushed teaInsert(t, tea, key1) requireGet(ctx, t, tea, key1, true) - tea.flushUncommitted() + tea.flushUncomitted() requireGet(ctx, t, tea, key1, true) err = tea.Rollback(ctx) require.NoError(t, err) requireGet(ctx, t, tea, key1, false) - // test commmitted + // test comitted teaInsert(t, tea, key1) err = tea.Commit(ctx, nbf) require.NoError(t, err) requireGet(ctx, t, tea, key1, true) - // edits in committed and uncommitted + // edits in comitted and uncomitted requireGet(ctx, t, tea, key2, false) teaInsert(t, tea, key2) requireGet(ctx, t, tea, key1, true) @@ -197,11 +197,11 @@ func TestGet(t *testing.T) 
{ requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, false) - // edits in committed and uncommitted flushed + // edits in comitted and uncomitted flushed teaInsert(t, tea, key2) requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, true) - tea.flushUncommitted() + tea.flushUncomitted() requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, true) err = tea.Rollback(ctx) @@ -209,10 +209,10 @@ func TestGet(t *testing.T) { requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, false) - // edits in committed, uncommitted and uncommitted flushed + // edits in comitted, uncomitted and uncomitted flushed requireGet(ctx, t, tea, key3, false) teaInsert(t, tea, key2) - tea.flushUncommitted() + tea.flushUncomitted() teaInsert(t, tea, key3) requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, true) @@ -225,7 +225,7 @@ func TestGet(t *testing.T) { // edits everywhere materialized teaInsert(t, tea, key2) - tea.flushUncommitted() + tea.flushUncomitted() teaInsert(t, tea, key3) requireGet(ctx, t, tea, key1, true) requireGet(ctx, t, tea, key2, true) @@ -247,7 +247,7 @@ func TestGet(t *testing.T) { requireGet(ctx, t, tea, key4, true) teaDelete(t, tea, key2) teaInsert(t, tea, key5) - tea.flushUncommitted() + tea.flushUncomitted() requireGet(ctx, t, tea, key2, false) requireGet(ctx, t, tea, key5, true) teaInsert(t, tea, key6) diff --git a/go/libraries/doltcore/table/inmem_table_test.go b/go/libraries/doltcore/table/inmem_table_test.go index 2108ca8f21..4651c52671 100644 --- a/go/libraries/doltcore/table/inmem_table_test.go +++ b/go/libraries/doltcore/table/inmem_table_test.go @@ -50,7 +50,7 @@ func mustRow(r row.Row, err error) row.Row { } // These are in noms-key-sorted order, since InMemoryTable.AppendRow sorts its rows. This should probably be done -// programatically instead of hard-coded. +// programmatically instead of hard-coded. 
var rows = []row.Row{ mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{ nameTag: types.String("Bill Billerson"), diff --git a/go/libraries/events/metrics.go b/go/libraries/events/metrics.go index 6eba0d7759..73b0e999b0 100644 --- a/go/libraries/events/metrics.go +++ b/go/libraries/events/metrics.go @@ -40,7 +40,7 @@ func NewCounter(metricID eventsapi.MetricID) *Counter { return &Counter{0, metricID} } -// Inc incements a counter. This method happens atomically. +// Inc increments a counter. This method happens atomically. func (c *Counter) Inc() { c.Add(1) } diff --git a/go/libraries/utils/argparser/args_test.go b/go/libraries/utils/argparser/args_test.go index 27458ed975..669cccbef3 100644 --- a/go/libraries/utils/argparser/args_test.go +++ b/go/libraries/utils/argparser/args_test.go @@ -233,7 +233,7 @@ func TestValidation(t *testing.T) { } if apr.ContainsAny("string2", "flag2", "integer2") { - t.Error("Contains unexpected parameter(s)") + t.Error("Contains unexpectededed parameter(s)") } if val := apr.MustGetValue("string"); val != "string" { @@ -308,7 +308,7 @@ func TestDropValue(t *testing.T) { } newApr2 := apr.DropValue("flag") - require.NotEqualf(t, apr, newApr2, "DropValue failes to drop flag") + require.NotEqualf(t, apr, newApr2, "DropValue fails to drop flag") _, hasVal = newApr2.GetValue("string") if !hasVal { diff --git a/go/libraries/utils/concurrentmap/concurrentmap_test.go b/go/libraries/utils/concurrentmap/concurrentmap_test.go index 779b594c04..d8e2ff685c 100644 --- a/go/libraries/utils/concurrentmap/concurrentmap_test.go +++ b/go/libraries/utils/concurrentmap/concurrentmap_test.go @@ -100,7 +100,7 @@ func TestConcurrentMapIter(t *testing.T) { t.Errorf("Iter failed, expected to iterate 3 times, iterated %d times", counter) } - // Test that iteration yeilds all elements + // Test that iteration yields all elements if len(elements) != 3 { t.Errorf("Iter failed, there should be 3 elements in the map, got %d", len(elements)) } diff --git 
a/go/libraries/utils/config/config_hierarchy.go b/go/libraries/utils/config/config_hierarchy.go index 58cac55307..d9ffa2f0f3 100644 --- a/go/libraries/utils/config/config_hierarchy.go +++ b/go/libraries/utils/config/config_hierarchy.go @@ -109,7 +109,7 @@ func (ch *ConfigHierarchy) SetStrings(updates map[string]string) error { ns, paramName := splitParamName(k) if ns == "" { - // panicing in cases where developers have used this function incorrectly + // panicking in cases where developers have used this function incorrectly panic("Calls to SetStrings for a ConfigHierarchy must include the config name. " + k + " is not in the format config_name::param_name") } @@ -182,7 +182,7 @@ func (ch *ConfigHierarchy) Unset(params []string) error { ns, paramName := splitParamName(param) if ns == "" { - // panicing in cases where developers have used this function incorrectly + // panicking in cases where developers have used this function incorrectly panic("Calls to Unset for a ConfigHierarchy must include the config name. " + param + " is not in the format config_name::param_name") } diff --git a/go/libraries/utils/filesys/fs.go b/go/libraries/utils/filesys/fs.go index 0f3d2484af..7ea38ef35c 100644 --- a/go/libraries/utils/filesys/fs.go +++ b/go/libraries/utils/filesys/fs.go @@ -93,14 +93,14 @@ type WalkableFS interface { Iter(directory string, recursive bool, cb FSIterCB) error } -// ReadWriteFS is an interface whose implementors will provide read, and write implementations but may not allow +// ReadWriteFS is an interface whose implementers will provide read, and write implementations but may not allow // for files to be listed. 
type ReadWriteFS interface { ReadableFS WritableFS } -// Filesys is an interface whose implementors will provide read, write, and list mechanisms +// Filesys is an interface whose implementers will provide read, write, and list mechanisms type Filesys interface { ReadableFS WritableFS diff --git a/go/libraries/utils/filesys/inmemfs.go b/go/libraries/utils/filesys/inmemfs.go index 12c0e9717a..bcb6ab747c 100644 --- a/go/libraries/utils/filesys/inmemfs.go +++ b/go/libraries/utils/filesys/inmemfs.go @@ -521,7 +521,7 @@ func (fs *InMemFS) MoveDir(srcPath, destPath string) error { func (fs *InMemFS) moveDirHelper(dir *memDir, destPath string) error { // All calls to moveDirHelper MUST happen with the filesystem's read-write mutex locked if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil { - return fmt.Errorf("moveDirHelper called without first aquiring filesystem read-write lock") + return fmt.Errorf("moveDirHelper called without first acquiring filesystem read-write lock") } if _, exists := fs.objs[destPath]; exists { @@ -562,7 +562,7 @@ func (fs *InMemFS) moveDirHelper(dir *memDir, destPath string) error { delete(dir.objs, obj.absPath) delete(fs.objs, obj.absPath) default: - return fmt.Errorf("unexpected type of memory object: %T", v) + return fmt.Errorf("unexpectededed type of memory object: %T", v) } } @@ -597,7 +597,7 @@ func (fs *InMemFS) MoveFile(srcPath, destPath string) error { func (fs *InMemFS) moveFileHelper(obj *memFile, destPath string) error { // All calls to moveFileHelper MUST happen with the filesystem's read-write mutex locked if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil { - return fmt.Errorf("moveFileHelper called without first aquiring filesystem read-write lock") + return fmt.Errorf("moveFileHelper called without first acquiring filesystem read-write lock") } destDir := filepath.Dir(destPath) diff --git a/go/libraries/utils/minver/minver.go b/go/libraries/utils/minver/minver.go index 52b00c10a3..98a34622c1 100644 --- 
a/go/libraries/utils/minver/minver.go +++ b/go/libraries/utils/minver/minver.go @@ -27,7 +27,7 @@ import ( func YamlForVersion(st any, versionNum uint32) ([]byte, error) { err := NullUnsupported(versionNum, st) if err != nil { - return nil, fmt.Errorf("error nulling unspported fields for version %d: %w", versionNum, err) + return nil, fmt.Errorf("error nulling unsupported fields for version %d: %w", versionNum, err) } return yaml.Marshal(st) diff --git a/go/libraries/utils/svcs/controller.go b/go/libraries/utils/svcs/controller.go index 3b4e1c3536..bb1d360161 100644 --- a/go/libraries/utils/svcs/controller.go +++ b/go/libraries/utils/svcs/controller.go @@ -116,7 +116,7 @@ func (ss *ServiceState) CompareAndSwap(old, new ServiceState) (swapped bool) { // // |WaitForStart| can be called at any time on a Controller. It will block // until |Start| is called. After |Start| is called, if all the services -// succesfully initialize, it will return |nil|. Otherwise it will return the +// successfully initialize, it will return |nil|. Otherwise it will return the // same error |Start| returned. // // |WaitForStop| can be called at any time on a Controller. 
It will block until diff --git a/go/performance/sysbench/testdef.go b/go/performance/sysbench/testdef.go index 0e147a5d8e..272815db35 100644 --- a/go/performance/sysbench/testdef.go +++ b/go/performance/sysbench/testdef.go @@ -281,8 +281,8 @@ func (r *Result) populateHistogram(buf []byte) error { var err error { - timeRe := regexp.MustCompile(`total time:\s+([0-9][0-9]*\.[0-9]+)s\n`) - res := timeRe.FindSubmatch(buf) + timer := regexp.MustCompile(`total time:\s+([0-9][0-9]*\.[0-9]+)s\n`) + res := timer.FindSubmatch(buf) if len(res) == 0 { return fmt.Errorf("time not found") } @@ -475,7 +475,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe conf.Port = strconv.Itoa(s.Port) conf.Password = s.Password return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error { - log.Printf("starting scipt: %s", script) + log.Printf("starting script: %s", script) db, err := driver.ConnectDB(s.User, s.Password, s.Name, s.Host, s.Port, nil) if err != nil { @@ -515,7 +515,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe // RunSqlServerTests creates a new repo and server for every import test. func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser, conf Config) error { return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error { - log.Printf("starting scipt: %s", script) + log.Printf("starting script: %s", script) //make a new server for every test server, err := newServer(user, repo, conf) if err != nil { diff --git a/go/performance/utils/benchmark_runner/README.md b/go/performance/utils/benchmark_runner/README.md index 5d47db7395..20317e833e 100644 --- a/go/performance/utils/benchmark_runner/README.md +++ b/go/performance/utils/benchmark_runner/README.md @@ -111,7 +111,7 @@ Configuration: } ``` -`Servers`: The server defintions to run the benchmark against. Accepts Dolt and MySQL configuratiosn. 
+`Servers`: The server definitions to run the benchmark against. Accepts Dolt and MySQL configuratiosn. `ScriptDir`: The directory of the TPCC testing scripts diff --git a/go/performance/utils/benchmark_runner/run.go b/go/performance/utils/benchmark_runner/run.go index a22964f41d..1444845879 100644 --- a/go/performance/utils/benchmark_runner/run.go +++ b/go/performance/utils/benchmark_runner/run.go @@ -79,7 +79,7 @@ func Run(ctx context.Context, config SysbenchConfig) error { fmt.Println("Running postgres sysbench tests") b = NewPostgresBenchmarker(cwd, config, sc) default: - panic(fmt.Sprintf("unexpected server type: %s", st)) + panic(fmt.Sprintf("unexpectededed server type: %s", st)) } results, err = b.Benchmark(ctx) @@ -87,14 +87,14 @@ func Run(ctx context.Context, config SysbenchConfig) error { return err } - fmt.Printf("Successfuly finished %s\n", st) + fmt.Printf("Successfully finished %s\n", st) err = WriteResults(serverConfig, results) if err != nil { return err } - fmt.Printf("Successfuly wrote results for %s\n", st) + fmt.Printf("Successfully wrote results for %s\n", st) } return nil } diff --git a/go/performance/utils/benchmark_runner/run_tpcc.go b/go/performance/utils/benchmark_runner/run_tpcc.go index 10a3710ca8..f5a285e451 100644 --- a/go/performance/utils/benchmark_runner/run_tpcc.go +++ b/go/performance/utils/benchmark_runner/run_tpcc.go @@ -49,7 +49,7 @@ func RunTpcc(ctx context.Context, config TpccConfig) error { fmt.Println("Running mysql tpcc benchmarks") b = NewMysqlTpccBenchmarker(cwd, config, sc) default: - panic(fmt.Sprintf("unexpected server type: %s", st)) + panic(fmt.Sprintf("unexpectededed server type: %s", st)) } results, err = b.Benchmark(ctx) @@ -62,7 +62,7 @@ func RunTpcc(ctx context.Context, config TpccConfig) error { return err } - fmt.Printf("Successfuly wrote results for %s\n", st) + fmt.Printf("Successfully wrote results for %s\n", st) } return nil diff --git a/go/serial/addressmap.fbs b/go/serial/addressmap.fbs index 
e94e038a73..154b0434db 100644 --- a/go/serial/addressmap.fbs +++ b/go/serial/addressmap.fbs @@ -17,7 +17,7 @@ namespace serial; table AddressMap { // sorted array of key items key_items:[ubyte] (required); - // items offets for |key_items| + // items offsets for |key_items| // first offset is 0, last offset is len(key_items) key_offsets:[uint16] (required); diff --git a/go/serial/mergeartifacts.fbs b/go/serial/mergeartifacts.fbs index ca1d1d44be..d40590d964 100644 --- a/go/serial/mergeartifacts.fbs +++ b/go/serial/mergeartifacts.fbs @@ -18,7 +18,7 @@ table MergeArtifacts { // sorted array of key items // key items are encoded as TupleFormatAlpha key_items:[ubyte] (required); - // items offets for |key_items| + // items offsets for |key_items| // first offset is 0, last offset is len(key_items) key_offsets:[uint16] (required); diff --git a/go/serial/prolly.fbs b/go/serial/prolly.fbs index 8c7f1ea49d..d10e0e096c 100644 --- a/go/serial/prolly.fbs +++ b/go/serial/prolly.fbs @@ -22,7 +22,7 @@ enum ItemType : uint8 { table ProllyTreeNode { // sorted array of key items key_items:[ubyte] (required); - // items offets for |key_items| + // items offsets for |key_items| // first offset is 0, last offset is len(key_items) key_offsets:[uint16] (required); // item type for |key_items| diff --git a/go/store/blobstore/blobstore_test.go b/go/store/blobstore/blobstore_test.go index 638a5d6f01..46b552c8dc 100644 --- a/go/store/blobstore/blobstore_test.go +++ b/go/store/blobstore/blobstore_test.go @@ -324,7 +324,7 @@ func TestConcurrentCheckAndPuts(t *testing.T) { for _, bsTest := range newBlobStoreTests() { t.Run(bsTest.bsType, func(t *testing.T) { if bsTest.rmwIterations*bsTest.rmwConcurrency > 255 { - panic("Test epects less than 255 total updates or it won't work as is.") + panic("Test expects less than 255 total updates or it won't work as is.") } testConcurrentCheckAndPuts(t, bsTest, uuid.New().String()) }) diff --git a/go/store/chunks/test_utils.go 
b/go/store/chunks/test_utils.go index a344837dda..07b84ad0d7 100644 --- a/go/store/chunks/test_utils.go +++ b/go/store/chunks/test_utils.go @@ -40,7 +40,7 @@ func (t *TestStorage) NewView() *TestStoreView { type TestStoreView struct { ChunkStore reads int32 - hases int32 + hashes int32 writes int32 } @@ -61,12 +61,12 @@ func (s *TestStoreView) CacheHas(_ hash.Hash) bool { } func (s *TestStoreView) Has(ctx context.Context, h hash.Hash) (bool, error) { - atomic.AddInt32(&s.hases, 1) + atomic.AddInt32(&s.hashes, 1) return s.ChunkStore.Has(ctx, h) } func (s *TestStoreView) HasMany(ctx context.Context, hashes hash.HashSet) (hash.HashSet, error) { - atomic.AddInt32(&s.hases, int32(len(hashes))) + atomic.AddInt32(&s.hashes, int32(len(hashes))) return s.ChunkStore.HasMany(ctx, hashes) } @@ -104,9 +104,9 @@ func (s *TestStoreView) Reads() int { return int(reads) } -func (s *TestStoreView) Hases() int { - hases := atomic.LoadInt32(&s.hases) - return int(hases) +func (s *TestStoreView) Hashes() int { + hashes := atomic.LoadInt32(&s.hashes) + return int(hashes) } func (s *TestStoreView) Writes() int { diff --git a/go/store/cmd/noms/commit_iterator.go b/go/store/cmd/noms/commit_iterator.go index c95a089d98..3ba4fce90e 100644 --- a/go/store/cmd/noms/commit_iterator.go +++ b/go/store/cmd/noms/commit_iterator.go @@ -86,7 +86,7 @@ func (iter *CommitIterator) Next(ctx context.Context) (LogNode, bool) { newCols = append(newCols, col+cnt) } - // Now that the branchlist has been adusted, check to see if there are branches with common + // Now that the branchlist has been adjusted, check to see if there are branches with common // ancestors that will be folded together on this commit's graph. 
foldedCols := iter.branches.HighestBranchIndexes() node := LogNode{ diff --git a/go/store/cmd/noms/noms.go b/go/store/cmd/noms/noms.go index 9723c75090..f5877d30ce 100644 --- a/go/store/cmd/noms/noms.go +++ b/go/store/cmd/noms/noms.go @@ -141,7 +141,7 @@ func addDatabaseArg(cmd *kingpin.CmdClause) (arg *string) { // addNomsDocs - adds documentation (docs only, not commands) for existing (pre-kingpin) commands. func addNomsDocs(noms *kingpin.Application) { - // commmit + // commit commit := noms.Command("commit", `Commits a specified value as head of the dataset If absolute-path is not provided, then it is read from stdin. See Spelling Objects at https://github.com/attic-labs/noms/blob/master/doc/spelling.md for details on the dataset and absolute-path arguments. `) diff --git a/go/store/datas/database_common.go b/go/store/datas/database_common.go index 509c22cdb3..9f131e5b85 100644 --- a/go/store/datas/database_common.go +++ b/go/store/datas/database_common.go @@ -47,8 +47,8 @@ const ( var ( ErrOptimisticLockFailed = errors.New("optimistic lock failed on database Root update") ErrMergeNeeded = errors.New("dataset head is not ancestor of commit") - ErrAlreadyCommitted = errors.New("dataset head already pointing at given commit") - ErrDirtyWorkspace = errors.New("target has uncommitted changes. --force required to overwrite") + ErrAlreadyComitted = errors.New("dataset head already pointing at given commit") + ErrDirtyWorkspace = errors.New("target has uncomitted changes. 
--force required to overwrite") ) // rootTracker is a narrowing of the ChunkStore interface, to keep Database disciplined about working directly with Chunks @@ -289,7 +289,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w return err } if !iscommit { - return fmt.Errorf("SetHead failed: reffered to value is not a commit:") + return fmt.Errorf("SetHead failed: referred to value is not a commit:") } case tagName: istag, err := IsTag(ctx, newVal) @@ -297,7 +297,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w return err } if !istag { - return fmt.Errorf("SetHead failed: reffered to value is not a tag:") + return fmt.Errorf("SetHead failed: referred to value is not a tag:") } _, commitaddr, err := newHead.HeadTag() if err != nil { @@ -487,7 +487,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h } if curr != (hash.Hash{}) { if curr == h { - return prolly.AddressMap{}, ErrAlreadyCommitted + return prolly.AddressMap{}, ErrAlreadyComitted } } @@ -573,7 +573,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h return ae.Flush(ctx) }) - if err == ErrAlreadyCommitted { + if err == ErrAlreadyComitted { return nil } @@ -655,7 +655,7 @@ func buildClassicCommitFunc(db Database, datasetID string, datasetCurrentAddr ha return types.Map{}, ErrMergeNeeded } if currRef.TargetHash() == newCommitValueRef.TargetHash() { - return types.Map{}, ErrAlreadyCommitted + return types.Map{}, ErrAlreadyComitted } } else if datasetCurrentAddr != (hash.Hash{}) { return types.Map{}, ErrMergeNeeded @@ -682,7 +682,7 @@ func (db *database) doCommit(ctx context.Context, datasetID string, datasetCurre } if curr != (hash.Hash{}) { if curr == h { - return prolly.AddressMap{}, ErrAlreadyCommitted + return prolly.AddressMap{}, ErrAlreadyComitted } } diff --git a/go/store/diff/print_diff.go b/go/store/diff/print_diff.go index 1a60d8d049..033359f779 100644 --- 
a/go/store/diff/print_diff.go +++ b/go/store/diff/print_diff.go @@ -43,7 +43,7 @@ type ( printFunc func(ctx context.Context, w io.Writer, op prefixOp, key, val types.Value) error ) -// PrintDiff writes a textual reprensentation of the diff from |v1| to |v2| +// PrintDiff writes a textual representation of the diff from |v1| to |v2| // to |w|. If |leftRight| is true then the left-right diff is used for ordered // sequences - see Diff vs DiffLeftRight in Set and Map. func PrintDiff(ctx context.Context, w io.Writer, v1, v2 types.Value, leftRight bool) (err error) { @@ -112,7 +112,7 @@ func PrintDiff(ctx context.Context, w io.Writer, v1, v2 types.Value, leftRight b return err } } else { - panic("unexpected Path type") + panic("unexpectededed Path type") } case types.Set: // default values are ok diff --git a/go/store/hash/hash.go b/go/store/hash/hash.go index 61ead00bb6..480a1837ca 100644 --- a/go/store/hash/hash.go +++ b/go/store/hash/hash.go @@ -38,7 +38,7 @@ // // The textual serialization of hashes uses big-endian base32 with the alphabet {0-9,a-v}. This scheme was chosen because: // -// - It's easy to convert to and from base32 without bignum arithemetic. +// - It's easy to convert to and from base32 without bignum arithmetic. // - No special chars: you can double-click to select in GUIs. // - Sorted hashes will be sorted textually, making it easy to scan for humans. 
// @@ -125,7 +125,7 @@ func IsValid(s string) bool { func Parse(s string) Hash { r, ok := MaybeParse(s) if !ok { - d.PanicIfError(fmt.Errorf("cound not parse Hash: %s", s)) + d.PanicIfError(fmt.Errorf("could not parse Hash: %s", s)) } return r } diff --git a/go/store/marshal/encode.go b/go/store/marshal/encode.go index 7c517bfd21..d8175b0509 100644 --- a/go/store/marshal/encode.go +++ b/go/store/marshal/encode.go @@ -213,12 +213,12 @@ func float64Encoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWri } func intEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) { - // TODO: encoding types.Int as types.Float is lossy, but will recquire a migration to change + // TODO: encoding types.Int as types.Float is lossy, but will require a migration to change return types.Float(float64(v.Int())), nil } func uintEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) { - // TODO: encoding types.Int as types.Uint is lossy, but will recquire a migration to change + // TODO: encoding types.Int as types.Uint is lossy, but will require a migration to change return types.Float(float64(v.Uint())), nil } @@ -767,6 +767,6 @@ func shouldEncodeAsSet(t reflect.Type, tags nomsTags) bool { t.Elem().Kind() == reflect.Struct && t.Elem().NumField() == 0 default: - panic(fmt.Errorf("called with unexpected kind %v", t.Kind())) + panic(fmt.Errorf("called with unexpected kind %v", t.Kind())) } } diff --git a/go/store/merge/three_way.go b/go/store/merge/three_way.go index ee6a965d2e..19f437965e 100644 --- a/go/store/merge/three_way.go +++ b/go/store/merge/three_way.go @@ -52,7 +52,7 @@ func None(aChange, bChange types.DiffChangeType, a, b types.Value, path types.Pa return change, merged, false } -// Ours resolves conflicts by preferring changes from the Value currently being committed. +// Ours resolves conflicts by preferring changes from the Value currently being committed.
func Ours(aChange, bChange types.DiffChangeType, a, b types.Value, path types.Path) (change types.DiffChangeType, merged types.Value, ok bool) { return aChange, a, true } @@ -227,7 +227,7 @@ func (m *merger) threeWay(ctx context.Context, a, b, parent types.Value, path ty defer updateProgress(m.progress) if a == nil || b == nil { - d.Panic("Merge candidates cannont be nil: a = %v, b = %v", a, b) + d.Panic("Merge candidates cannot be nil: a = %v, b = %v", a, b) } switch a.Kind() { diff --git a/go/store/metrics/histogram.go b/go/store/metrics/histogram.go index 91c6ab38d7..d9a1118cb7 100644 --- a/go/store/metrics/histogram.go +++ b/go/store/metrics/histogram.go @@ -38,9 +38,9 @@ import ( // // It logically stores a running histogram of uint64 values and shares some // important features of its inspiration: -// * It acccepts a correctness deficit in return for not needing to lock. +// * It accepts a correctness deficit in return for not needing to lock. // IOW, concurrent calls to Sample may clobber each other. -// * It trades compactness and ease of arithmatic across histograms for +// * It trades compactness and ease of arithmetic across histograms for // precision. Samples lose precision up to the range of the values which // are stored in a bucket // diff --git a/go/store/nbs/README.md b/go/store/nbs/README.md index 6db473b567..e09b71c8d0 100644 --- a/go/store/nbs/README.md +++ b/go/store/nbs/README.md @@ -22,7 +22,7 @@ When backed by AWS, NBS stores its data mainly in S3, along with a single Dynamo ## Perf -For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benenfit Noms at all. NBS locates related chunks together and thus reading data from a NBS store can be done quite alot faster. 
As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz: +For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benefit Noms at all. NBS locates related chunks together and thus reading data from a NBS store can be done quite a lot faster. As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz: * LDB * Initial import: 44 MB/s, size on disk: 1.1 GB. diff --git a/go/store/nbs/archive_build.go b/go/store/nbs/archive_build.go index 235fd1e6a8..aa1cb44270 100644 --- a/go/store/nbs/archive_build.go +++ b/go/store/nbs/archive_build.go @@ -531,7 +531,7 @@ type chunkGroup struct { type chunkCmpScore struct { chunkId hash.Hash // The compression score. Higher is better. This is the ratio of the compressed size to the raw size, using the group's - // dictionary. IE, this number only has meaning withing the group + // dictionary. IE, this number only has meaning within the group score float64 // The size of the compressed chunk using the group's dictionary.
dictCmpSize int diff --git a/go/store/nbs/archive_writer.go b/go/store/nbs/archive_writer.go index d3c35a2252..0223cf33c5 100644 --- a/go/store/nbs/archive_writer.go +++ b/go/store/nbs/archive_writer.go @@ -105,7 +105,7 @@ func (aw *archiveWriter) writeByteSpan(b []byte) (uint32, error) { } if len(b) == 0 { - return 0, fmt.Errorf("Rutime error: empty compressed byte span") + return 0, fmt.Errorf("Runtime error: empty compressed byte span") } offset := aw.bytesWritten diff --git a/go/store/nbs/dynamo_manifest.go b/go/store/nbs/dynamo_manifest.go index 883a7ab72e..57e81fd5f1 100644 --- a/go/store/nbs/dynamo_manifest.go +++ b/go/store/nbs/dynamo_manifest.go @@ -97,7 +97,7 @@ func (dm dynamoManifest) ParseIfExists(ctx context.Context, stats *Stats, readHo return false, manifestContents{}, fmt.Errorf("failed to get dynamo table: '%s' - %w", dm.table, err) } - // !exists(dbAttr) => unitialized store + // !exists(dbAttr) => uninitialized store if len(result.Item) > 0 { valid, hasSpecs, hasAppendix := validateManifest(result.Item) if !valid { diff --git a/go/store/nbs/file_table_reader.go b/go/store/nbs/file_table_reader.go index 051a4f79bd..bbaf7fe22f 100644 --- a/go/store/nbs/file_table_reader.go +++ b/go/store/nbs/file_table_reader.go @@ -105,7 +105,7 @@ func nomsFileTableReader(ctx context.Context, path string, h hash.Hash, chunkCou } if fi.Size() < 0 { - // Size returns the number of bytes for regular files and is system dependant for others (Some of which can be negative). + // Size returns the number of bytes for regular files and is system dependent for others (Some of which can be negative). 
err = fmt.Errorf("%s has invalid size: %d", path, fi.Size()) return } @@ -150,7 +150,7 @@ func nomsFileTableReader(ctx context.Context, path string, h hash.Hash, chunkCou if chunkCount != index.chunkCount() { index.Close() f.Close() - return nil, errors.New("unexpected chunk count") + return nil, errors.New("unexpected chunk count") } tr, err := newTableReader(index, &fileReaderAt{f, path, sz}, fileBlockSize) diff --git a/go/store/nbs/journal_writer.go b/go/store/nbs/journal_writer.go index 10febd736e..83c1b46b86 100644 --- a/go/store/nbs/journal_writer.go +++ b/go/store/nbs/journal_writer.go @@ -444,7 +444,7 @@ func (wr *journalWriter) writeCompressedChunk(ctx context.Context, cc Compressed // We go through |commitRootHash|, instead of directly |Sync()|ing the // file, because we also have accumulating delayed work in the form of // journal index records which may need to be serialized and flushed. - // Assumptions in journal bootstraping and the contents of the journal + // Assumptions in journal bootstrapping and the contents of the journal // index require us to have a newly written root hash record anytime we // write index records out. It's perfectly fine to reuse the current // root hash, and this will also take care of the |Sync|. diff --git a/go/store/nbs/manifest.go b/go/store/nbs/manifest.go index 118c15d60a..4c548295f5 100644 --- a/go/store/nbs/manifest.go +++ b/go/store/nbs/manifest.go @@ -119,7 +119,7 @@ type manifestContents struct { gcGen hash.Hash specs []tableSpec - // An appendix is a list of |tableSpecs| that track an auxillary collection of + // An appendix is a list of |tableSpecs| that track an auxiliary collection of // table files used _only_ for query performance optimizations. These appendix |tableSpecs| can be safely // managed with nbs.UpdateManifestWithAppendix, however generation and removal of the actual table files // the appendix |tableSpecs| reference is done manually.
All appendix |tableSpecs| will be prepended to the diff --git a/go/store/nbs/table.go b/go/store/nbs/table.go index e8e080c690..d242452699 100644 --- a/go/store/nbs/table.go +++ b/go/store/nbs/table.go @@ -95,11 +95,11 @@ import ( -Total Uncompressed Chunk Data is the sum of the uncompressed byte lengths of all contained chunk byte slices. -Magic Number is the first 8 bytes of the SHA256 hash of "https://github.com/attic-labs/nbs". - NOTE: Unsigned integer quanities, hashes and hash suffix are all encoded big-endian + NOTE: Unsigned integer quantities, hashes and hash suffix are all encoded big-endian Looking up Chunks in an NBS Table - There are two phases to loading chunk data for a given Hash from an NBS Table: Checking for the chunk's presence, and fetching the chunk's bytes. When performing a has-check, only the first phase is necessary. + There are two phases to loading chunk data for a given Hash from an NBS Table: Checking for the chunk's presence, and fetching the chunk's bytes. When performing a has-check, only the first phase is necessary.
Phase one: Chunk presence - Slice off the first 8 bytes of your Hash to create a Prefix diff --git a/go/store/nbs/table_writer.go b/go/store/nbs/table_writer.go index 4dd9c1569a..b3bf386efd 100644 --- a/go/store/nbs/table_writer.go +++ b/go/store/nbs/table_writer.go @@ -91,7 +91,7 @@ func newTableWriter(buff []byte, snapper snappyEncoder) *tableWriter { func (tw *tableWriter) addChunk(h hash.Hash, data []byte) bool { if len(data) == 0 { - panic("NBS blocks cannont be zero length") + panic("NBS blocks cannot be zero length") } // Compress data straight into tw.buff diff --git a/go/store/prolly/message/merge_artifacts.go b/go/store/prolly/message/merge_artifacts.go index e18815e7df..55586c6eef 100644 --- a/go/store/prolly/message/merge_artifacts.go +++ b/go/store/prolly/message/merge_artifacts.go @@ -170,7 +170,7 @@ func getMergeArtifactCount(msg serial.Message) (uint16, error) { if ma.KeyItemsLength() == 0 { return 0, nil } - // zeroth offset ommitted from array + // zeroth offset omitted from array return uint16(ma.KeyOffsetsLength() + 1), nil } diff --git a/go/store/prolly/message/serialize.go b/go/store/prolly/message/serialize.go index 70b8fcaf36..f9515dcf21 100644 --- a/go/store/prolly/message/serialize.go +++ b/go/store/prolly/message/serialize.go @@ -66,7 +66,7 @@ func writeItemOffsets(b *fb.Builder, items [][]byte, sumSz int) fb.UOffsetT { func countAddresses(items [][]byte, td val.TupleDesc) (cnt int) { for i := len(items) - 1; i >= 0; i-- { val.IterAddressFields(td, func(j int, t val.Type) { - // get offset of address withing |tup| + // get offset of address within |tup| addr := val.Tuple(items[i]).GetField(j) if len(addr) > 0 && !hash.New(addr).IsEmpty() { cnt++ @@ -89,7 +89,7 @@ func writeAddressOffsets(b *fb.Builder, items [][]byte, sumSz int, td val.TupleD if len(addr) == 0 || hash.New(addr).IsEmpty() { return } - // get offset of address withing |tup| + // get offset of address within |tup| o, _ := tup.GetOffset(j) o += off // offset is tuple start 
plus field start b.PrependUint16(uint16(o)) diff --git a/go/store/prolly/tree/addr_diff_test.go b/go/store/prolly/tree/addr_diff_test.go index e44bf3d09a..142943282c 100644 --- a/go/store/prolly/tree/addr_diff_test.go +++ b/go/store/prolly/tree/addr_diff_test.go @@ -24,7 +24,7 @@ import ( "github.com/dolthub/dolt/go/store/val" ) -// Single layer trees are entirly root nodes - which are imbedded in the table flatbuffer, so we don't +// Single layer trees are entirely root nodes - which are embedded in the table flatbuffer, so we don't // currently use them for purposes of grouping chunks. func TestAddressDifferFromRootsOneLayer(t *testing.T) { fromTups, desc := AscendingUintTuples(42) diff --git a/go/store/prolly/tree/node_splitter.go b/go/store/prolly/tree/node_splitter.go index 89422fcbe8..5714dc0bd2 100644 --- a/go/store/prolly/tree/node_splitter.go +++ b/go/store/prolly/tree/node_splitter.go @@ -236,7 +236,7 @@ const ( // split on any of the records up to |size - thisSize|, // the probability that we should split on this record // is (CDF(end) - CDF(start)) / (1 - CDF(start)), or, -// the precentage of the remaining portion of the CDF +// the percentage of the remaining portion of the CDF // that this record actually covers. We split is |hash|, // treated as a uniform random number between [0,1), // is less than this percentage. 
diff --git a/go/store/skip/list_bench_test.go b/go/store/skip/list_bench_test.go index 7808fef291..18517dd4ab 100644 --- a/go/store/skip/list_bench_test.go +++ b/go/store/skip/list_bench_test.go @@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) { benchmarkPut(b, randomInts(65536)) }) }) - b.Run("asending keys", func(b *testing.B) { + b.Run("ascending keys", func(b *testing.B) { b.Run("n=64", func(b *testing.B) { benchmarkPut(b, ascendingInts(64)) }) @@ -104,7 +104,7 @@ func BenchmarkIterAll(b *testing.B) { benchmarkIterAll(b, randomInts(65536)) }) }) - b.Run("asending keys", func(b *testing.B) { + b.Run("ascending keys", func(b *testing.B) { b.Run("n=64", func(b *testing.B) { benchmarkIterAll(b, ascendingInts(64)) }) diff --git a/go/store/sloppy/sloppy.go b/go/store/sloppy/sloppy.go index dd677479e3..cdfa963a57 100644 --- a/go/store/sloppy/sloppy.go +++ b/go/store/sloppy/sloppy.go @@ -57,14 +57,14 @@ var maxOffset = int(1<