Fixed grammar typos re issue #8221

Dylan Varga
2024-08-09 22:24:19 -07:00
parent 2c1b0854ac
commit fed30c181d
82 changed files with 192 additions and 192 deletions
+5 -5
View File
@@ -309,7 +309,7 @@ Isaac Dunham
Jaydeep Patil
Jens Gustedt
Jeremy Huntwork
Jo-Philipp Which
Jo-Philipp Wich
Joakim Sindholt
John Spencer
Julien Ramseier
@@ -322,7 +322,7 @@ Luca Barbato
Luka Perkov
M Farkas-Dyck (Strake)
Mahesh Bodapati
Markus Whichmann
Markus Wichmann
Masanori Ogino
Michael Clark
Michael Forney
@@ -409,7 +409,7 @@ under the standard MIT terms.
All other files which have no copyright comments are original works
produced specifically for use as part of this library, written either
by Rich Felker, the main author of the library, or by one or more
contibutors listed above. Details on authorship of individual files
contributors listed above. Details on authorship of individual files
can be found in the git version control history of the project. The
omission of copyright and license comments in each file is in the
interest of source tree size.
@@ -1749,7 +1749,7 @@ Codegen template in t_html_generator.h
---------------------------------------------------
For t_cl_generator.cc
* Copyright (c) 2008- Patrick Collision <patrick@collision.ie>
* Copyright (c) 2008- Patrick Collison <patrick@collison.ie>
* Copyright (c) 2006- Facebook
---------------------------------------------------
@@ -9921,7 +9921,7 @@ For more information, please refer to <http://unlicense.org/>
= github.com/zeebo/xxh3 licensed under: =
xxHash Library
Copyright (c) 2012-2014, Yann Collect
Copyright (c) 2012-2014, Yann Collet
Copyright (c) 2019, Jeff Wendling
All rights reserved.
+1 -1
View File
@@ -17,7 +17,7 @@ package cli
// This is a starting point for storing common messages. Doing this correctly would probably mean using language files
// but that is overkill for the moment.
const (
// Single variable - the name of the command. `dolt <command>` is how the commandString is formated in calls to the Exec method
// Single variable - the name of the command. `dolt <command>` is how the commandString is formatted in calls to the Exec method
// for dolt commands.
RemoteUnsupportedMsg = "%s can not currently be used when there is a local server running. Please stop your dolt sql-server and try again."
)
+1 -1
View File
@@ -252,7 +252,7 @@ func extractJsonResponse(content string) map[string]interface{} {
}
func sqlQuery(ctx context.Context, query string) (string, bool, error) {
cli.Println(fmt.Sprintf("Runnning query \"%s\"...", query))
cli.Println(fmt.Sprintf("Running query \"%s\"...", query))
output, _, err := doltExec(ctx, fmt.Sprintf("dolt sql -q \"%s\"", query), false)
if err != nil {
+2 -2
View File
@@ -176,7 +176,7 @@ func getBranches(sqlCtx *sql.Context, queryEngine cli.Queryist, remote bool) ([]
return nil, err
}
if len(row) != 2 {
return nil, fmt.Errorf("unexpectedly received multiple columns in '%s': %s", command, row)
return nil, fmt.Errorf("unexpectedededly received multiple columns in '%s': %s", command, row)
}
rowStrings, err := sqlfmt.SqlRowAsStrings(row, schema)
@@ -479,7 +479,7 @@ func generateForceDeleteMessage(args []string) string {
return newArgs
}
// callStoredProcedure generates and exectures the SQL query for calling the DOLT_BRANCH stored procedure.
// callStoredProcedure generates and executes the SQL query for calling the DOLT_BRANCH stored procedure.
// All actions that modify branches delegate to this after they validate their arguments.
// Actions that don't modify branches, such as `dolt branch --list` and `dolt branch --show-current`, don't call
// this method.
+1 -1
View File
@@ -231,7 +231,7 @@ func validateAndParseDolthubUrl(urlStr string) (string, bool) {
}
if u.Scheme == dbfactory.HTTPSScheme && u.Host == "www.dolthub.com" {
// Get the actual repo name and covert the remote
// Get the actual repo name and convert the remote
split := strings.Split(u.Path, "/")
if len(split) > 2 {
+2 -2
View File
@@ -69,7 +69,7 @@ func newDiffWriter(diffOutput diffOutput) (diffWriter, error) {
case JsonDiffOutput:
return newJsonDiffWriter(iohelp.NopWrCloser(cli.CliOut))
default:
panic(fmt.Sprintf("unexpected diff output: %v", diffOutput))
panic(fmt.Sprintf("unexpectededed diff output: %v", diffOutput))
}
}
@@ -310,7 +310,7 @@ func (s sqlDiffWriter) RowWriter(fromTableInfo, toTableInfo *diff.TableInfo, tds
targetSch = fromTableInfo.Sch
}
// TOOD: schema names
// TODO: schema names
return sqlexport.NewSqlDiffWriter(tds.ToTableName.Name, targetSch, iohelp.NopWrCloser(cli.CliOut)), nil
}
+5 -5
View File
@@ -310,7 +310,7 @@ func ConfigureServices(
primaryController := sqlEngine.GetUnderlyingEngine().Analyzer.Catalog.BinlogPrimaryController
doltBinlogPrimaryController, ok := primaryController.(*binlogreplication.DoltBinlogPrimaryController)
if !ok {
return fmt.Errorf("unexpected type of binlog controller: %T", primaryController)
return fmt.Errorf("unexpectededed type of binlog controller: %T", primaryController)
}
_, logBinValue, ok := sql.SystemVariables.GetGlobal("log_bin")
@@ -319,7 +319,7 @@ func ConfigureServices(
}
logBin, ok := logBinValue.(int8)
if !ok {
return fmt.Errorf("unexpected type for @@log_bin system variable: %T", logBinValue)
return fmt.Errorf("unexpectededed type for @@log_bin system variable: %T", logBinValue)
}
_, logBinBranchValue, ok := sql.SystemVariables.GetGlobal("log_bin_branch")
@@ -328,7 +328,7 @@ func ConfigureServices(
}
logBinBranch, ok := logBinBranchValue.(string)
if !ok {
return fmt.Errorf("unexpected type for @@log_bin_branch system variable: %T", logBinBranchValue)
return fmt.Errorf("unexpectededed type for @@log_bin_branch system variable: %T", logBinBranchValue)
}
if logBinBranch != "" {
// If an invalid branch has been configured, let the server start up so that it's
@@ -800,7 +800,7 @@ func persistServerLocalCreds(port int, dEnv *env.DoltEnv) (*LocalCreds, error) {
// remotesapiAuth facilitates the implementation remotesrv.AccessControl for the remotesapi server.
type remotesapiAuth struct {
// ctxFactory is a function that returns a new sql.Context. This will create a new conext every time it is called,
// ctxFactory is a function that returns a new sql.Context. This will create a new context every time it is called,
// so it should be called once per API request.
ctxFactory func(context.Context) (*sql.Context, error)
rawDb *mysql_db.MySQLDb
@@ -827,7 +827,7 @@ func (r *remotesapiAuth) ApiAuthenticate(ctx context.Context) (context.Context,
if strings.Index(address, ":") > 0 {
address, _, err = net.SplitHostPort(creds.Address)
if err != nil {
return nil, fmt.Errorf("Invlaid Host string for authentication: %s", creds.Address)
return nil, fmt.Errorf("Invalid Host string for authentication: %s", creds.Address)
}
}
@@ -140,7 +140,7 @@ func TestServerBadArgs(t *testing.T) {
tests := [][]string{
{"-H", "127.0.0.0.1"},
{"-H", "loclahost"},
{"-H", "localahost"},
{"-P", "300"},
{"-P", "90000"},
{"-l", "everything"},
+3 -3
View File
@@ -37,7 +37,7 @@ var ErrNoConflictsResolved = errors.New("no conflicts resolved")
const dolt_row_hash_tag = 0
// IsValidTableName checks if name is a valid identifer, and doesn't end with space characters
// IsValidTableName checks if name is a valid identifier, and doesn't end with space characters
func IsValidTableName(name string) bool {
if len(name) == 0 || unicode.IsSpace(rune(name[len(name)-1])) {
return false
@@ -454,7 +454,7 @@ func (t *Table) HashOf() (hash.Hash, error) {
// UpdateNomsRows replaces the current row data and returns and updated Table.
// Calls to UpdateNomsRows will not be written to the database. The root must
// be updated with the updated table, and the root must be comitted or written.
// be updated with the updated table, and the root must be committed or written.
// Deprecated: use Table.UpdateRows() instead.
func (t *Table) UpdateNomsRows(ctx context.Context, updatedRows types.Map) (*Table, error) {
table, err := t.table.SetTableRows(ctx, durable.IndexFromNomsMap(updatedRows, t.ValueReadWriter(), t.NodeStore()))
@@ -466,7 +466,7 @@ func (t *Table) UpdateNomsRows(ctx context.Context, updatedRows types.Map) (*Tab
// UpdateRows replaces the current row data and returns and updated Table.
// Calls to UpdateRows will not be written to the database. The root must
// be updated with the updated table, and the root must be comitted or written.
// be updated with the updated table, and the root must be committed or written.
func (t *Table) UpdateRows(ctx context.Context, updatedRows durable.Index) (*Table, error) {
table, err := t.table.SetTableRows(ctx, updatedRows)
if err != nil {
+1 -1
View File
@@ -101,7 +101,7 @@ func RowsAndSchema() ([]row.Row, schema.Schema, error) {
return rows, sch, err
}
// MustTuple contructs a types.Tuple for a slice of types.Values.
// MustTuple constructs a types.Tuple for a slice of types.Values.
func MustTuple(vals ...types.Value) types.Tuple {
tup, err := types.NewTuple(types.Format_Default, vals...)
if err != nil {
+1 -1
View File
@@ -124,7 +124,7 @@ func GetMultiEnvStorageMetadata(dataDirFS filesys.Filesys) (StorageMetadataMap,
return sms, nil
}
// NewMultiEnv returns a new MultiRepoEnv instance dirived from a root DoltEnv instance.
// NewMultiEnv returns a new MultiRepoEnv instance derived from a root DoltEnv instance.
func MultiEnvForSingleEnv(ctx context.Context, env *DoltEnv) (*MultiRepoEnv, error) {
return MultiEnvForDirectory(ctx, env.Config.WriteableConfig(), env.FS, env.Version, env)
}
@@ -635,7 +635,7 @@ func (uv uniqValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff) (
// deleteArtifact deletes the unique constraint violation artifact for the row identified by |key| and returns a
// boolean that indicates if an artifact was deleted, as well as an error that indicates if there were any
// unexpectededed errors encountered.
// unexpected errors encountered.
func (uv uniqValidator) deleteArtifact(ctx context.Context, key val.Tuple) (bool, error) {
artifactKey := uv.edits.BuildArtifactKey(ctx, key, uv.srcHash, prolly.ArtifactTypeUniqueKeyViol)
@@ -1121,7 +1121,7 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
// WARNING: In theory, we should only have to call MutableMap::Delete if the key is actually being deleted
// from the left branch. However, because of https://github.com/dolthub/dolt/issues/7192,
// if the left side of the merge is an empty table and we don't attempt to modify the map,
// the table will have an unexpectededed root hash.
// the table will have an unexpected root hash.
return m.mut.Delete(ctx, diff.Key)
case tree.DiffOpDivergentModifyResolved:
// any generated columns need to be re-resolved because their computed values may have changed as a result of
@@ -1177,7 +1177,7 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
}
return m.mut.Put(ctx, diff.Key, newTupleValue)
default:
return fmt.Errorf("unexpected diffOp for editing primary index: %s", diff.Op)
return fmt.Errorf("unexpectededed diffOp for editing primary index: %s", diff.Op)
}
}
@@ -1210,7 +1210,7 @@ func resolveDefaults(ctx *sql.Context, tableName string, mergedSchema schema.Sch
return nil, err
}
// The default expresions always come in the order of the merged schema, but the fields we need to apply them to
// The default expressions always come in the order of the merged schema, but the fields we need to apply them to
// might have different column indexes in the case of a schema change
if len(exprs) > 0 {
for i := range exprs {
@@ -1540,7 +1540,7 @@ func writeTupleExpression(
// instance that describes how the table is being merged, |from| is the field position in the value tuple from the
// previous schema, and |rightSide| indicates whether the previous type info can be found on the right side of the merge
// or the left side. If the previous type info is the same as the current type info for the merged schema, then this
// function is a no-op and simply returns |value|. The converted value along with any unexpectededed error encountered is
// function is a no-op and simply returns |value|. The converted value along with any unexpected error encountered is
// returned.
func convertValueToNewType(value interface{}, newTypeInfo typeinfo.TypeInfo, tm *TableMerger, from int, rightSide bool) (interface{}, error) {
var previousTypeInfo typeinfo.TypeInfo
@@ -75,7 +75,7 @@ func GetMutableSecondaryIdxsWithPending(ctx *sql.Context, ourSch, sch schema.Sch
// If the schema has changed, don't reuse the index.
// TODO: This isn't technically required, but correctly handling updating secondary indexes when only some
// of the table's rows have been updated is difficult to get right.
// Dropping the index is potentially slower but guarenteed to be correct.
// Dropping the index is potentially slower but guaranteed to be correct.
if !m.KeyDesc().Equals(index.Schema().GetKeyDescriptorWithNoConversion()) {
continue
}
+1 -1
View File
@@ -276,7 +276,7 @@ func nomsKindsFromQueryTypes(qt query.Type) []types.NomsKind {
return []types.NomsKind{types.JSONKind}
default:
panic(fmt.Sprintf("unexpect query.Type %s", qt.String()))
panic(fmt.Sprintf("unexpectededed query.Type %s", qt.String()))
}
}
@@ -184,7 +184,7 @@ func filterBranchTests() []filterBranchTest {
asserts: []testAssertion{
{
setup: []testCommand{
// expeced error: "table not found: test"
// expected error: "table not found: test"
{cmd.FilterBranchCmd{}, args{"--continue", "-q", "DELETE FROM test WHERE pk > 1;"}},
},
},
+1 -1
View File
@@ -96,7 +96,7 @@ func (fh filehandler) ServeHTTP(respWr http.ResponseWriter, req *http.Request) {
}
_, ok := hash.MaybeParse(path[i+1:])
if !ok {
logger.WithField("last_path_component", path[i+1:]).Warn("bad request with unparseable last path component")
logger.WithField("last_path_component", path[i+1:]).Warn("bad request with unparsable last path component")
respWr.WriteHeader(http.StatusBadRequest)
return
}
@@ -34,7 +34,7 @@ import (
// A remotestorage.ChunkFetcher is a pipelined chunk fetcher for fetching a
// large number of chunks where the downloads may benefit from range
// coallescing, hedging, automatic retries, pipelining of download location
// coalescing, hedging, automatic retries, pipelining of download location
// retrieval with the fetching of the actual chunk bytes, etc.
//
// It is expected that one goroutine will be calling `Get()` with batches of
@@ -392,7 +392,7 @@ func fetcherDownloadRangesThread(ctx context.Context, locCh chan []*remotesapi.D
// |toSend| could have come from a previous iteration
// of this loop or the outer loop. If it's |nil|, we
// can get the next range to download from
// |downlaods.ranges|.
// |downloads.ranges|.
if toSend == nil {
max := downloads.ranges.DeleteMaxRegion()
if len(max) == 0 {
@@ -486,9 +486,9 @@ type SizeSetter interface {
// This does additive increase, multiplicative decrease on calls to |SetSize|,
// reading successes and failures from calls to |RecordSuccess| and
// |RecordFailure|. If there have been any faliures in the last update window,
// |RecordFailure|. If there have been any failures in the last update window,
// it will call |SetSize| with a new size that's 1/2 the current size. If there
// have been no faliures in the last update window, but there has been at least
// have been no failures in the last update window, but there has been at least
// one success, it will call |SetSize| with a size 1 greater than the current
// size. Will not scale size greater than |MaxConcurrency|.
func (cc *ConcurrencyControl) Run(ctx context.Context, done <-chan struct{}, ss SizeSetter, sz int) error {
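The comment above describes textbook AIMD. A minimal Go sketch of that policy, with invented type and field names rather than the actual implementation:
// aimd tracks a window size: halve it after any failure in the update
// window, otherwise grow it by one, capped at the configured maximum.
type aimd struct {
	size, maxSize       int
	failures, successes int
}
func (a *aimd) update() {
	if a.failures > 0 {
		if a.size > 1 {
			a.size /= 2 // multiplicative decrease
		}
	} else if a.successes > 0 && a.size < a.maxSize {
		a.size++ // additive increase
	}
	a.failures, a.successes = 0, 0 // start a new update window
}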
@@ -38,8 +38,8 @@ type GetRange struct {
// A |Region| represents a continuous range of bytes within in a Url.
// |ranges.Tree| maintains |Region| instances that cover every |GetRange|
// within the tree. As entries are inserted into the Tree, their Regions can
// coallesce with Regions which come before or after them in the same Url,
// based on the |coallesceLimit|.
// coalesce with Regions which come before or after them in the same Url,
// based on the |coalesceLimit|.
//
// |Region|s are maintained in a |RegionHeap| so that the |Tree| can quickly
// return a large download to get started on when a download worker is
@@ -103,11 +103,11 @@ func (rh *RegionHeap) Pop() any {
}
// A ranges.Tree is a tree data structure designed to support efficient
// coallescing of non-overlapping ranges inserted into it.
// coalescing of non-overlapping ranges inserted into it.
type Tree struct {
t *btree.BTreeG[*GetRange]
regions *RegionHeap
coallesceLimit int
coalesceLimit int
}
func GetRangeLess(a, b *GetRange) bool {
@@ -118,11 +118,11 @@ func GetRangeLess(a, b *GetRange) bool {
}
}
func NewTree(coallesceLimit int) *Tree {
func NewTree(coalesceLimit int) *Tree {
return &Tree{
t: btree.NewG[*GetRange](64, GetRangeLess),
regions: &RegionHeap{},
coallesceLimit: coallesceLimit,
coalesceLimit: coalesceLimit,
}
}
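Given the NewTree and Insert signatures visible in this diff, a hypothetical illustration of coalescing (the URL, hash values, and limit are invented):
t := NewTree(4096) // coalesce ranges whose gap is under 4 KiB
t.Insert("https://example.com/chunks", hashA, 0, 1000)    // new Region covering [0, 1000)
t.Insert("https://example.com/chunks", hashB, 3000, 1000) // 2000-byte gap < 4096, so the
                                                          // Region grows to [0, 4000)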
@@ -154,12 +154,12 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
}
t.t.ReplaceOrInsert(ins)
// Check for coallesce with the range of the entry before the new one...
// Check for coalesce with the range of the entry before the new one...
t.t.DescendLessOrEqual(ins, func(gr *GetRange) bool {
if gr == ins {
return true
}
// If we coallesce...
// If we coalesce...
if ins.Url == gr.Url {
regionEnd := gr.Region.EndOffset
if regionEnd > ins.Offset {
@@ -167,8 +167,8 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
heap.Fix(t.regions, ins.Region.HeapIndex)
} else if (ins.Offset - regionEnd) < uint64(t.coallesceLimit) {
// Inserted entry is within the limit to coallesce with the prior one.
} else if (ins.Offset - regionEnd) < uint64(t.coalesceLimit) {
// Inserted entry is within the limit to coalesce with the prior one.
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
ins.Region.EndOffset = ins.Offset + uint64(ins.Length)
@@ -183,10 +183,10 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
if gr == ins {
return true
}
// If we coallesce...
// If we coalesce...
if ins.Url == gr.Url && gr.Region != ins.Region {
regionStart := gr.Region.StartOffset
if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coallesceLimit)) {
if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coalesceLimit)) {
if ins.Region == nil {
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
@@ -216,7 +216,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
return false
})
// We didn't coallesce with any existing Regions. Insert a new Region
// We didn't coalesce with any existing Regions. Insert a new Region
// covering just this GetRange.
if ins.Region == nil {
ins.Region = &Region{
@@ -233,7 +233,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
// Returns all the |*GetRange| entries in the tree that are encompassed by the
// current top entry in our |RegionHeap|. For |HeapStrategy_largest|, this will
// be the largest possible download we can currently start, given our
// |coallesceLimit|.
// |coalesceLimit|.
func (t *Tree) DeleteMaxRegion() []*GetRange {
if t.regions.Len() == 0 {
return nil
@@ -36,7 +36,7 @@ import (
//
// Close should always be called on an reliable.Chan to ensure resource cleanup.
type Chan[T any] struct {
// All unack'd |T|s are stored in |buff|. As they get Ackd, they get poped from here.
// All unack'd |T|s are stored in |buff|. As they get Ackd, they get popped from here.
buff *circular.Buff[T]
// We return new |T|s from here and they go into |buff| to be delivered
@@ -47,7 +47,7 @@ func newBinlogStreamer() *binlogStreamer {
}
// startStream listens for new binlog events sent to this streamer over its binlog event
// channel and sends them over |conn|. It also listens for ticker ticks to send hearbeats
// channel and sends them over |conn|. It also listens for ticker ticks to send heartbeats
// over |conn|. The specified |binlogFormat| is used to define the format of binlog events
// and |binlogEventMeta| records the position of the stream. This method blocks until an error
// is received over the stream (e.g. the connection closing) or the streamer is closed,
@@ -142,7 +142,7 @@ func (r *branchControlReplica) Run() {
r.progressNotifier.RecordSuccess(attempt)
r.fastFailReplicationWait = false
r.backoff.Reset()
r.lgr.Debugf("branchControlReplica[%s]: sucessfully replicated branch control permissions.", r.client.remote)
r.lgr.Debugf("branchControlReplica[%s]: successfully replicated branch control permissions.", r.client.remote)
r.replicatedVersion = version
}
}
@@ -160,7 +160,7 @@ func (r *mysqlDbReplica) Run() {
r.progressNotifier.RecordSuccess(attempt)
r.fastFailReplicationWait = false
r.backoff.Reset()
r.lgr.Debugf("mysqlDbReplica[%s]: sucessfully replicated users and grants at version %d.", r.client.remote, version)
r.lgr.Debugf("mysqlDbReplica[%s]: successfully replicated users and grants at version %d.", r.client.remote, version)
r.replicatedVersion = version
} else {
r.lgr.Debugf("mysqlDbReplica[%s]: not replicating empty users and grants at version %d.", r.client.remote, r.version)
@@ -77,9 +77,9 @@ var DoltRebaseSystemTableSchema = []*sql.Column{
},
}
// ErrRebaseUncomittedChanges is used when a rebase is started, but there are uncomitted (and not
// ErrRebaseUncommittedChanges is used when a rebase is started, but there are uncommitted (and not
// ignored) changes in the working set.
var ErrRebaseUncomittedChanges = fmt.Errorf("cannot start a rebase with uncomitted changes")
var ErrRebaseUncommittedChanges = fmt.Errorf("cannot start a rebase with uncommitted changes")
// ErrRebaseConflict is used when a merge conflict is detected while rebasing a commit.
var ErrRebaseConflict = goerrors.NewKind(
@@ -203,7 +203,7 @@ func startRebase(ctx *sql.Context, upstreamPoint string) error {
}
// rebaseWorkingBranch is the name of the temporary branch used when performing a rebase. In Git, a rebase
// happens with a detatched HEAD, but Dolt doesn't support that, we use a temporary branch.
// happens with a detached HEAD, but Dolt doesn't support that, we use a temporary branch.
rebaseWorkingBranch := "dolt_rebase_" + rebaseBranch
var rsc doltdb.ReplicationStatusController
err = actions.CreateBranchWithStartPt(ctx, dbData, rebaseWorkingBranch, upstreamPoint, false, &rsc)
@@ -319,7 +319,7 @@ func validateWorkingSetCanStartRebase(ctx *sql.Context) error {
return fmt.Errorf("unable to start rebase while another rebase is in progress abort the current rebase before proceeding")
}
// Make sure the working set doesn't contain any uncomitted changes
// Make sure the working set doesn't contain any uncommitted changes
roots, ok := doltSession.GetRoots(ctx, ctx.GetCurrentDatabase())
if !ok {
return fmt.Errorf("unable to get roots for database %s", ctx.GetCurrentDatabase())
@@ -329,7 +329,7 @@ func validateWorkingSetCanStartRebase(ctx *sql.Context) error {
return err
}
if !wsOnlyHasIgnoredTables {
return ErrRebaseUncomittedChanges
return ErrRebaseUncommittedChanges
}
return nil
@@ -473,7 +473,7 @@ func continueRebase(ctx *sql.Context) (string, error) {
func processRebasePlanStep(ctx *sql.Context, planStep *rebase.RebasePlanStep) error {
// Make sure we have a transaction opened for the session
// NOTE: After our first call to cherry-pick, the tx is comitted, so a new tx needs to be started
// NOTE: After our first call to cherry-pick, the tx is committed, so a new tx needs to be started
// as we process additional rebase actions.
doltSession := dsess.DSessFromSess(ctx.Session)
if doltSession.GetTransaction() == nil {
@@ -3587,7 +3587,7 @@ var DoltBranchScripts = []queries.ScriptTest{
var DoltResetTestScripts = []queries.ScriptTest{
{
Name: "CALL DOLT_RESET('--hard') should reset the merge state after uncommitted merge",
Name: "CALL DOLT_RESET('--hard') should reset the merge state after uncomitted merge",
SetUpScript: []string{
"CREATE TABLE test1 (pk int NOT NULL, c1 int, c2 int, PRIMARY KEY (pk));",
"CALL DOLT_ADD('.')",
@@ -5703,7 +5703,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "CALL Dolt_Cherry_Pick(@commit1);",
ExpectedErrStr: "cannot cherry-pick with uncommitted changes",
ExpectedErrStr: "cannot cherry-pick with uncomitted changes",
},
{
Query: "call dolt_add('t');",
@@ -5711,7 +5711,7 @@ var DoltCherryPickTests = []queries.ScriptTest{
},
{
Query: "CALL Dolt_Cherry_Pick(@commit1);",
ExpectedErrStr: "cannot cherry-pick with uncommitted changes",
ExpectedErrStr: "cannot cherry-pick with uncomitted changes",
},
},
},
@@ -7035,7 +7035,7 @@ END`,
},
},
{
Name: "Database syntax propogates to inner calls",
Name: "Database syntax propagates to inner calls",
SetUpScript: []string{
"CALL DOLT_CHECKOUT('main');",
`CREATE PROCEDURE p4()
@@ -581,7 +581,7 @@ var MergeScripts = []queries.ScriptTest{
// TODO: These tests are skipped, because we have temporarily disabled dolt_conflicts_resolve
// when there are schema conflicts, since schema conflicts prevent table data from being
// merged, and resolving the schema changes, but not completing the data merge will likely
// give customers unexpectededed results.
// give customers unexpected results.
// https://github.com/dolthub/dolt/issues/6616
Name: "CALL DOLT_MERGE with schema conflicts can be correctly resolved using dolt_conflicts_resolve when autocommit is off",
SetUpScript: []string{
@@ -3737,7 +3737,7 @@ var SchemaConflictScripts = []queries.ScriptTest{
SetUpScript: []string{
"set @@autocommit=1;",
"create table t (pk int primary key, c0 varchar(20))",
"call dolt_commit('-Am', 'added tabele t')",
"call dolt_commit('-Am', 'added table t')",
"call dolt_checkout('-b', 'other')",
"alter table t modify column c0 int",
"call dolt_commit('-am', 'altered t on branch other')",
@@ -3765,7 +3765,7 @@ var SchemaConflictScripts = []queries.ScriptTest{
SetUpScript: []string{
"set @@autocommit=0;",
"create table t (pk int primary key, c0 varchar(20))",
"call dolt_commit('-Am', 'added tabele t')",
"call dolt_commit('-Am', 'added table t')",
"call dolt_checkout('-b', 'other')",
"alter table t modify column c0 int",
"call dolt_commit('-am', 'altered t on branch other')",
@@ -3932,7 +3932,7 @@ var OldFormatMergeConflictsAndCVsScripts = []queries.ScriptTest{
},
{
Query: "CALL DOLT_MERGE('branch3');",
Expected: []sql.Row{{"", 0, 1, "conficts found"}},
Expected: []sql.Row{{"", 0, 1, "conflicts found"}},
},
{
Query: "SELECT violation_type, pk, parent_fk from dolt_constraint_violations_child;",
@@ -39,7 +39,7 @@ type AutoIncrementTracker interface {
// given, so the new global maximum is computed without regard for its value in that working set.
Set(ctx *sql.Context, tableName string, table *doltdb.Table, ws ref.WorkingSetRef, newAutoIncVal uint64) (*doltdb.Table, error)
// AcquireTableLock acquires the auto increment lock on a table, and reutrns a callback function to release the lock.
// AcquireTableLock acquires the auto increment lock on a table, and returns a callback function to release the lock.
// Depending on the value of the `innodb_autoinc_lock_mode` system variable, the engine may need to acquire and hold
// the lock for the duration of an insert statement.
AcquireTableLock(ctx *sql.Context, tableName string) (func(), error)
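A short usage sketch implied by this comment; the tracker value and table name are assumptions:
release, err := tracker.AcquireTableLock(ctx, "mytable")
if err != nil {
	return err
}
defer release() // the returned callback releases the auto increment lock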
@@ -11629,13 +11629,13 @@ INSERT INTO symbols VALUES ('KRNY','Kearny Financial','Finance',NULL);
INSERT INTO symbols VALUES ('KRO','Kronos Worldwide Inc','Basic Industries',NULL);
INSERT INTO symbols VALUES ('KRP','Kimbell Royalty Partners','Energy',2017);
INSERT INTO symbols VALUES ('KRYS','Krystal Biotech, Inc.','Health Care',2017);
INSERT INTO symbols VALUES ('KSM','Scudder Strategic Municiple Income Trust',NULL,1989);
INSERT INTO symbols VALUES ('KSM','Scudder Strategic Municipal Income Trust',NULL,1989);
INSERT INTO symbols VALUES ('KSS','Kohl&#39;s Corporation','Consumer Services',1992);
INSERT INTO symbols VALUES ('KSU','Kansas City Southern','Transportation',NULL);
INSERT INTO symbols VALUES ('KSU^','Kansas City Southern','Transportation',NULL);
INSERT INTO symbols VALUES ('KT','KT Corporation','Public Utilities',NULL);
INSERT INTO symbols VALUES ('KTCC','Key Tronic Corporation','Technology',1983);
INSERT INTO symbols VALUES ('KTF','Scudder Municiple Income Trust',NULL,1988);
INSERT INTO symbols VALUES ('KTF','Scudder Municipal Income Trust',NULL,1988);
INSERT INTO symbols VALUES ('KTH','Lehman ABS Corporation','Finance',NULL);
INSERT INTO symbols VALUES ('KTN','Lehman ABS Corporation','Finance',NULL);
INSERT INTO symbols VALUES ('KTOS','Kratos Defense & Security Solutions, Inc.','Capital Goods',NULL);
@@ -12818,7 +12818,7 @@ INSERT INTO symbols VALUES ('PCI','PIMCO Dynamic Credit and Mortgage Income Fund
INSERT INTO symbols VALUES ('PCK','Pimco California Municipal Income Fund II',NULL,2002);
INSERT INTO symbols VALUES ('PCM','PIMCO Commercial Mortgage Securities Trust, Inc.',NULL,1993);
INSERT INTO symbols VALUES ('PCMI','PCM, Inc.','Consumer Services',NULL);
INSERT INTO symbols VALUES ('PCN','Pimco Corporate & Income Stategy Fund',NULL,2001);
INSERT INTO symbols VALUES ('PCN','Pimco Corporate & Income Strategy Fund',NULL,2001);
INSERT INTO symbols VALUES ('PCOM','Points International, Ltd.','Miscellaneous',NULL);
INSERT INTO symbols VALUES ('PCQ','PIMCO California Municipal Income Fund',NULL,2001);
INSERT INTO symbols VALUES ('PCRX','Pacira BioSciences, Inc.','Health Care',2011);
@@ -13502,7 +13502,7 @@ INSERT INTO symbols VALUES ('SBLKZ','Star Bulk Carriers Corp.','Transportation',
INSERT INTO symbols VALUES ('SBNA','Scorpio Tankers Inc.','Transportation',2014);
INSERT INTO symbols VALUES ('SBNY','Signature Bank','Finance',2004);
INSERT INTO symbols VALUES ('SBOT','Stellar Biotechnologies, Inc.','Health Care',NULL);
INSERT INTO symbols VALUES ('SBOW','SilverBow Resorces, Inc.','Energy',NULL);
INSERT INTO symbols VALUES ('SBOW','SilverBow Resources, Inc.','Energy',NULL);
INSERT INTO symbols VALUES ('SBPH','Spring Bank Pharmaceuticals, Inc.','Health Care',2016);
INSERT INTO symbols VALUES ('SBR','Sabine Royalty Trust','Energy',NULL);
INSERT INTO symbols VALUES ('SBRA','Sabra Health Care REIT, Inc.','Consumer Services',NULL);
@@ -14104,7 +14104,7 @@ INSERT INTO symbols VALUES ('THGA','The Hanover Insurance Group, Inc.','Finance'
INSERT INTO symbols VALUES ('THM','International Tower Hill Mines Ltd','Basic Industries',NULL);
INSERT INTO symbols VALUES ('THO','Thor Industries, Inc.','Consumer Non-Durables',NULL);
INSERT INTO symbols VALUES ('THOR','Synthorx, Inc.','Health Care',2018);
INSERT INTO symbols VALUES ('THQ','Tekla Healthcare Opportunies Fund',NULL,2014);
INSERT INTO symbols VALUES ('THQ','Tekla Healthcare Opportunities Fund',NULL,2014);
INSERT INTO symbols VALUES ('THR','Thermon Group Holdings, Inc.','Energy',2011);
INSERT INTO symbols VALUES ('THRM','Gentherm Inc','Capital Goods',NULL);
INSERT INTO symbols VALUES ('THS','Treehouse Foods, Inc.','Consumer Non-Durables',NULL);
@@ -17627,12 +17627,12 @@ INSERT INTO join_result VALUES ('stock','KRNY','us','2017-11-01',15.2,15.3,14.9,
INSERT INTO join_result VALUES ('stock','KRO','us','2017-11-01',26.68,26.7558,25.9,26.1,246853,'0','Kronos Worldwide Inc','Basic Industries',NULL);
INSERT INTO join_result VALUES ('stock','KRP','us','2017-11-01',16.689,16.807,16.631,16.64,19253,'0','Kimbell Royalty Partners','Energy',2017);
INSERT INTO join_result VALUES ('stock','KRYS','us','2017-11-01',9.349,10,9.349,9.55,15987,'0','Krystal Biotech, Inc.','Health Care',2017);
INSERT INTO join_result VALUES ('stock','KSM','us','2017-11-01',11.93,11.96,11.88,11.8953,82733,'0','Scudder Strategic Municiple Income Trust',NULL,1989);
INSERT INTO join_result VALUES ('stock','KSM','us','2017-11-01',11.93,11.96,11.88,11.8953,82733,'0','Scudder Strategic Municipal Income Trust',NULL,1989);
INSERT INTO join_result VALUES ('stock','KSS','us','2017-11-01',41.95,42.81,41.52,41.9,2970700,'0','Kohl&#39;s Corporation','Consumer Services',1992);
INSERT INTO join_result VALUES ('stock','KSU','us','2017-11-01',104.77,105.72,104.46,105.46,952870,'0','Kansas City Southern','Transportation',NULL);
INSERT INTO join_result VALUES ('stock','KT','us','2017-11-01',14.4,14.41,13.91,14.13,1238720,'0','KT Corporation','Public Utilities',NULL);
INSERT INTO join_result VALUES ('stock','KTCC','us','2017-11-01',7.23,7.8,7.07,7.8,112870,'0','Key Tronic Corporation','Technology',1983);
INSERT INTO join_result VALUES ('stock','KTF','us','2017-11-01',11.89,11.94,11.83,11.87,277606,'0','Scudder Municiple Income Trust',NULL,1988);
INSERT INTO join_result VALUES ('stock','KTF','us','2017-11-01',11.89,11.94,11.83,11.87,277606,'0','Scudder Municipal Income Trust',NULL,1988);
INSERT INTO join_result VALUES ('stock','KTH','us','2017-11-01',32.41,32.53,32.41,32.53,1371,'0','Lehman ABS Corporation','Finance',NULL);
INSERT INTO join_result VALUES ('stock','KTN','us','2017-11-01',33.8,33.8,32.7304,33.73,3746,'0','Lehman ABS Corporation','Finance',NULL);
INSERT INTO join_result VALUES ('stock','KTOS','us','2017-11-01',12.16,12.3,11.65,11.9,1603390,'0','Kratos Defense & Security Solutions, Inc.','Capital Goods',NULL);
@@ -18542,7 +18542,7 @@ INSERT INTO join_result VALUES ('stock','PCI','us','2017-11-01',22.619,22.639,22
INSERT INTO join_result VALUES ('stock','PCK','us','2017-11-01',9.971,10.01,9.941,9.951,68472,'0','Pimco California Municipal Income Fund II',NULL,2002);
INSERT INTO join_result VALUES ('stock','PCM','us','2017-11-01',11.95,11.97,11.821,11.88,57610,'0','PIMCO Commercial Mortgage Securities Trust, Inc.',NULL,1993);
INSERT INTO join_result VALUES ('stock','PCMI','us','2017-11-01',14.2,14.5,14,14.25,87457,'0','PCM, Inc.','Consumer Services',NULL);
INSERT INTO join_result VALUES ('stock','PCN','us','2017-11-01',16.881,16.961,16.841,16.871,79197,'0','Pimco Corporate & Income Stategy Fund',NULL,2001);
INSERT INTO join_result VALUES ('stock','PCN','us','2017-11-01',16.881,16.961,16.841,16.871,79197,'0','Pimco Corporate & Income Strategy Fund',NULL,2001);
INSERT INTO join_result VALUES ('stock','PCOM','us','2017-11-01',11.5,11.5,11.01,11.08,45690,'0','Points International, Ltd.','Miscellaneous',NULL);
INSERT INTO join_result VALUES ('stock','PCQ','us','2017-11-01',17.08,17.15,16.951,16.971,100898,'0','PIMCO California Municipal Income Fund',NULL,2001);
INSERT INTO join_result VALUES ('stock','PCRX','us','2017-11-01',32.4,33.3,32.2,32.6,797680,'0','Pacira BioSciences, Inc.','Health Care',2011);
@@ -19055,7 +19055,7 @@ INSERT INTO join_result VALUES ('stock','SBLK','us','2017-11-01',10.85,11,10.5,1
INSERT INTO join_result VALUES ('stock','SBNA','us','2017-11-01',24.4,24.75,24.4,24.7,1334,'0','Scorpio Tankers Inc.','Transportation',2014);
INSERT INTO join_result VALUES ('stock','SBNY','us','2017-11-01',131.31,131.96,128.85,129.7,414960,'0','Signature Bank','Finance',2004);
INSERT INTO join_result VALUES ('stock','SBOT','us','2017-11-01',1.15,1.16,1.11,1.12,53927,'0','Stellar Biotechnologies, Inc.','Health Care',NULL);
INSERT INTO join_result VALUES ('stock','SBOW','us','2017-11-01',22.83,22.9572,22.26,22.27,28686,'0','SilverBow Resorces, Inc.','Energy',NULL);
INSERT INTO join_result VALUES ('stock','SBOW','us','2017-11-01',22.83,22.9572,22.26,22.27,28686,'0','SilverBow Resources, Inc.','Energy',NULL);
INSERT INTO join_result VALUES ('stock','SBPH','us','2017-11-01',15.03,15.41,14.79,15.12,38453,'0','Spring Bank Pharmaceuticals, Inc.','Health Care',2016);
INSERT INTO join_result VALUES ('stock','SBR','us','2017-11-01',42.65,42.8,42.3946,42.65,12375,'0','Sabine Royalty Trust','Energy',NULL);
INSERT INTO join_result VALUES ('stock','SBRA','us','2017-11-01',19.88,20.14,19.75,20.14,1558500,'0','Sabra Health Care REIT, Inc.','Consumer Services',NULL);
@@ -19505,7 +19505,7 @@ INSERT INTO join_result VALUES ('stock','THG','us','2017-11-01',99.05,99.7,98.52
INSERT INTO join_result VALUES ('stock','THGA','us','2017-11-01',25.51,25.5226,25.4801,25.4801,1471,'0','The Hanover Insurance Group, Inc.','Finance',NULL);
INSERT INTO join_result VALUES ('stock','THM','us','2017-11-01',0.434,0.4379,0.4239,0.4252,81904,'0','International Tower Hill Mines Ltd','Basic Industries',NULL);
INSERT INTO join_result VALUES ('stock','THO','us','2017-11-01',137.64,138.07,136.07,136.82,575303,'0','Thor Industries, Inc.','Consumer Non-Durables',NULL);
INSERT INTO join_result VALUES ('stock','THQ','us','2017-11-01',17.63,17.63,17.5,17.53,162058,'0','Tekla Healthcare Opportunies Fund',NULL,2014);
INSERT INTO join_result VALUES ('stock','THQ','us','2017-11-01',17.63,17.63,17.5,17.53,162058,'0','Tekla Healthcare Opportunities Fund',NULL,2014);
INSERT INTO join_result VALUES ('stock','THR','us','2017-11-01',21.73,21.73,21.16,21.27,164411,'0','Thermon Group Holdings, Inc.','Energy',2011);
INSERT INTO join_result VALUES ('stock','THRM','us','2017-11-01',33.7,34.45,33.25,33.65,215357,'0','Gentherm Inc','Capital Goods',NULL);
INSERT INTO join_result VALUES ('stock','THS','us','2017-11-01',66.52,67.13,66.05,66.36,1109140,'0','Treehouse Foods, Inc.','Consumer Non-Durables',NULL);
@@ -56,7 +56,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) {
doc: gmstypes.MustJSON(`2.71`),
},
{
name: "type homogenous object",
name: "type homogeneous object",
doc: gmstypes.MustJSON(`{"a": 2, "b": 3, "c": 4}`),
},
{
@@ -64,7 +64,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) {
doc: gmstypes.MustJSON(`{"a": 2, "b": "two", "c": false}`),
},
{
name: "homogenous array",
name: "homogeneous array",
doc: gmstypes.MustJSON(`[1, 2, 3]`),
},
{
@@ -196,7 +196,7 @@ func (p *Provider) checkRefresh(ctx *sql.Context, sqlDb sql.Database, dbName, br
ctx.GetLogger().Debugf("statistics updating: %s", updateMeta.qual)
// mark index for updating
idxMetas = append(idxMetas, updateMeta)
// update lastest hash if we haven't already
// update latest hash if we haven't already
statDb.SetLatestHash(branch, table, tableHash)
}
}
@@ -164,7 +164,7 @@ func TestGet(t *testing.T) {
key5 := newTuple(t, types.Int(5))
key6 := newTuple(t, types.Int(6))
// test uncomitted
// test uncommitted
requireGet(ctx, t, tea, key1, false)
teaInsert(t, tea, key1)
requireGet(ctx, t, tea, key1, true)
@@ -172,22 +172,22 @@ func TestGet(t *testing.T) {
require.NoError(t, err)
requireGet(ctx, t, tea, key1, false)
// test uncomitted flushed
// test uncommitted flushed
teaInsert(t, tea, key1)
requireGet(ctx, t, tea, key1, true)
tea.flushUncomitted()
tea.flushUncommitted()
requireGet(ctx, t, tea, key1, true)
err = tea.Rollback(ctx)
require.NoError(t, err)
requireGet(ctx, t, tea, key1, false)
// test commmitted
// test committed
teaInsert(t, tea, key1)
err = tea.Commit(ctx, nbf)
require.NoError(t, err)
requireGet(ctx, t, tea, key1, true)
// edits in comitted and uncomitted
// edits in committed and uncommitted
requireGet(ctx, t, tea, key2, false)
teaInsert(t, tea, key2)
requireGet(ctx, t, tea, key1, true)
@@ -197,11 +197,11 @@ func TestGet(t *testing.T) {
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, false)
// edits in comitted and uncomitted flushed
// edits in committed and uncommitted flushed
teaInsert(t, tea, key2)
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, true)
tea.flushUncomitted()
tea.flushUncommitted()
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, true)
err = tea.Rollback(ctx)
@@ -209,10 +209,10 @@ func TestGet(t *testing.T) {
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, false)
// edits in comitted, uncomitted and uncomitted flushed
// edits in committed, uncommitted and uncommitted flushed
requireGet(ctx, t, tea, key3, false)
teaInsert(t, tea, key2)
tea.flushUncomitted()
tea.flushUncommitted()
teaInsert(t, tea, key3)
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, true)
@@ -225,7 +225,7 @@ func TestGet(t *testing.T) {
// edits everywhere materialized
teaInsert(t, tea, key2)
tea.flushUncomitted()
tea.flushUncommitted()
teaInsert(t, tea, key3)
requireGet(ctx, t, tea, key1, true)
requireGet(ctx, t, tea, key2, true)
@@ -247,7 +247,7 @@ func TestGet(t *testing.T) {
requireGet(ctx, t, tea, key4, true)
teaDelete(t, tea, key2)
teaInsert(t, tea, key5)
tea.flushUncomitted()
tea.flushUncommitted()
requireGet(ctx, t, tea, key2, false)
requireGet(ctx, t, tea, key5, true)
teaInsert(t, tea, key6)
@@ -50,7 +50,7 @@ func mustRow(r row.Row, err error) row.Row {
}
// These are in noms-key-sorted order, since InMemoryTable.AppendRow sorts its rows. This should probably be done
// programatically instead of hard-coded.
// programmatically instead of hard-coded.
var rows = []row.Row{
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameTag: types.String("Bill Billerson"),
+1 -1
View File
@@ -40,7 +40,7 @@ func NewCounter(metricID eventsapi.MetricID) *Counter {
return &Counter{0, metricID}
}
// Inc incements a counter. This method happens atomically.
// Inc increments a counter. This method happens atomically.
func (c *Counter) Inc() {
c.Add(1)
}
+2 -2
View File
@@ -233,7 +233,7 @@ func TestValidation(t *testing.T) {
}
if apr.ContainsAny("string2", "flag2", "integer2") {
t.Error("Contains unexpected parameter(s)")
t.Error("Contains unexpectededed parameter(s)")
}
if val := apr.MustGetValue("string"); val != "string" {
@@ -308,7 +308,7 @@ func TestDropValue(t *testing.T) {
}
newApr2 := apr.DropValue("flag")
require.NotEqualf(t, apr, newApr2, "DropValue failes to drop flag")
require.NotEqualf(t, apr, newApr2, "DropValue fails to drop flag")
_, hasVal = newApr2.GetValue("string")
if !hasVal {
@@ -100,7 +100,7 @@ func TestConcurrentMapIter(t *testing.T) {
t.Errorf("Iter failed, expected to iterate 3 times, iterated %d times", counter)
}
// Test that iteration yeilds all elements
// Test that iteration yields all elements
if len(elements) != 3 {
t.Errorf("Iter failed, there should be 3 elements in the map, got %d", len(elements))
}
@@ -109,7 +109,7 @@ func (ch *ConfigHierarchy) SetStrings(updates map[string]string) error {
ns, paramName := splitParamName(k)
if ns == "" {
// panicing in cases where developers have used this function incorrectly
// panicking in cases where developers have used this function incorrectly
panic("Calls to SetStrings for a ConfigHierarchy must include the config name. " + k + " is not in the format config_name::param_name")
}
@@ -182,7 +182,7 @@ func (ch *ConfigHierarchy) Unset(params []string) error {
ns, paramName := splitParamName(param)
if ns == "" {
// panicing in cases where developers have used this function incorrectly
// panicking in cases where developers have used this function incorrectly
panic("Calls to Unset for a ConfigHierarchy must include the config name. " + param + " is not in the format config_name::param_name")
}
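A hedged example of the key format these panics enforce (the "global" config name here is an assumption):
// Keys must be namespaced as config_name::param_name.
err := ch.SetStrings(map[string]string{
	"global::user.name": "Jane Doe", // ok: includes the config name
})
// ch.SetStrings(map[string]string{"user.name": "Jane Doe"}) would panic,
// because the key omits the config name.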
+2 -2
View File
@@ -93,14 +93,14 @@ type WalkableFS interface {
Iter(directory string, recursive bool, cb FSIterCB) error
}
// ReadWriteFS is an interface whose implementors will provide read, and write implementations but may not allow
// ReadWriteFS is an interface whose implementers will provide read, and write implementations but may not allow
// for files to be listed.
type ReadWriteFS interface {
ReadableFS
WritableFS
}
// Filesys is an interface whose implementors will provide read, write, and list mechanisms
// Filesys is an interface whose implementers will provide read, write, and list mechanisms
type Filesys interface {
ReadableFS
WritableFS
+3 -3
View File
@@ -521,7 +521,7 @@ func (fs *InMemFS) MoveDir(srcPath, destPath string) error {
func (fs *InMemFS) moveDirHelper(dir *memDir, destPath string) error {
// All calls to moveDirHelper MUST happen with the filesystem's read-write mutex locked
if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil {
return fmt.Errorf("moveDirHelper called without first aquiring filesystem read-write lock")
return fmt.Errorf("moveDirHelper called without first acquiring filesystem read-write lock")
}
if _, exists := fs.objs[destPath]; exists {
@@ -562,7 +562,7 @@ func (fs *InMemFS) moveDirHelper(dir *memDir, destPath string) error {
delete(dir.objs, obj.absPath)
delete(fs.objs, obj.absPath)
default:
return fmt.Errorf("unexpected type of memory object: %T", v)
return fmt.Errorf("unexpectededed type of memory object: %T", v)
}
}
@@ -597,7 +597,7 @@ func (fs *InMemFS) MoveFile(srcPath, destPath string) error {
func (fs *InMemFS) moveFileHelper(obj *memFile, destPath string) error {
// All calls to moveFileHelper MUST happen with the filesystem's read-write mutex locked
if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil {
return fmt.Errorf("moveFileHelper called without first aquiring filesystem read-write lock")
return fmt.Errorf("moveFileHelper called without first acquiring filesystem read-write lock")
}
destDir := filepath.Dir(destPath)
+1 -1
View File
@@ -27,7 +27,7 @@ import (
func YamlForVersion(st any, versionNum uint32) ([]byte, error) {
err := NullUnsupported(versionNum, st)
if err != nil {
return nil, fmt.Errorf("error nulling unspported fields for version %d: %w", versionNum, err)
return nil, fmt.Errorf("error nulling unsupported fields for version %d: %w", versionNum, err)
}
return yaml.Marshal(st)
+1 -1
View File
@@ -116,7 +116,7 @@ func (ss *ServiceState) CompareAndSwap(old, new ServiceState) (swapped bool) {
//
// |WaitForStart| can be called at any time on a Controller. It will block
// until |Start| is called. After |Start| is called, if all the services
// succesfully initialize, it will return |nil|. Otherwise it will return the
// successfully initialize, it will return |nil|. Otherwise it will return the
// same error |Start| returned.
//
// |WaitForStop| can be called at any time on a Controller. It will block until
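A sketch of the contract described above; only Start, WaitForStart, and WaitForStop come from the comment, the rest is assumed:
go func() {
	// Start initializes the registered services; its error is what
	// WaitForStart will later return.
	_ = controller.Start(context.Background())
}()
if err := controller.WaitForStart(); err != nil {
	return err // some service failed to initialize
}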
+4 -4
View File
@@ -281,8 +281,8 @@ func (r *Result) populateHistogram(buf []byte) error {
var err error
{
timer := regexp.MustCompile(`total time:\s+([0-9][0-9]*\.[0-9]+)s\n`)
res := timer.FindSubmatch(buf)
timeRe := regexp.MustCompile(`total time:\s+([0-9][0-9]*\.[0-9]+)s\n`)
res := timeRe.FindSubmatch(buf)
if len(res) == 0 {
return fmt.Errorf("time not found")
}
@@ -475,7 +475,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe
conf.Port = strconv.Itoa(s.Port)
conf.Password = s.Password
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
log.Printf("starting scipt: %s", script)
log.Printf("starting script: %s", script)
db, err := driver.ConnectDB(s.User, s.Password, s.Name, s.Host, s.Port, nil)
if err != nil {
@@ -515,7 +515,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe
// RunSqlServerTests creates a new repo and server for every import test.
func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser, conf Config) error {
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
log.Printf("starting scipt: %s", script)
log.Printf("starting script: %s", script)
//make a new server for every test
server, err := newServer(user, repo, conf)
if err != nil {
@@ -111,7 +111,7 @@ Configuration:
}
```
`Servers`: The server defintions to run the benchmark against. Accepts Dolt and MySQL configuratiosn.
`Servers`: The server definitions to run the benchmark against. Accepts Dolt and MySQL configurations.
`ScriptDir`: The directory of the TPCC testing scripts
+3 -3
View File
@@ -79,7 +79,7 @@ func Run(ctx context.Context, config SysbenchConfig) error {
fmt.Println("Running postgres sysbench tests")
b = NewPostgresBenchmarker(cwd, config, sc)
default:
panic(fmt.Sprintf("unexpected server type: %s", st))
panic(fmt.Sprintf("unexpectededed server type: %s", st))
}
results, err = b.Benchmark(ctx)
@@ -87,14 +87,14 @@ func Run(ctx context.Context, config SysbenchConfig) error {
return err
}
fmt.Printf("Successfuly finished %s\n", st)
fmt.Printf("Successfully finished %s\n", st)
err = WriteResults(serverConfig, results)
if err != nil {
return err
}
fmt.Printf("Successfuly wrote results for %s\n", st)
fmt.Printf("Successfully wrote results for %s\n", st)
}
return nil
}
@@ -49,7 +49,7 @@ func RunTpcc(ctx context.Context, config TpccConfig) error {
fmt.Println("Running mysql tpcc benchmarks")
b = NewMysqlTpccBenchmarker(cwd, config, sc)
default:
panic(fmt.Sprintf("unexpected server type: %s", st))
panic(fmt.Sprintf("unexpectededed server type: %s", st))
}
results, err = b.Benchmark(ctx)
@@ -62,7 +62,7 @@ func RunTpcc(ctx context.Context, config TpccConfig) error {
return err
}
fmt.Printf("Successfuly wrote results for %s\n", st)
fmt.Printf("Successfully wrote results for %s\n", st)
}
return nil
+1 -1
View File
@@ -17,7 +17,7 @@ namespace serial;
table AddressMap {
// sorted array of key items
key_items:[ubyte] (required);
// items offets for |key_items|
// items offsets for |key_items|
// first offset is 0, last offset is len(key_items)
key_offsets:[uint16] (required);
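A small worked example of this encoding (values invented): storing the keys "ab" and "cde" gives
key_items   = ['a','b','c','d','e']
key_offsets = [0, 2, 5]
so key i spans key_items[key_offsets[i] : key_offsets[i+1]], with the first offset 0 and the last equal to len(key_items), exactly as the comments state.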
+1 -1
View File
@@ -18,7 +18,7 @@ table MergeArtifacts {
// sorted array of key items
// key items are encoded as TupleFormatAlpha
key_items:[ubyte] (required);
// items offets for |key_items|
// items offsets for |key_items|
// first offset is 0, last offset is len(key_items)
key_offsets:[uint16] (required);
+1 -1
View File
@@ -22,7 +22,7 @@ enum ItemType : uint8 {
table ProllyTreeNode {
// sorted array of key items
key_items:[ubyte] (required);
// items offets for |key_items|
// items offsets for |key_items|
// first offset is 0, last offset is len(key_items)
key_offsets:[uint16] (required);
// item type for |key_items|
+1 -1
View File
@@ -324,7 +324,7 @@ func TestConcurrentCheckAndPuts(t *testing.T) {
for _, bsTest := range newBlobStoreTests() {
t.Run(bsTest.bsType, func(t *testing.T) {
if bsTest.rmwIterations*bsTest.rmwConcurrency > 255 {
panic("Test epects less than 255 total updates or it won't work as is.")
panic("Test expects less than 255 total updates or it won't work as is.")
}
testConcurrentCheckAndPuts(t, bsTest, uuid.New().String())
})
+6 -6
View File
@@ -40,7 +40,7 @@ func (t *TestStorage) NewView() *TestStoreView {
type TestStoreView struct {
ChunkStore
reads int32
hases int32
hashes int32
writes int32
}
@@ -61,12 +61,12 @@ func (s *TestStoreView) CacheHas(_ hash.Hash) bool {
}
func (s *TestStoreView) Has(ctx context.Context, h hash.Hash) (bool, error) {
atomic.AddInt32(&s.hases, 1)
atomic.AddInt32(&s.hashes, 1)
return s.ChunkStore.Has(ctx, h)
}
func (s *TestStoreView) HasMany(ctx context.Context, hashes hash.HashSet) (hash.HashSet, error) {
atomic.AddInt32(&s.hases, int32(len(hashes)))
atomic.AddInt32(&s.hashes, int32(len(hashes)))
return s.ChunkStore.HasMany(ctx, hashes)
}
@@ -104,9 +104,9 @@ func (s *TestStoreView) Reads() int {
return int(reads)
}
func (s *TestStoreView) Hases() int {
hases := atomic.LoadInt32(&s.hases)
return int(hases)
func (s *TestStoreView) Hashes() int {
hashes := atomic.LoadInt32(&s.hashes)
return int(hashes)
}
func (s *TestStoreView) Writes() int {
+1 -1
View File
@@ -86,7 +86,7 @@ func (iter *CommitIterator) Next(ctx context.Context) (LogNode, bool) {
newCols = append(newCols, col+cnt)
}
// Now that the branchlist has been adusted, check to see if there are branches with common
// Now that the branchlist has been adjusted, check to see if there are branches with common
// ancestors that will be folded together on this commit's graph.
foldedCols := iter.branches.HighestBranchIndexes()
node := LogNode{
+1 -1
View File
@@ -141,7 +141,7 @@ func addDatabaseArg(cmd *kingpin.CmdClause) (arg *string) {
// addNomsDocs - adds documentation (docs only, not commands) for existing (pre-kingpin) commands.
func addNomsDocs(noms *kingpin.Application) {
// commmit
// commit
commit := noms.Command("commit", `Commits a specified value as head of the dataset
If absolute-path is not provided, then it is read from stdin. See Spelling Objects at https://github.com/attic-labs/noms/blob/master/doc/spelling.md for details on the dataset and absolute-path arguments.
`)
+8 -8
View File
@@ -47,8 +47,8 @@ const (
var (
ErrOptimisticLockFailed = errors.New("optimistic lock failed on database Root update")
ErrMergeNeeded = errors.New("dataset head is not ancestor of commit")
ErrAlreadyComitted = errors.New("dataset head already pointing at given commit")
ErrDirtyWorkspace = errors.New("target has uncomitted changes. --force required to overwrite")
ErrAlreadyCommitted = errors.New("dataset head already pointing at given commit")
ErrDirtyWorkspace = errors.New("target has uncommitted changes. --force required to overwrite")
)
// rootTracker is a narrowing of the ChunkStore interface, to keep Database disciplined about working directly with Chunks
@@ -289,7 +289,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w
return err
}
if !iscommit {
return fmt.Errorf("SetHead failed: reffered to value is not a commit:")
return fmt.Errorf("SetHead failed: referred to value is not a commit:")
}
case tagName:
istag, err := IsTag(ctx, newVal)
@@ -297,7 +297,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w
return err
}
if !istag {
return fmt.Errorf("SetHead failed: reffered to value is not a tag:")
return fmt.Errorf("SetHead failed: referred to value is not a tag:")
}
_, commitaddr, err := newHead.HeadTag()
if err != nil {
@@ -487,7 +487,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h
}
if curr != (hash.Hash{}) {
if curr == h {
return prolly.AddressMap{}, ErrAlreadyComitted
return prolly.AddressMap{}, ErrAlreadyCommitted
}
}
@@ -573,7 +573,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h
return ae.Flush(ctx)
})
if err == ErrAlreadyComitted {
if err == ErrAlreadyCommitted {
return nil
}
@@ -655,7 +655,7 @@ func buildClassicCommitFunc(db Database, datasetID string, datasetCurrentAddr ha
return types.Map{}, ErrMergeNeeded
}
if currRef.TargetHash() == newCommitValueRef.TargetHash() {
return types.Map{}, ErrAlreadyComitted
return types.Map{}, ErrAlreadyCommitted
}
} else if datasetCurrentAddr != (hash.Hash{}) {
return types.Map{}, ErrMergeNeeded
@@ -682,7 +682,7 @@ func (db *database) doCommit(ctx context.Context, datasetID string, datasetCurre
}
if curr != (hash.Hash{}) {
if curr == h {
return prolly.AddressMap{}, ErrAlreadyComitted
return prolly.AddressMap{}, ErrAlreadyCommitted
}
}
+2 -2
View File
@@ -43,7 +43,7 @@ type (
printFunc func(ctx context.Context, w io.Writer, op prefixOp, key, val types.Value) error
)
// PrintDiff writes a textual reprensentation of the diff from |v1| to |v2|
// PrintDiff writes a textual representation of the diff from |v1| to |v2|
// to |w|. If |leftRight| is true then the left-right diff is used for ordered
// sequences - see Diff vs DiffLeftRight in Set and Map.
func PrintDiff(ctx context.Context, w io.Writer, v1, v2 types.Value, leftRight bool) (err error) {
@@ -112,7 +112,7 @@ func PrintDiff(ctx context.Context, w io.Writer, v1, v2 types.Value, leftRight b
return err
}
} else {
panic("unexpected Path type")
panic("unexpectededed Path type")
}
case types.Set:
// default values are ok
+2 -2
View File
@@ -38,7 +38,7 @@
//
// The textual serialization of hashes uses big-endian base32 with the alphabet {0-9,a-v}. This scheme was chosen because:
//
// - It's easy to convert to and from base32 without bignum arithemetic.
// - It's easy to convert to and from base32 without bignum arithmetic.
// - No special chars: you can double-click to select in GUIs.
// - Sorted hashes will be sorted textually, making it easy to scan for humans.
//
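A minimal Go sketch of the scheme described above for a 20-byte hash; encode32 is illustrative, not the library's API:
const alphabet = "0123456789abcdefghijklmnopqrstuv"
// encode32 maps each successive 5-bit group of the hash, most significant
// bits first, to one alphabet character: 20 bytes become 32 characters.
func encode32(h [20]byte) string {
	var out [32]byte
	for i := range out {
		bit := uint(i) * 5
		v := uint16(h[bit/8]) << 8
		if bit/8+1 < 20 {
			v |= uint16(h[bit/8+1])
		}
		out[i] = alphabet[(v>>(11-bit%8))&0x1f]
	}
	return string(out)
}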
@@ -125,7 +125,7 @@ func IsValid(s string) bool {
func Parse(s string) Hash {
r, ok := MaybeParse(s)
if !ok {
d.PanicIfError(fmt.Errorf("cound not parse Hash: %s", s))
d.PanicIfError(fmt.Errorf("count not parse Hash: %s", s))
}
return r
}
+3 -3
View File
@@ -213,12 +213,12 @@ func float64Encoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWri
}
func intEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) {
// TODO: encoding types.Int as types.Float is lossy, but will recquire a migration to change
// TODO: encoding types.Int as types.Float is lossy, but will require a migration to change
return types.Float(float64(v.Int())), nil
}
func uintEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) {
// TODO: encoding types.Int as types.Uint is lossy, but will recquire a migration to change
// TODO: encoding types.Int as types.Uint is lossy, but will require a migration to change
return types.Float(float64(v.Uint())), nil
}
@@ -767,6 +767,6 @@ func shouldEncodeAsSet(t reflect.Type, tags nomsTags) bool {
t.Elem().Kind() == reflect.Struct &&
t.Elem().NumField() == 0
default:
panic(fmt.Errorf("called with unexpected kind %v", t.Kind()))
panic(fmt.Errorf("called with unexpectededed kind %v", t.Kind()))
}
}
+2 -2
View File
@@ -52,7 +52,7 @@ func None(aChange, bChange types.DiffChangeType, a, b types.Value, path types.Pa
return change, merged, false
}
// Ours resolves conflicts by preferring changes from the Value currently being committed.
// Ours resolves conflicts by preferring changes from the Value currently being committed.
func Ours(aChange, bChange types.DiffChangeType, a, b types.Value, path types.Path) (change types.DiffChangeType, merged types.Value, ok bool) {
return aChange, a, true
}
@@ -227,7 +227,7 @@ func (m *merger) threeWay(ctx context.Context, a, b, parent types.Value, path ty
defer updateProgress(m.progress)
if a == nil || b == nil {
d.Panic("Merge candidates cannont be nil: a = %v, b = %v", a, b)
d.Panic("Merge candidates cannot be nil: a = %v, b = %v", a, b)
}
switch a.Kind() {
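For comparison with Ours above, a resolver preferring the other side's changes would follow the same signature. This is a sketch for illustration only; the package may define its own such policy:
package merge

import "github.com/dolthub/dolt/go/store/types"

// Theirs resolves conflicts by preferring the other side's changes; assumed
// here for illustration, mirroring the resolver signature of Ours.
func Theirs(aChange, bChange types.DiffChangeType, a, b types.Value, path types.Path) (change types.DiffChangeType, merged types.Value, ok bool) {
	return bChange, b, true
}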
+2 -2
View File
@@ -38,9 +38,9 @@ import (
//
// It logically stores a running histogram of uint64 values and shares some
// important features of its inspiration:
// * It acccepts a correctness deficit in return for not needing to lock.
// * It accepts a correctness deficit in return for not needing to lock.
// IOW, concurrent calls to Sample may clobber each other.
// * It trades compactness and ease of arithmatic across histograms for
// * It trades compactness and ease of arithmetic across histograms for
// precision. Samples lose precision up to the range of the values which
// are stored in a bucket
//
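A minimal sketch of the bucketing idea described above, with assumed names: values are grouped by bit length, trading precision for cheap lock-free atomic updates.
package main

import (
	"fmt"
	"math/bits"
	"sync/atomic"
)

// histogram groups samples by bit length; a whole power-of-two range of
// values shares one bucket, which is the precision trade-off noted above.
type histogram struct {
	buckets [65]uint64 // bits.Len64 ranges over 0..64
}

func (h *histogram) Sample(v uint64) {
	// Lock-free: concurrent Samples may interleave, the accepted correctness deficit.
	atomic.AddUint64(&h.buckets[bits.Len64(v)], 1)
}

func main() {
	var h histogram
	h.Sample(100) // bit length 7
	h.Sample(120) // also bit length 7: same bucket as 100
	fmt.Println(h.buckets[7]) // 2
}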
+1 -1
View File
@@ -22,7 +22,7 @@ When backed by AWS, NBS stores its data mainly in S3, along with a single Dynamo
## Perf
For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benenfit Noms at all. NBS locates related chunks together and thus reading data from a NBS store can be done quite alot faster. As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz:
For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benefit Noms at all. NBS locates related chunks together and thus reading data from a NBS store can be done quite a lot faster. As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz:
* LDB
* Initial import: 44 MB/s, size on disk: 1.1 GB.
+1 -1
View File
@@ -531,7 +531,7 @@ type chunkGroup struct {
type chunkCmpScore struct {
chunkId hash.Hash
// The compression score. Higher is better. This is the ratio of the compressed size to the raw size, using the group's
// dictionary. IE, this number only has meaning withing the group
// dictionary. IE, this number only has meaning within the group
score float64
// The size of the compressed chunk using the group's dictionary.
dictCmpSize int
+1 -1
View File
@@ -105,7 +105,7 @@ func (aw *archiveWriter) writeByteSpan(b []byte) (uint32, error) {
}
if len(b) == 0 {
return 0, fmt.Errorf("Rutime error: empty compressed byte span")
return 0, fmt.Errorf("Runtime error: empty compressed byte span")
}
offset := aw.bytesWritten
+1 -1
View File
@@ -97,7 +97,7 @@ func (dm dynamoManifest) ParseIfExists(ctx context.Context, stats *Stats, readHo
return false, manifestContents{}, fmt.Errorf("failed to get dynamo table: '%s' - %w", dm.table, err)
}
// !exists(dbAttr) => unitialized store
// !exists(dbAttr) => uninitialized store
if len(result.Item) > 0 {
valid, hasSpecs, hasAppendix := validateManifest(result.Item)
if !valid {
+2 -2
View File
@@ -105,7 +105,7 @@ func nomsFileTableReader(ctx context.Context, path string, h hash.Hash, chunkCou
}
if fi.Size() < 0 {
// Size returns the number of bytes for regular files and is system dependant for others (Some of which can be negative).
// Size returns the number of bytes for regular files and is system dependent for others (Some of which can be negative).
err = fmt.Errorf("%s has invalid size: %d", path, fi.Size())
return
}
@@ -150,7 +150,7 @@ func nomsFileTableReader(ctx context.Context, path string, h hash.Hash, chunkCou
if chunkCount != index.chunkCount() {
index.Close()
f.Close()
return nil, errors.New("unexpected chunk count")
return nil, errors.New("unexpectededed chunk count")
}
tr, err := newTableReader(index, &fileReaderAt{f, path, sz}, fileBlockSize)
+1 -1
View File
@@ -444,7 +444,7 @@ func (wr *journalWriter) writeCompressedChunk(ctx context.Context, cc Compressed
// We go through |commitRootHash|, instead of directly |Sync()|ing the
// file, because we also have accumulating delayed work in the form of
// journal index records which may need to be serialized and flushed.
// Assumptions in journal bootstraping and the contents of the journal
// Assumptions in journal bootstrapping and the contents of the journal
// index require us to have a newly written root hash record anytime we
// write index records out. It's perfectly fine to reuse the current
// root hash, and this will also take care of the |Sync|.
+1 -1
View File
@@ -119,7 +119,7 @@ type manifestContents struct {
gcGen hash.Hash
specs []tableSpec
// An appendix is a list of |tableSpecs| that track an auxillary collection of
// An appendix is a list of |tableSpecs| that track an auxiliary collection of
// table files used _only_ for query performance optimizations. These appendix |tableSpecs| can be safely
// managed with nbs.UpdateManifestWithAppendix, however generation and removal of the actual table files
// the appendix |tableSpecs| reference is done manually. All appendix |tableSpecs| will be prepended to the
+2 -2
View File
@@ -95,11 +95,11 @@ import (
-Total Uncompressed Chunk Data is the sum of the uncompressed byte lengths of all contained chunk byte slices.
-Magic Number is the first 8 bytes of the SHA256 hash of "https://github.com/attic-labs/nbs".
NOTE: Unsigned integer quanities, hashes and hash suffix are all encoded big-endian
NOTE: Unsigned integer quantities, hashes and hash suffix are all encoded big-endian
Looking up Chunks in an NBS Table
There are two phases to loading chunk data for a given Hash from an NBS Table: Checking for the chunk's presence, and fetching the chunk's bytes. When performing a has-check, only the first phase is necessary.
There are two phases to loading chunk data for a given Hash from an NBS Table: Checking for the chunk's presence, and fetching the chunk's bytes. When performing a has-check, only the first phase is necessary.
Phase one: Chunk presence
- Slice off the first 8 bytes of your Hash to create a Prefix
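The magic-number derivation described above can be checked with a short standalone snippet:
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("https://github.com/attic-labs/nbs"))
	fmt.Printf("magic: %x\n", sum[:8]) // the table format's 8-byte magic number
}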
+1 -1
View File
@@ -91,7 +91,7 @@ func newTableWriter(buff []byte, snapper snappyEncoder) *tableWriter {
func (tw *tableWriter) addChunk(h hash.Hash, data []byte) bool {
if len(data) == 0 {
panic("NBS blocks cannont be zero length")
panic("NBS blocks cannot be zero length")
}
// Compress data straight into tw.buff
+1 -1
View File
@@ -170,7 +170,7 @@ func getMergeArtifactCount(msg serial.Message) (uint16, error) {
if ma.KeyItemsLength() == 0 {
return 0, nil
}
// zeroth offset ommitted from array
// zeroth offset omitted from array
return uint16(ma.KeyOffsetsLength() + 1), nil
}
+2 -2
View File
@@ -66,7 +66,7 @@ func writeItemOffsets(b *fb.Builder, items [][]byte, sumSz int) fb.UOffsetT {
func countAddresses(items [][]byte, td val.TupleDesc) (cnt int) {
for i := len(items) - 1; i >= 0; i-- {
val.IterAddressFields(td, func(j int, t val.Type) {
// get offset of address withing |tup|
// get offset of address within |tup|
addr := val.Tuple(items[i]).GetField(j)
if len(addr) > 0 && !hash.New(addr).IsEmpty() {
cnt++
@@ -89,7 +89,7 @@ func writeAddressOffsets(b *fb.Builder, items [][]byte, sumSz int, td val.TupleD
if len(addr) == 0 || hash.New(addr).IsEmpty() {
return
}
// get offset of address withing |tup|
// get offset of address within |tup|
o, _ := tup.GetOffset(j)
o += off // offset is tuple start plus field start
b.PrependUint16(uint16(o))
+1 -1
View File
@@ -24,7 +24,7 @@ import (
"github.com/dolthub/dolt/go/store/val"
)
// Single layer trees are entirly root nodes - which are imbedded in the table flatbuffer, so we don't
// Single layer trees are entirely root nodes - which are embedded in the table flatbuffer, so we don't
// currently use them for purposes of grouping chunks.
func TestAddressDifferFromRootsOneLayer(t *testing.T) {
fromTups, desc := AscendingUintTuples(42)
+1 -1
View File
@@ -236,7 +236,7 @@ const (
// split on any of the records up to |size - thisSize|,
// the probability that we should split on this record
// is (CDF(end) - CDF(start)) / (1 - CDF(start)), or,
// the precentage of the remaining portion of the CDF
// the percentage of the remaining portion of the CDF
// that this record actually covers. We split if |hash|,
// treated as a uniform random number between [0,1),
// is less than this percentage.
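A sketch of the split rule the comment above describes, with assumed names: treat |hash| as a uniform draw in [0,1) and split when it falls below the conditional probability mass this record covers.
package main

import "fmt"

// shouldSplit reports whether to split on a record, given the CDF at the
// record's start and end and |u|, the record's hash treated as a uniform
// random number in [0,1).
func shouldSplit(cdfStart, cdfEnd, u float64) bool {
	p := (cdfEnd - cdfStart) / (1 - cdfStart) // share of the remaining CDF this record covers
	return u < p
}

func main() {
	fmt.Println(shouldSplit(0.5, 0.75, 0.4)) // true: 0.4 < 0.25/0.50
}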
+2 -2
View File
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
benchmarkPut(b, randomInts(65536))
})
})
b.Run("asending keys", func(b *testing.B) {
b.Run("ascending keys", func(b *testing.B) {
b.Run("n=64", func(b *testing.B) {
benchmarkPut(b, ascendingInts(64))
})
@@ -104,7 +104,7 @@ func BenchmarkIterAll(b *testing.B) {
benchmarkIterAll(b, randomInts(65536))
})
})
b.Run("asending keys", func(b *testing.B) {
b.Run("ascending keys", func(b *testing.B) {
b.Run("n=64", func(b *testing.B) {
benchmarkIterAll(b, ascendingInts(64))
})
+2 -2
View File
@@ -57,14 +57,14 @@ var maxOffset = int(1<<maxOffsetPOT - 1)
//
// When in the "matched state" (attempting to extend the current match), Snappy
// does not re-index new 4-byte sequences, but Sloppy does. The reason for this
// is that Sloppy would like match the most recent occurence as it moves
// is that Sloppy would like to match the most recent occurrence as it moves
// forward.
//
// Lastly, Sloppy adds two novel heuristics, both aimed at further mitigating
// the chance of chunk boundaries being redrawn because of byte value changes:
//
// 1) During the first 2 bytes of match, it *continues* to look for closer
// matches (effectively prefering a closer but shorter copy to a further but
// matches (effectively preferring a closer but shorter copy to a further but
// longer one). The reason for this is that when sequences repeat frequently in
// a byte stream, randomness provides for a good chance that a one or two byte
// prefix on a repeated sequence will match "far away". E.g.
+2 -2
View File
@@ -322,7 +322,7 @@ The Mouse did not notice this question, but hurriedly went on, ‘“—found it
What I was going to say, said the Dodo in an offended tone, was, that the best thing to get us dry would be a Caucus-race.
What is a Caucus-race? said Alice; not that she wanted much to know, but the Dodo had paused as if it thought that somebody ought to speak, and no one else seemed inclined to say anything.
Why, said the Dodo, the best way to explain it is to do it. (And, as you might like to try the thing yourself, some winter day, I will tell you how the Dodo managed it.)
First it marked out a race-course, in a sort of circle, (the exact shape doesnt matter, it said,) and then all the party were placed along the course, here and there. There was no One, two, three, and away, but they began running when they liked, and left off when they liked, so that it was not easy to know when the race was over. However, when they had been running half an hour or so, and were quite dry again, the Dodo suddenly called out The race is over! and they all crowded round it, panting, and asking, But who has won?
First it marked out a race-course, in a sort of circle, (the exact shape doesnt matter, it said,) and then all the party were placed along the course, here and there. There was no One, two, three, and away, but they began running when they liked, and left off when they liked, so that it was not easy to know when the race was over. However, when they had been running half an hour or so, and were quite dry again, the Dodo suddenly called out The race is over! and they all crowded round it, panting, and asking, But who has won?
This question the Dodo could not answer without a great deal of thought, and it sat for a long time with one finger pressed upon its forehead (the position in which you usually see Shakespeare, in the pictures of him), while the rest waited in silence. At last the Dodo said, Everybody has won, and all must have prizes.
But who is to give the prizes? quite a chorus of voices asked.
Why, she, of course, said the Dodo, pointing to Alice with one finger; and the whole party at once crowded round her, calling out in a confused way, Prizes! Prizes!
@@ -438,7 +438,7 @@ So she swallowed one of the cakes, and was delighted to find that she began shri
The first thing Ive got to do, said Alice to herself, as she wandered about in the wood, is to grow to my right size again; and the second thing is to find my way into that lovely garden. I think that will be the best plan.
It sounded an excellent plan, no doubt, and very neatly and simply arranged; the only difficulty was, that she had not the smallest idea how to set about it; and while she was peering about anxiously among the trees, a little sharp bark just over her head made her look up in a great hurry.
An enormous puppy was looking down at her with large round eyes, and feebly stretching out one paw, trying to touch her. Poor little thing! said Alice, in a coaxing tone, and she tried hard to whistle to it; but she was terribly frightened all the time at the thought that it might be hungry, in which case it would be very likely to eat her up in spite of all her coaxing.
Hardly knowing what she did, she picked up a little bit of stick, and held it out to the puppy; whereupon the puppy jumped into the air off all its feet at once, with a yelp of delight, and rushed at the stick, and made believe to worry it; then Alice dodged behind a great thistle, to keep herself from being run over; and the moment she appeared on the other side, the puppy made another rush at the stick, and tumbled head over heels in its hurry to get hold of it; then Alice, thinking it was very like having a game of play with a cart-horse, and expecting every moment to be trampled under its feet, ran round the thistle again; then the puppy began a series of short charges at the stick, running a very little way forwards each time and a long way back, and barking hoarsely all the while, till at last it sat down a good way off, panting, with its tongue hanging out of its mouth, and its great eyes half shut.
Hardly knowing what she did, she picked up a little bit of stick, and held it out to the puppy; whereupon the puppy jumped into the air off all its feet at once, with a yelp of delight, and rushed at the stick, and made believe to worry it; then Alice dodged behind a great thistle, to keep herself from being run over; and the moment she appeared on the other side, the puppy made another rush at the stick, and tumbled head over heels in its hurry to get hold of it; then Alice, thinking it was very like having a game of play with a cart-horse, and expecting every moment to be trampled under its feet, ran round the thistle again; then the puppy began a series of short charges at the stick, running a very little way forwards each time and a long way back, and barking hoarsely all the while, till at last it sat down a good way off, panting, with its tongue hanging out of its mouth, and its great eyes half shut.
This seemed to Alice a good opportunity for making her escape; so she set off at once, and ran till she was quite tired and out of breath, and till the puppys bark sounded quite faint in the distance.
And yet what a dear little puppy it was! said Alice, as she leant against a buttercup to rest herself, and fanned herself with one of the leaves: I should have liked teaching it tricks very much, ifif Id only been the right size to do it! Oh dear! Id nearly forgotten that Ive got to grow up again! Let me seehow is it to be managed? I suppose I ought to eat or drink something or other; but the great question is, what?
The great question certainly was, what? Alice looked all round her at the flowers and the blades of grass, but she did not see anything that looked like the right thing to eat or drink under the circumstances. There was a large mushroom growing near her, about the same height as herself; and when she had looked under it, and on both sides of it, and behind it, it occurred to her that she might as well look and see what was on the top of it.
+7 -7
View File
@@ -62,22 +62,22 @@ func TestKVPCollItr(t *testing.T) {
itr := NewItr(vrw, coll)
for i := 0; i < 2; i++ {
for _, expRes := range test.itrResults {
for _, expRes := range test.itrResults {
kvp, buff, done := itr.nextForDestructiveMerge()
kval, err := kvp.Key.Value(ctx)
assert.NoError(t, err)
if !kval.Equals(types.Uint(expRes.keyVal)) {
t.Error("unexpected result")
if !kval.Equals(types.Uint(expRes.keyVal)) {
t.Error("unexpected result")
}
if (buff != nil) != expRes.exhaustedBuf {
t.Error("unexpected buffer result")
if (buff != nil) != expRes.exhaustedBuf {
t.Error("unexpected buffer result")
}
if done != expRes.done {
t.Error("unexpected is done value.")
if done != expRes.done {
t.Error("unexpected is done value.")
}
}
+1 -1
View File
@@ -771,7 +771,7 @@ func TestListSet(t *testing.T) {
}
testIdx(len(testList)-1, true)
// Compare list unequality, which doesn't require building a new list every iteration, so the increment can be smaller.
// Compare list inequality, which doesn't require building a new list every iteration, so the increment can be smaller.
for incr, i := 10, 0; i < len(testList); i += incr {
testIdx(i, false)
}
+3 -3
View File
@@ -497,7 +497,7 @@ func TestMapMutationReadWriteCount(t *testing.T) {
return temp.NewStruct(vs.Format(), []Value{
Bool(i%2 == 0),
Float(i),
String(fmt.Sprintf("I AM A REALLY REALY REALL SUPER CALIFRAGILISTICLY CRAZY-ASSED LONGTASTIC String %d", i)),
String(fmt.Sprintf("I AM A REALY REALY REAL SUPER CALIFRAGILISTICLY CRAZY-ASSED LONGTASTIC String %d", i)),
String(fmt.Sprintf("I am a bit shorter and also more chill: %d", i)),
})
}
@@ -765,9 +765,9 @@ func TestMapHasRemove(t *testing.T) {
m, err = me.Map(context.Background())
require.NoError(t, err)
expected := []string{"a", "c", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "p", "q", "r", "s"}
unexpected := []string{"b", "d", "o"}
unexpected := []string{"b", "d", "o"}
assert.True(hasAll(m, expected...))
assert.True(hasNone(m, unexpected...))
assert.True(hasNone(m, unexpected...))
assert.True(m.Len() == uint64(len(expected)))
}
+3 -3
View File
@@ -439,7 +439,7 @@ func (ms metaSequence) getChildSequence(ctx context.Context, idx int) (sequence,
}
// Returns the sequences pointed to by all items[i], s.t. start <= i < end, and returns the
// concatentation as one long composite sequence
// concatenation as one long composite sequence
func (ms metaSequence) getCompositeChildSequence(ctx context.Context, start uint64, length uint64) (sequence, error) {
ctx, span := tracer.Start(ctx, "metaSequence.getChildSequence", trace.WithAttributes(
attribute.Int64("level", int64(ms.treeLevel())),
@@ -541,10 +541,10 @@ func (ms metaSequence) getChildren(ctx context.Context, start, end uint64) ([]se
}
if len(hs) == 0 {
return seqs, nil // can occur with ptree that is fully uncommitted
return seqs, nil // can occur with ptree that is fully uncommitted
}
// Fetch committed child sequences in a single batch
// Fetch committed child sequences in a single batch
readValues, err := ms.vrw.ReadManyValues(ctx, hs)
if err != nil {
+1 -1
View File
@@ -80,7 +80,7 @@ func (se *SetEditor) Set(ctx context.Context) (Set, error) {
}
if i+1 < len(se.edits.edits) && se.edits.edits[i+1].value.Equals(edit.value) {
continue // next edit supercedes this one
continue // next edit supersedes this one
}
edit := edit
+1 -1
View File
@@ -21,7 +21,7 @@ import (
func TestVerbose(t *testing.T) {
Logger(context.Background()).Sugar().Warn("This is a test")
Logger(context.Background()).Sugar().Debug("This is a test with verbse = false")
Logger(context.Background()).Sugar().Debug("This is a test with verbose = false")
SetVerbose(true)
Logger(context.Background()).Sugar().Debug("This is a test with verbose = true")
}
+1 -1
View File
@@ -48,7 +48,7 @@ const (
//
// Field offsets encode the byte-offset from the front of the Tuple to the beginning
// of the corresponding field in the Tuple. The offset for the first field is always
// zero and is therefor omitted. Offsets and the field count are little-endian
// zero and is therefore omitted. Offsets and the field count are little-endian
// encoded uint16 values.
//
// Tuples read and write field values as byte slices. Interpreting these encoded
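A standalone sketch of the offset layout described above (illustrative names, not the val package's actual code): offsets for fields 1..n-1 are accumulated field lengths, little-endian encoded, with the first field's zero offset omitted.
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeOffsets returns the little-endian uint16 offsets for fields 1..n-1;
// the first field always starts at offset zero, so it is omitted.
// Assumes at least one field.
func encodeOffsets(fields [][]byte) []byte {
	out := make([]byte, 0, 2*(len(fields)-1))
	off := uint16(0)
	for _, f := range fields[:len(fields)-1] {
		off += uint16(len(f))
		out = binary.LittleEndian.AppendUint16(out, off)
	}
	return out
}

func main() {
	// Three fields of lengths 2, 1, 3: stored offsets are 2 and 3.
	fmt.Println(encodeOffsets([][]byte{{1, 2}, {3}, {4, 5, 6}})) // [2 0 3 0]
}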
+3 -3
View File
@@ -63,7 +63,7 @@ Isaac Dunham
Jaydeep Patil
Jens Gustedt
Jeremy Huntwork
Jo-Philipp Wich
Jo-Philipp Wich
Joakim Sindholt
John Spencer
Julien Ramseier
@@ -76,7 +76,7 @@ Luca Barbato
Luka Perkov
M Farkas-Dyck (Strake)
Mahesh Bodapati
Markus Wichmann
Markus Wichmann
Masanori Ogino
Michael Clark
Michael Forney
@@ -163,7 +163,7 @@ under the standard MIT terms.
All other files which have no copyright comments are original works
produced specifically for use as part of this library, written either
by Rich Felker, the main author of the library, or by one or more
contibutors listed above. Details on authorship of individual files
contributors listed above. Details on authorship of individual files
can be found in the git version control history of the project. The
omission of copyright and license comments in each file is in the
interest of source tree size.
+1 -1
View File
@@ -299,7 +299,7 @@ func worker(jobs <-chan string, results chan<- batsResult, ctx context.Context,
// in the batsee_results directory in the CWD, and the error is written to the result.err field.
func runBats(path string, resultChan chan<- batsResult, ctx context.Context, cfg config) {
cmd := exec.CommandContext(ctx, "bats", path)
// Set the process group ID so that we can kill the entire process tree if it runs too long. We need to differenciate
// Set the process group ID so that we can kill the entire process tree if it runs too long. We need to differentiate
// process group of the sub process from this one, because we'll kill the primary process if we don't.
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
cmd.Env = append(os.Environ(), fmt.Sprintf("DOLT_TEST_RETRIES=%d", cfg.retries))
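A minimal Unix-only sketch of the pattern the comment above describes: give the child its own process group, then signal the negated group id so the whole subtree dies without touching the parent's group.
package main

import (
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60")
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // child gets its own process group
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	time.Sleep(time.Second)
	// Signaling the negated pid reaches the whole group, sparing this process's own group.
	_ = syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
	_ = cmd.Wait()
}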
+1 -1
View File
@@ -41,7 +41,7 @@ var (
)
// Runs |sqlScript| concurrently on multiple clients.
// Useful for repoducing concurrency bugs.
// Useful for reproducing concurrency bugs.
func main() {
connStr := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s",
user, pass, host, port, database)