[no-release-notes] Merge pull request #8289

Spelling and grammar fixes from outside contributor.
This commit is contained in:
Neil Macneale IV
2024-08-20 18:27:21 -07:00
committed by GitHub
68 changed files with 108 additions and 108 deletions
+1 -1
View File
@@ -17,7 +17,7 @@ package cli
// This is a starting point for storing common messages. Doing this correctly would probably mean using language files
// but that is overkill for the moment.
const (
// Single variable - the name of the command. `dolt <command>` is how the commandString is formated in calls to the Exec method
// Single variable - the name of the command. `dolt <command>` is how the commandString is formatted in calls to the Exec method
// for dolt commands.
RemoteUnsupportedMsg = "%s can not currently be used when there is a local server running. Please stop your dolt sql-server or connect using `dolt sql` instead."
)
+1 -1
View File
@@ -252,7 +252,7 @@ func extractJsonResponse(content string) map[string]interface{} {
}
func sqlQuery(ctx context.Context, query string) (string, bool, error) {
cli.Println(fmt.Sprintf("Runnning query \"%s\"...", query))
cli.Println(fmt.Sprintf("Running query \"%s\"...", query))
output, _, err := doltExec(ctx, fmt.Sprintf("dolt sql -q \"%s\"", query), false)
if err != nil {
+1 -1
View File
@@ -479,7 +479,7 @@ func generateForceDeleteMessage(args []string) string {
return newArgs
}
// callStoredProcedure generates and exectures the SQL query for calling the DOLT_BRANCH stored procedure.
// callStoredProcedure generates and executes the SQL query for calling the DOLT_BRANCH stored procedure.
// All actions that modify branches delegate to this after they validate their arguments.
// Actions that don't modify branches, such as `dolt branch --list` and `dolt branch --show-current`, don't call
// this method.
+1 -1
View File
@@ -231,7 +231,7 @@ func validateAndParseDolthubUrl(urlStr string) (string, bool) {
}
if u.Scheme == dbfactory.HTTPSScheme && u.Host == "www.dolthub.com" {
// Get the actual repo name and covert the remote
// Get the actual repo name and convert the remote
split := strings.Split(u.Path, "/")
if len(split) > 2 {
+1 -1
View File
@@ -310,7 +310,7 @@ func (s sqlDiffWriter) RowWriter(fromTableInfo, toTableInfo *diff.TableInfo, tds
targetSch = fromTableInfo.Sch
}
// TOOD: schema names
// TODO: schema names
return sqlexport.NewSqlDiffWriter(tds.ToTableName.Name, targetSch, iohelp.NopWrCloser(cli.CliOut)), nil
}
+2 -2
View File
@@ -800,7 +800,7 @@ func persistServerLocalCreds(port int, dEnv *env.DoltEnv) (*LocalCreds, error) {
// remotesapiAuth facilitates the implementation remotesrv.AccessControl for the remotesapi server.
type remotesapiAuth struct {
// ctxFactory is a function that returns a new sql.Context. This will create a new conext every time it is called,
// ctxFactory is a function that returns a new sql.Context. This will create a new context every time it is called,
// so it should be called once per API request.
ctxFactory func(context.Context) (*sql.Context, error)
rawDb *mysql_db.MySQLDb
@@ -827,7 +827,7 @@ func (r *remotesapiAuth) ApiAuthenticate(ctx context.Context) (context.Context,
if strings.Index(address, ":") > 0 {
address, _, err = net.SplitHostPort(creds.Address)
if err != nil {
return nil, fmt.Errorf("Invlaid Host string for authentication: %s", creds.Address)
return nil, fmt.Errorf("Invalid Host string for authentication: %s", creds.Address)
}
}
+1 -1
View File
@@ -37,7 +37,7 @@ var ErrNoConflictsResolved = errors.New("no conflicts resolved")
const dolt_row_hash_tag = 0
// IsValidTableName checks if name is a valid identifer, and doesn't end with space characters
// IsValidTableName checks if name is a valid identifier, and doesn't end with space characters
func IsValidTableName(name string) bool {
if len(name) == 0 || unicode.IsSpace(rune(name[len(name)-1])) {
return false
+1 -1
View File
@@ -101,7 +101,7 @@ func RowsAndSchema() ([]row.Row, schema.Schema, error) {
return rows, sch, err
}
// MustTuple contructs a types.Tuple for a slice of types.Values.
// MustTuple constructs a types.Tuple for a slice of types.Values.
func MustTuple(vals ...types.Value) types.Tuple {
tup, err := types.NewTuple(types.Format_Default, vals...)
if err != nil {
+1 -1
View File
@@ -124,7 +124,7 @@ func GetMultiEnvStorageMetadata(dataDirFS filesys.Filesys) (StorageMetadataMap,
return sms, nil
}
// NewMultiEnv returns a new MultiRepoEnv instance dirived from a root DoltEnv instance.
// NewMultiEnv returns a new MultiRepoEnv instance derived from a root DoltEnv instance.
func MultiEnvForSingleEnv(ctx context.Context, env *DoltEnv) (*MultiRepoEnv, error) {
return MultiEnvForDirectory(ctx, env.Config.WriteableConfig(), env.FS, env.Version, env)
}
@@ -1209,7 +1209,7 @@ func resolveDefaults(ctx *sql.Context, tableName string, mergedSchema schema.Sch
return nil, err
}
// The default expresions always come in the order of the merged schema, but the fields we need to apply them to
// The default expressions always come in the order of the merged schema, but the fields we need to apply them to
// might have different column indexes in the case of a schema change
if len(exprs) > 0 {
for i := range exprs {
@@ -75,7 +75,7 @@ func GetMutableSecondaryIdxsWithPending(ctx *sql.Context, ourSch, sch schema.Sch
// If the schema has changed, don't reuse the index.
// TODO: This isn't technically required, but correctly handling updating secondary indexes when only some
// of the table's rows have been updated is difficult to get right.
// Dropping the index is potentially slower but guarenteed to be correct.
// Dropping the index is potentially slower but guaranteed to be correct.
if !m.KeyDesc().Equals(index.Schema().GetKeyDescriptorWithNoConversion()) {
continue
}
@@ -184,7 +184,7 @@ func filterBranchTests() []filterBranchTest {
asserts: []testAssertion{
{
setup: []testCommand{
// expeced error: "table not found: test"
// expected error: "table not found: test"
{cmd.FilterBranchCmd{}, args{"--continue", "-q", "DELETE FROM test WHERE pk > 1;"}},
},
},
@@ -34,7 +34,7 @@ import (
// A remotestorage.ChunkFetcher is a pipelined chunk fetcher for fetching a
// large number of chunks where the downloads may benefit from range
// coallescing, hedging, automatic retries, pipelining of download location
// coalescing, hedging, automatic retries, pipelining of download location
// retrieval with the fetching of the actual chunk bytes, etc.
//
// It is expected that one goroutine will be calling `Get()` with batches of
@@ -392,7 +392,7 @@ func fetcherDownloadRangesThread(ctx context.Context, locCh chan []*remotesapi.D
// |toSend| could have come from a previous iteration
// of this loop or the outer loop. If it's |nil|, we
// can get the next range to download from
// |downlaods.ranges|.
// |downloads.ranges|.
if toSend == nil {
max := downloads.ranges.DeleteMaxRegion()
if len(max) == 0 {
@@ -486,9 +486,9 @@ type SizeSetter interface {
// This does additive increase, multiplicative decrease on calls to |SetSize|,
// reading successes and failures from calls to |RecordSuccess| and
// |RecordFailure|. If there have been any faliures in the last update window,
// |RecordFailure|. If there have been any failures in the last update window,
// it will call |SetSize| with a new size that's 1/2 the current size. If there
// have been no faliures in the last update window, but there has been at least
// have been no failures in the last update window, but there has been at least
// one success, it will call |SetSize| with a size 1 greater than the current
// size. Will not scale size greater than |MaxConcurrency|.
func (cc *ConcurrencyControl) Run(ctx context.Context, done <-chan struct{}, ss SizeSetter, sz int) error {
@@ -38,8 +38,8 @@ type GetRange struct {
// A |Region| represents a continuous range of bytes within a Url.
// |ranges.Tree| maintains |Region| instances that cover every |GetRange|
// within the tree. As entries are inserted into the Tree, their Regions can
// coallesce with Regions which come before or after them in the same Url,
// based on the |coallesceLimit|.
// coalesce with Regions which come before or after them in the same Url,
// based on the |coalesceLimit|.
//
// |Region|s are maintained in a |RegionHeap| so that the |Tree| can quickly
// return a large download to get started on when a download worker is
@@ -103,11 +103,11 @@ func (rh *RegionHeap) Pop() any {
}
// A ranges.Tree is a tree data structure designed to support efficient
// coallescing of non-overlapping ranges inserted into it.
// coalescing of non-overlapping ranges inserted into it.
type Tree struct {
t *btree.BTreeG[*GetRange]
regions *RegionHeap
coallesceLimit int
t *btree.BTreeG[*GetRange]
regions *RegionHeap
coalesceLimit int
}
func GetRangeLess(a, b *GetRange) bool {
@@ -118,11 +118,11 @@ func GetRangeLess(a, b *GetRange) bool {
}
}
func NewTree(coallesceLimit int) *Tree {
func NewTree(coalesceLimit int) *Tree {
return &Tree{
t: btree.NewG[*GetRange](64, GetRangeLess),
regions: &RegionHeap{},
coallesceLimit: coallesceLimit,
t: btree.NewG[*GetRange](64, GetRangeLess),
regions: &RegionHeap{},
coalesceLimit: coalesceLimit,
}
}
@@ -154,12 +154,12 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
}
t.t.ReplaceOrInsert(ins)
// Check for coallesce with the range of the entry before the new one...
// Check for coalesce with the range of the entry before the new one...
t.t.DescendLessOrEqual(ins, func(gr *GetRange) bool {
if gr == ins {
return true
}
// If we coallesce...
// If we coalesce...
if ins.Url == gr.Url {
regionEnd := gr.Region.EndOffset
if regionEnd > ins.Offset {
@@ -167,8 +167,8 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
heap.Fix(t.regions, ins.Region.HeapIndex)
} else if (ins.Offset - regionEnd) < uint64(t.coallesceLimit) {
// Inserted entry is within the limit to coallesce with the prior one.
} else if (ins.Offset - regionEnd) < uint64(t.coalesceLimit) {
// Inserted entry is within the limit to coalesce with the prior one.
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
ins.Region.EndOffset = ins.Offset + uint64(ins.Length)
@@ -183,10 +183,10 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
if gr == ins {
return true
}
// If we coallesce...
// If we coalesce...
if ins.Url == gr.Url && gr.Region != ins.Region {
regionStart := gr.Region.StartOffset
if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coallesceLimit)) {
if regionStart < (ins.Offset + uint64(ins.Length) + uint64(t.coalesceLimit)) {
if ins.Region == nil {
ins.Region = gr.Region
ins.Region.MatchedBytes += uint64(ins.Length)
@@ -216,7 +216,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
return false
})
// We didn't coallesce with any existing Regions. Insert a new Region
// We didn't coalesce with any existing Regions. Insert a new Region
// covering just this GetRange.
if ins.Region == nil {
ins.Region = &Region{
@@ -233,7 +233,7 @@ func (t *Tree) Insert(url string, hash []byte, offset uint64, length uint32) {
// Returns all the |*GetRange| entries in the tree that are encompassed by the
// current top entry in our |RegionHeap|. For |HeapStrategy_largest|, this will
// be the largest possible download we can currently start, given our
// |coallesceLimit|.
// |coalesceLimit|.
func (t *Tree) DeleteMaxRegion() []*GetRange {
if t.regions.Len() == 0 {
return nil
@@ -36,7 +36,7 @@ import (
//
// Close should always be called on a reliable.Chan to ensure resource cleanup.
type Chan[T any] struct {
// All unack'd |T|s are stored in |buff|. As they get Ackd, they get poped from here.
// All unack'd |T|s are stored in |buff|. As they get Ackd, they get popped from here.
buff *circular.Buff[T]
// We return new |T|s from here and they go into |buff| to be delivered
@@ -47,7 +47,7 @@ func newBinlogStreamer() *binlogStreamer {
}
// startStream listens for new binlog events sent to this streamer over its binlog event
// channel and sends them over |conn|. It also listens for ticker ticks to send hearbeats
// channel and sends them over |conn|. It also listens for ticker ticks to send heartbeats
// over |conn|. The specified |binlogFormat| is used to define the format of binlog events
// and |binlogEventMeta| records the position of the stream. This method blocks until an error
// is received over the stream (e.g. the connection closing) or the streamer is closed,
@@ -142,7 +142,7 @@ func (r *branchControlReplica) Run() {
r.progressNotifier.RecordSuccess(attempt)
r.fastFailReplicationWait = false
r.backoff.Reset()
r.lgr.Debugf("branchControlReplica[%s]: sucessfully replicated branch control permissions.", r.client.remote)
r.lgr.Debugf("branchControlReplica[%s]: successfully replicated branch control permissions.", r.client.remote)
r.replicatedVersion = version
}
}
@@ -160,7 +160,7 @@ func (r *mysqlDbReplica) Run() {
r.progressNotifier.RecordSuccess(attempt)
r.fastFailReplicationWait = false
r.backoff.Reset()
r.lgr.Debugf("mysqlDbReplica[%s]: sucessfully replicated users and grants at version %d.", r.client.remote, version)
r.lgr.Debugf("mysqlDbReplica[%s]: successfully replicated users and grants at version %d.", r.client.remote, version)
r.replicatedVersion = version
} else {
r.lgr.Debugf("mysqlDbReplica[%s]: not replicating empty users and grants at version %d.", r.client.remote, r.version)
@@ -240,7 +240,7 @@ func startRebase(ctx *sql.Context, upstreamPoint string, commitBecomesEmptyHandl
}
// rebaseWorkingBranch is the name of the temporary branch used when performing a rebase. In Git, a rebase
// happens with a detatched HEAD, but Dolt doesn't support that, we use a temporary branch.
// happens with a detached HEAD, but Dolt doesn't support that, we use a temporary branch.
rebaseWorkingBranch := "dolt_rebase_" + rebaseBranch
var rsc doltdb.ReplicationStatusController
err = actions.CreateBranchWithStartPt(ctx, dbData, rebaseWorkingBranch, upstreamPoint, false, &rsc)
@@ -7035,7 +7035,7 @@ END`,
},
},
{
Name: "Database syntax propogates to inner calls",
Name: "Database syntax propagates to inner calls",
SetUpScript: []string{
"CALL DOLT_CHECKOUT('main');",
`CREATE PROCEDURE p4()
@@ -3817,7 +3817,7 @@ var SchemaConflictScripts = []queries.ScriptTest{
SetUpScript: []string{
"set @@autocommit=1;",
"create table t (pk int primary key, c0 varchar(20))",
"call dolt_commit('-Am', 'added tabele t')",
"call dolt_commit('-Am', 'added table t')",
"call dolt_checkout('-b', 'other')",
"alter table t modify column c0 int",
"call dolt_commit('-am', 'altered t on branch other')",
@@ -3845,7 +3845,7 @@ var SchemaConflictScripts = []queries.ScriptTest{
SetUpScript: []string{
"set @@autocommit=0;",
"create table t (pk int primary key, c0 varchar(20))",
"call dolt_commit('-Am', 'added tabele t')",
"call dolt_commit('-Am', 'added table t')",
"call dolt_checkout('-b', 'other')",
"alter table t modify column c0 int",
"call dolt_commit('-am', 'altered t on branch other')",
@@ -4012,7 +4012,7 @@ var OldFormatMergeConflictsAndCVsScripts = []queries.ScriptTest{
},
{
Query: "CALL DOLT_MERGE('branch3');",
Expected: []sql.Row{{"", 0, 1, "conficts found"}},
Expected: []sql.Row{{"", 0, 1, "conflicts found"}},
},
{
Query: "SELECT violation_type, pk, parent_fk from dolt_constraint_violations_child;",
@@ -39,7 +39,7 @@ type AutoIncrementTracker interface {
// given, so the new global maximum is computed without regard for its value in that working set.
Set(ctx *sql.Context, tableName string, table *doltdb.Table, ws ref.WorkingSetRef, newAutoIncVal uint64) (*doltdb.Table, error)
// AcquireTableLock acquires the auto increment lock on a table, and reutrns a callback function to release the lock.
// AcquireTableLock acquires the auto increment lock on a table, and returns a callback function to release the lock.
// Depending on the value of the `innodb_autoinc_lock_mode` system variable, the engine may need to acquire and hold
// the lock for the duration of an insert statement.
AcquireTableLock(ctx *sql.Context, tableName string) (func(), error)
@@ -56,7 +56,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) {
doc: gmstypes.MustJSON(`2.71`),
},
{
name: "type homogenous object",
name: "type homogeneous object",
doc: gmstypes.MustJSON(`{"a": 2, "b": 3, "c": 4}`),
},
{
@@ -64,7 +64,7 @@ func TestJSONValueMarshallingRoundTrip(t *testing.T) {
doc: gmstypes.MustJSON(`{"a": 2, "b": "two", "c": false}`),
},
{
name: "homogenous array",
name: "homogeneous array",
doc: gmstypes.MustJSON(`[1, 2, 3]`),
},
{
@@ -196,7 +196,7 @@ func (p *Provider) checkRefresh(ctx *sql.Context, sqlDb sql.Database, dbName, br
ctx.GetLogger().Debugf("statistics updating: %s", updateMeta.qual)
// mark index for updating
idxMetas = append(idxMetas, updateMeta)
// update lastest hash if we haven't already
// update latest hash if we haven't already
statDb.SetLatestHash(branch, table, tableHash)
}
}
@@ -50,7 +50,7 @@ func mustRow(r row.Row, err error) row.Row {
}
// These are in noms-key-sorted order, since InMemoryTable.AppendRow sorts its rows. This should probably be done
// programatically instead of hard-coded.
// programmatically instead of hard-coded.
var rows = []row.Row{
mustRow(row.New(types.Format_Default, rowSch, row.TaggedValues{
nameTag: types.String("Bill Billerson"),
+1 -1
View File
@@ -40,7 +40,7 @@ func NewCounter(metricID eventsapi.MetricID) *Counter {
return &Counter{0, metricID}
}
// Inc incements a counter. This method happens atomically.
// Inc increments a counter. This method happens atomically.
func (c *Counter) Inc() {
c.Add(1)
}
+1 -1
View File
@@ -308,7 +308,7 @@ func TestDropValue(t *testing.T) {
}
newApr2 := apr.DropValue("flag")
require.NotEqualf(t, apr, newApr2, "DropValue failes to drop flag")
require.NotEqualf(t, apr, newApr2, "DropValue fails to drop flag")
_, hasVal = newApr2.GetValue("string")
if !hasVal {
@@ -100,7 +100,7 @@ func TestConcurrentMapIter(t *testing.T) {
t.Errorf("Iter failed, expected to iterate 3 times, iterated %d times", counter)
}
// Test that iteration yeilds all elements
// Test that iteration yields all elements
if len(elements) != 3 {
t.Errorf("Iter failed, there should be 3 elements in the map, got %d", len(elements))
}
@@ -109,7 +109,7 @@ func (ch *ConfigHierarchy) SetStrings(updates map[string]string) error {
ns, paramName := splitParamName(k)
if ns == "" {
// panicing in cases where developers have used this function incorrectly
// panicking in cases where developers have used this function incorrectly
panic("Calls to SetStrings for a ConfigHierarchy must include the config name. " + k + " is not in the format config_name::param_name")
}
@@ -182,7 +182,7 @@ func (ch *ConfigHierarchy) Unset(params []string) error {
ns, paramName := splitParamName(param)
if ns == "" {
// panicing in cases where developers have used this function incorrectly
// panicking in cases where developers have used this function incorrectly
panic("Calls to Unset for a ConfigHierarchy must include the config name. " + param + " is not in the format config_name::param_name")
}
+2 -2
View File
@@ -521,7 +521,7 @@ func (fs *InMemFS) MoveDir(srcPath, destPath string) error {
func (fs *InMemFS) moveDirHelper(dir *memDir, destPath string) error {
// All calls to moveDirHelper MUST happen with the filesystem's read-write mutex locked
if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil {
return fmt.Errorf("moveDirHelper called without first aquiring filesystem read-write lock")
return fmt.Errorf("moveDirHelper called without first acquiring filesystem read-write lock")
}
if _, exists := fs.objs[destPath]; exists {
@@ -597,7 +597,7 @@ func (fs *InMemFS) MoveFile(srcPath, destPath string) error {
func (fs *InMemFS) moveFileHelper(obj *memFile, destPath string) error {
// All calls to moveFileHelper MUST happen with the filesystem's read-write mutex locked
if err := lockutil.AssertRWMutexIsLocked(fs.rwLock); err != nil {
return fmt.Errorf("moveFileHelper called without first aquiring filesystem read-write lock")
return fmt.Errorf("moveFileHelper called without first acquiring filesystem read-write lock")
}
destDir := filepath.Dir(destPath)
+1 -1
View File
@@ -27,7 +27,7 @@ import (
func YamlForVersion(st any, versionNum uint32) ([]byte, error) {
err := NullUnsupported(versionNum, st)
if err != nil {
return nil, fmt.Errorf("error nulling unspported fields for version %d: %w", versionNum, err)
return nil, fmt.Errorf("error nulling unsupported fields for version %d: %w", versionNum, err)
}
return yaml.Marshal(st)
+1 -1
View File
@@ -116,7 +116,7 @@ func (ss *ServiceState) CompareAndSwap(old, new ServiceState) (swapped bool) {
//
// |WaitForStart| can be called at any time on a Controller. It will block
// until |Start| is called. After |Start| is called, if all the services
// succesfully initialize, it will return |nil|. Otherwise it will return the
// successfully initialize, it will return |nil|. Otherwise it will return the
// same error |Start| returned.
//
// |WaitForStop| can be called at any time on a Controller. It will block until
+2 -2
View File
@@ -475,7 +475,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe
conf.Port = strconv.Itoa(s.Port)
conf.Password = s.Password
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
log.Printf("starting scipt: %s", script)
log.Printf("starting script: %s", script)
db, err := driver.ConnectDB(s.User, s.Password, s.Name, s.Host, s.Port, nil)
if err != nil {
@@ -515,7 +515,7 @@ func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalSe
// RunSqlServerTests creates a new repo and server for every import test.
func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser, conf Config) error {
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
log.Printf("starting scipt: %s", script)
log.Printf("starting script: %s", script)
//make a new server for every test
server, err := newServer(user, repo, conf)
if err != nil {
@@ -111,7 +111,7 @@ Configuration:
}
```
`Servers`: The server defintions to run the benchmark against. Accepts Dolt and MySQL configuratiosn.
`Servers`: The server definitions to run the benchmark against. Accepts Dolt and MySQL configurations.
`ScriptDir`: The directory of the TPCC testing scripts
+2 -2
View File
@@ -87,14 +87,14 @@ func Run(ctx context.Context, config SysbenchConfig) error {
return err
}
fmt.Printf("Successfuly finished %s\n", st)
fmt.Printf("Successfully finished %s\n", st)
err = WriteResults(serverConfig, results)
if err != nil {
return err
}
fmt.Printf("Successfuly wrote results for %s\n", st)
fmt.Printf("Successfully wrote results for %s\n", st)
}
return nil
}
@@ -62,7 +62,7 @@ func RunTpcc(ctx context.Context, config TpccConfig) error {
return err
}
fmt.Printf("Successfuly wrote results for %s\n", st)
fmt.Printf("Successfully wrote results for %s\n", st)
}
return nil
+1 -1
View File
@@ -324,7 +324,7 @@ func TestConcurrentCheckAndPuts(t *testing.T) {
for _, bsTest := range newBlobStoreTests() {
t.Run(bsTest.bsType, func(t *testing.T) {
if bsTest.rmwIterations*bsTest.rmwConcurrency > 255 {
panic("Test epects less than 255 total updates or it won't work as is.")
panic("Test expects less than 255 total updates or it won't work as is.")
}
testConcurrentCheckAndPuts(t, bsTest, uuid.New().String())
})
+6 -6
View File
@@ -40,7 +40,7 @@ func (t *TestStorage) NewView() *TestStoreView {
type TestStoreView struct {
ChunkStore
reads int32
hases int32
hashes int32
writes int32
}
@@ -61,12 +61,12 @@ func (s *TestStoreView) CacheHas(_ hash.Hash) bool {
}
func (s *TestStoreView) Has(ctx context.Context, h hash.Hash) (bool, error) {
atomic.AddInt32(&s.hases, 1)
atomic.AddInt32(&s.hashes, 1)
return s.ChunkStore.Has(ctx, h)
}
func (s *TestStoreView) HasMany(ctx context.Context, hashes hash.HashSet) (hash.HashSet, error) {
atomic.AddInt32(&s.hases, int32(len(hashes)))
atomic.AddInt32(&s.hashes, int32(len(hashes)))
return s.ChunkStore.HasMany(ctx, hashes)
}
@@ -104,9 +104,9 @@ func (s *TestStoreView) Reads() int {
return int(reads)
}
func (s *TestStoreView) Hases() int {
hases := atomic.LoadInt32(&s.hases)
return int(hases)
func (s *TestStoreView) Hashes() int {
hashes := atomic.LoadInt32(&s.hashes)
return int(hashes)
}
func (s *TestStoreView) Writes() int {
+1 -1
View File
@@ -86,7 +86,7 @@ func (iter *CommitIterator) Next(ctx context.Context) (LogNode, bool) {
newCols = append(newCols, col+cnt)
}
// Now that the branchlist has been adusted, check to see if there are branches with common
// Now that the branchlist has been adjusted, check to see if there are branches with common
// ancestors that will be folded together on this commit's graph.
foldedCols := iter.branches.HighestBranchIndexes()
node := LogNode{
+1 -1
View File
@@ -141,7 +141,7 @@ func addDatabaseArg(cmd *kingpin.CmdClause) (arg *string) {
// addNomsDocs - adds documentation (docs only, not commands) for existing (pre-kingpin) commands.
func addNomsDocs(noms *kingpin.Application) {
// commmit
// commit
commit := noms.Command("commit", `Commits a specified value as head of the dataset
If absolute-path is not provided, then it is read from stdin. See Spelling Objects at https://github.com/attic-labs/noms/blob/master/doc/spelling.md for details on the dataset and absolute-path arguments.
`)
+2 -2
View File
@@ -289,7 +289,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w
return err
}
if !iscommit {
return fmt.Errorf("SetHead failed: reffered to value is not a commit:")
return fmt.Errorf("SetHead failed: referred to value is not a commit:")
}
case tagName:
istag, err := IsTag(ctx, newVal)
@@ -297,7 +297,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash, w
return err
}
if !istag {
return fmt.Errorf("SetHead failed: reffered to value is not a tag:")
return fmt.Errorf("SetHead failed: referred to value is not a tag:")
}
_, commitaddr, err := newHead.HeadTag()
if err != nil {
+1 -1
View File
@@ -43,7 +43,7 @@ type (
printFunc func(ctx context.Context, w io.Writer, op prefixOp, key, val types.Value) error
)
// PrintDiff writes a textual reprensentation of the diff from |v1| to |v2|
// PrintDiff writes a textual representation of the diff from |v1| to |v2|
// to |w|. If |leftRight| is true then the left-right diff is used for ordered
// sequences - see Diff vs DiffLeftRight in Set and Map.
func PrintDiff(ctx context.Context, w io.Writer, v1, v2 types.Value, leftRight bool) (err error) {
+2 -2
View File
@@ -38,7 +38,7 @@
//
// The textual serialization of hashes uses big-endian base32 with the alphabet {0-9,a-v}. This scheme was chosen because:
//
// - It's easy to convert to and from base32 without bignum arithemetic.
// - It's easy to convert to and from base32 without bignum arithmetic.
// - No special chars: you can double-click to select in GUIs.
// - Sorted hashes will be sorted textually, making it easy to scan for humans.
//
@@ -125,7 +125,7 @@ func IsValid(s string) bool {
func Parse(s string) Hash {
r, ok := MaybeParse(s)
if !ok {
d.PanicIfError(fmt.Errorf("cound not parse Hash: %s", s))
d.PanicIfError(fmt.Errorf("could not parse Hash: %s", s))
}
return r
}
+2 -2
View File
@@ -213,12 +213,12 @@ func float64Encoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWri
}
func intEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) {
// TODO: encoding types.Int as types.Float is lossy, but will recquire a migration to change
// TODO: encoding types.Int as types.Float is lossy, but will require a migration to change
return types.Float(float64(v.Int())), nil
}
func uintEncoder(ctx context.Context, v reflect.Value, vrw types.ValueReadWriter) (types.Value, error) {
// TODO: encoding types.Int as types.Uint is lossy, but will recquire a migration to change
// TODO: encoding types.Int as types.Uint is lossy, but will require a migration to change
return types.Float(float64(v.Uint())), nil
}
+1 -1
View File
@@ -227,7 +227,7 @@ func (m *merger) threeWay(ctx context.Context, a, b, parent types.Value, path ty
defer updateProgress(m.progress)
if a == nil || b == nil {
d.Panic("Merge candidates cannont be nil: a = %v, b = %v", a, b)
d.Panic("Merge candidates cannot be nil: a = %v, b = %v", a, b)
}
switch a.Kind() {
+2 -2
View File
@@ -38,9 +38,9 @@ import (
//
// It logically stores a running histogram of uint64 values and shares some
// important features of its inspiration:
// * It acccepts a correctness deficit in return for not needing to lock.
// * It accepts a correctness deficit in return for not needing to lock.
// IOW, concurrent calls to Sample may clobber each other.
// * It trades compactness and ease of arithmatic across histograms for
// * It trades compactness and ease of arithmetic across histograms for
// precision. Samples lose precision up to the range of the values which
// are stored in a bucket
//
+1 -1
View File
@@ -22,7 +22,7 @@ When backed by AWS, NBS stores its data mainly in S3, along with a single Dynamo
## Perf
For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benenfit Noms at all. NBS locates related chunks together and thus reading data from a NBS store can be done quite alot faster. As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz:
For the file back-end, perf is substantially better than LevelDB mainly because LDB spends substantial IO with the goal of keeping KV pairs in key-order which doesn't benefit Noms at all. NBS locates related chunks together and thus reading data from an NBS store can be done quite a lot faster. As an example, storing & retrieving a 1.1GB MP4 video file on a MBP i5 2.9Ghz:
* LDB
* Initial import: 44 MB/s, size on disk: 1.1 GB.
+1 -1
View File
@@ -531,7 +531,7 @@ type chunkGroup struct {
type chunkCmpScore struct {
chunkId hash.Hash
// The compression score. Higher is better. This is the ratio of the compressed size to the raw size, using the group's
// dictionary. IE, this number only has meaning withing the group
// dictionary. IE, this number only has meaning within the group
score float64
// The size of the compressed chunk using the group's dictionary.
dictCmpSize int
+1 -1
View File
@@ -105,7 +105,7 @@ func (aw *archiveWriter) writeByteSpan(b []byte) (uint32, error) {
}
if len(b) == 0 {
return 0, fmt.Errorf("Rutime error: empty compressed byte span")
return 0, fmt.Errorf("Runtime error: empty compressed byte span")
}
offset := aw.bytesWritten
+1 -1
View File
@@ -97,7 +97,7 @@ func (dm dynamoManifest) ParseIfExists(ctx context.Context, stats *Stats, readHo
return false, manifestContents{}, fmt.Errorf("failed to get dynamo table: '%s' - %w", dm.table, err)
}
// !exists(dbAttr) => unitialized store
// !exists(dbAttr) => uninitialized store
if len(result.Item) > 0 {
valid, hasSpecs, hasAppendix := validateManifest(result.Item)
if !valid {
+1 -1
View File
@@ -105,7 +105,7 @@ func nomsFileTableReader(ctx context.Context, path string, h hash.Hash, chunkCou
}
if fi.Size() < 0 {
// Size returns the number of bytes for regular files and is system dependant for others (Some of which can be negative).
// Size returns the number of bytes for regular files and is system dependent for others (Some of which can be negative).
err = fmt.Errorf("%s has invalid size: %d", path, fi.Size())
return
}
+1 -1
View File
@@ -444,7 +444,7 @@ func (wr *journalWriter) writeCompressedChunk(ctx context.Context, cc Compressed
// We go through |commitRootHash|, instead of directly |Sync()|ing the
// file, because we also have accumulating delayed work in the form of
// journal index records which may need to be serialized and flushed.
// Assumptions in journal bootstraping and the contents of the journal
// Assumptions in journal bootstrapping and the contents of the journal
// index require us to have a newly written root hash record anytime we
// write index records out. It's perfectly fine to reuse the current
// root hash, and this will also take care of the |Sync|.
+1 -1
View File
@@ -119,7 +119,7 @@ type manifestContents struct {
gcGen hash.Hash
specs []tableSpec
// An appendix is a list of |tableSpecs| that track an auxillary collection of
// An appendix is a list of |tableSpecs| that track an auxiliary collection of
// table files used _only_ for query performance optimizations. These appendix |tableSpecs| can be safely
// managed with nbs.UpdateManifestWithAppendix, however generation and removal of the actual table files
// the appendix |tableSpecs| reference is done manually. All appendix |tableSpecs| will be prepended to the
+1 -1
View File
@@ -95,7 +95,7 @@ import (
-Total Uncompressed Chunk Data is the sum of the uncompressed byte lengths of all contained chunk byte slices.
-Magic Number is the first 8 bytes of the SHA256 hash of "https://github.com/attic-labs/nbs".
NOTE: Unsigned integer quanities, hashes and hash suffix are all encoded big-endian
NOTE: Unsigned integer quantities, hashes and hash suffix are all encoded big-endian
Looking up Chunks in an NBS Table
+1 -1
View File
@@ -91,7 +91,7 @@ func newTableWriter(buff []byte, snapper snappyEncoder) *tableWriter {
func (tw *tableWriter) addChunk(h hash.Hash, data []byte) bool {
if len(data) == 0 {
panic("NBS blocks cannont be zero length")
panic("NBS blocks cannot be zero length")
}
// Compress data straight into tw.buff
+1 -1
View File
@@ -170,7 +170,7 @@ func getMergeArtifactCount(msg serial.Message) (uint16, error) {
if ma.KeyItemsLength() == 0 {
return 0, nil
}
// zeroth offset ommitted from array
// zeroth offset omitted from array
return uint16(ma.KeyOffsetsLength() + 1), nil
}
+2 -2
View File
@@ -66,7 +66,7 @@ func writeItemOffsets(b *fb.Builder, items [][]byte, sumSz int) fb.UOffsetT {
func countAddresses(items [][]byte, td val.TupleDesc) (cnt int) {
for i := len(items) - 1; i >= 0; i-- {
val.IterAddressFields(td, func(j int, t val.Type) {
// get offset of address withing |tup|
// get offset of address within |tup|
addr := val.Tuple(items[i]).GetField(j)
if len(addr) > 0 && !hash.New(addr).IsEmpty() {
cnt++
@@ -89,7 +89,7 @@ func writeAddressOffsets(b *fb.Builder, items [][]byte, sumSz int, td val.TupleD
if len(addr) == 0 || hash.New(addr).IsEmpty() {
return
}
// get offset of address withing |tup|
// get offset of address within |tup|
o, _ := tup.GetOffset(j)
o += off // offset is tuple start plus field start
b.PrependUint16(uint16(o))
+1 -1
View File
@@ -24,7 +24,7 @@ import (
"github.com/dolthub/dolt/go/store/val"
)
// Single layer trees are entirly root nodes - which are imbedded in the table flatbuffer, so we don't
// Single layer trees are entirely root nodes - which are embedded in the table flatbuffer, so we don't
// currently use them for purposes of grouping chunks.
func TestAddressDifferFromRootsOneLayer(t *testing.T) {
fromTups, desc := AscendingUintTuples(42)
+1 -1
View File
@@ -236,7 +236,7 @@ const (
// split on any of the records up to |size - thisSize|,
// the probability that we should split on this record
// is (CDF(end) - CDF(start)) / (1 - CDF(start)), or,
// the precentage of the remaining portion of the CDF
// the percentage of the remaining portion of the CDF
// that this record actually covers. We split is |hash|,
// treated as a uniform random number between [0,1),
// is less than this percentage.
+2 -2
View File
@@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) {
benchmarkPut(b, randomInts(65536))
})
})
b.Run("asending keys", func(b *testing.B) {
b.Run("ascending keys", func(b *testing.B) {
b.Run("n=64", func(b *testing.B) {
benchmarkPut(b, ascendingInts(64))
})
@@ -104,7 +104,7 @@ func BenchmarkIterAll(b *testing.B) {
benchmarkIterAll(b, randomInts(65536))
})
})
b.Run("asending keys", func(b *testing.B) {
b.Run("ascending keys", func(b *testing.B) {
b.Run("n=64", func(b *testing.B) {
benchmarkIterAll(b, ascendingInts(64))
})
+2 -2
View File
@@ -57,14 +57,14 @@ var maxOffset = int(1<<maxOffsetPOT - 1)
//
// When in the "matched state" (attempting to extend the current match), Snappy
// does not re-index new 4-byte sequences, but Sloppy does. The reason for this
// is that Sloppy would like match the most recent occurence as it moves
// is that Sloppy would like match the most recent occurrence as it moves
// forward.
//
// Lastly, Sloppy adds two novel heuritics, both aimed at further mitigating
// the chance of chunk boundaries being redrawn because of byte value changes:
//
// 1) During the first 2 bytes of match, it *continues* to look for closer
// matches (effectively prefering a closer but shorter copy to a further but
// matches (effectively preferring a closer but shorter copy to a further but
// longer one). The reason for this is that when sequences repeat frequently in
// a byte stream, randomness provides for a good chance that a one or two byte
// prefix on a repeated sequence will match "far away". E.g.
+1 -1
View File
@@ -771,7 +771,7 @@ func TestListSet(t *testing.T) {
}
testIdx(len(testList)-1, true)
// Compare list unequality, which doesn't require building a new list every iteration, so the increment can be smaller.
// Compare list inequality, which doesn't require building a new list every iteration, so the increment can be smaller.
for incr, i := 10, 0; i < len(testList); i += incr {
testIdx(i, false)
}
+1 -1
View File
@@ -439,7 +439,7 @@ func (ms metaSequence) getChildSequence(ctx context.Context, idx int) (sequence,
}
// Returns the sequences pointed to by all items[i], s.t. start <= i < end, and returns the
// concatentation as one long composite sequence
// concatenation as one long composite sequence
func (ms metaSequence) getCompositeChildSequence(ctx context.Context, start uint64, length uint64) (sequence, error) {
ctx, span := tracer.Start(ctx, "metaSequence.getChildSequence", trace.WithAttributes(
attribute.Int64("level", int64(ms.treeLevel())),
+1 -1
View File
@@ -80,7 +80,7 @@ func (se *SetEditor) Set(ctx context.Context) (Set, error) {
}
if i+1 < len(se.edits.edits) && se.edits.edits[i+1].value.Equals(edit.value) {
continue // next edit supercedes this one
continue // next edit supersedes this one
}
edit := edit
+1 -1
View File
@@ -21,7 +21,7 @@ import (
func TestVerbose(t *testing.T) {
Logger(context.Background()).Sugar().Warn("This is a test")
Logger(context.Background()).Sugar().Debug("This is a test with verbse = false")
Logger(context.Background()).Sugar().Debug("This is a test with verbose = false")
SetVerbose(true)
Logger(context.Background()).Sugar().Debug("This is a test with verbose = true")
}
+1 -1
View File
@@ -48,7 +48,7 @@ const (
//
// Field offsets encode the byte-offset from the front of the Tuple to the beginning
// of the corresponding field in the Tuple. The offset for the first field is always
// zero and is therefor omitted. Offsets and the field count are little-endian
// zero and is therefore omitted. Offsets and the field count are little-endian
// encoded uint16 values.
//
// Tuples read and write field values as byte slices. Interpreting these encoded
+1 -1
View File
@@ -299,7 +299,7 @@ func worker(jobs <-chan string, results chan<- batsResult, ctx context.Context,
// in the batsee_results directory in the CWD, and the error is written to the result.err field.
func runBats(path string, resultChan chan<- batsResult, ctx context.Context, cfg config) {
cmd := exec.CommandContext(ctx, "bats", path)
// Set the process group ID so that we can kill the entire process tree if it runs too long. We need to differenciate
// Set the process group ID so that we can kill the entire process tree if it runs too long. We need to differentiate
// process group of the sub process from this one, because kill the primary process if we don't.
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
cmd.Env = append(os.Environ(), fmt.Sprintf("DOLT_TEST_RETRIES=%d", cfg.retries))
+1 -1
View File
@@ -41,7 +41,7 @@ var (
)
// Runs |sqlScript| concurrently on multiple clients.
// Useful for repoducing concurrency bugs.
// Useful for reproducing concurrency bugs.
func main() {
connStr := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s",
user, pass, host, port, database)