Merge pull request #5593 from dolthub/andy/fix-journal-lock-hash-on-bootstrap
go/store/nbs: compute fresh lock hash during manifest true-up on chun…
@@ -75,6 +75,11 @@ func (cmd SendMetricsCmd) ArgParser() *argparser.ArgParser {
-// Exec is the implementation of the command that flushes the events to the grpc service
+// Exec executes the command
 func (cmd SendMetricsCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
+	if dEnv.DoltDB != nil { // see go/cmd/dolt/dolt.go:interceptSendMetrics()
+		cli.PrintErrln("expected DoltEnv without DoltDB")
+		return 1
+	}
+
 	ap := cmd.ArgParser()
 
 	help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cli.CommandDocumentationContent{ShortDesc: sendMetricsShortDesc}, ap))
@@ -308,6 +308,10 @@ func runMain() int {
 	warnIfMaxFilesTooLow()
 
 	ctx := context.Background()
+	if ok, exit := interceptSendMetrics(ctx, args); ok {
+		return exit
+	}
+
 	dEnv := env.Load(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, doltdb.LocalDirDoltDB, Version)
 	dEnv.IgnoreLockFile = ignoreLockFile
 
@@ -447,3 +451,11 @@ func processEventsDir(args []string, dEnv *env.DoltEnv) error {
 
 	return nil
 }
+
+func interceptSendMetrics(ctx context.Context, args []string) (bool, int) {
+	if len(args) < 1 || args[0] != commands.SendMetricsCommand {
+		return false, 0
+	}
+	dEnv := env.LoadWithoutDB(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, Version)
+	return true, doltCommand.Exec(ctx, "dolt", args, dEnv)
+}
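Read together, these hunks route `dolt send-metrics` around the normal startup path: the command is intercepted before `env.Load`, handed an environment built by `env.LoadWithoutDB`, and `SendMetricsCmd.Exec` now refuses to run if a DoltDB was attached. A minimal, self-contained sketch of that control flow, using simplified stand-in types rather than the real `env` and `cli` packages:

```go
package main

import (
	"context"
	"fmt"
	"os"
)

// Toy stand-ins for env.DoltEnv and the command layer; the real types live in
// go/libraries/doltcore/env and go/cmd/dolt/commands.
type DoltEnv struct{ DoltDB *struct{} }

const sendMetricsCommand = "send-metrics"

// loadWithoutDB mirrors the shape of env.LoadWithoutDB: config and repo state
// only, DoltDB left nil.
func loadWithoutDB(ctx context.Context) *DoltEnv { return &DoltEnv{} }

// sendMetricsExec mirrors SendMetricsCmd.Exec's new guard: it bails out if a
// DoltDB was attached to the environment.
func sendMetricsExec(ctx context.Context, dEnv *DoltEnv) int {
	if dEnv.DoltDB != nil {
		fmt.Fprintln(os.Stderr, "expected DoltEnv without DoltDB")
		return 1
	}
	// ... flush queued metrics ...
	return 0
}

// interceptSendMetrics mirrors the new helper in dolt.go: handle the command
// before the full environment (and database) is loaded.
func interceptSendMetrics(ctx context.Context, args []string) (bool, int) {
	if len(args) < 1 || args[0] != sendMetricsCommand {
		return false, 0
	}
	return true, sendMetricsExec(ctx, loadWithoutDB(ctx))
}

func main() {
	ctx := context.Background()
	if ok, exit := interceptSendMetrics(ctx, os.Args[1:]); ok {
		os.Exit(exit)
	}
	// ... otherwise fall through to env.Load and normal command dispatch ...
}
```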
go/libraries/doltcore/env/environment.go
@@ -106,40 +106,46 @@ func (dEnv *DoltEnv) GetRemoteDB(ctx context.Context, format *types.NomsBinForma
 	}
 }
 
-// Load loads the DoltEnv for the .dolt directory determined by resolving the specified urlStr with the specified Filesys.
-func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr string, version string) *DoltEnv {
+func LoadWithoutDB(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, version string) *DoltEnv {
 	cfg, cfgErr := LoadDoltCliConfig(hdp, fs)
 	repoState, rsErr := LoadRepoState(fs)
 
-	ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr, fs)
-
-	dEnv := &DoltEnv{
-		Version:     version,
-		Config:      cfg,
-		CfgLoadErr:  cfgErr,
-		RepoState:   repoState,
-		RSLoadErr:   rsErr,
-		DoltDB:      ddb,
-		DBLoadError: dbLoadErr,
-		FS:          fs,
-		urlStr:      urlStr,
-		hdp:         hdp,
-	}
-
-	if dEnv.RepoState != nil {
-		remotes := make(map[string]Remote, len(dEnv.RepoState.Remotes))
-		for n, r := range dEnv.RepoState.Remotes {
+	// deep copy remotes and backups ¯\_(ツ)_/¯ (see commit c59cbead)
+	if repoState != nil {
+		remotes := make(map[string]Remote, len(repoState.Remotes))
+		for n, r := range repoState.Remotes {
 			remotes[n] = r
 		}
-		dEnv.RepoState.Remotes = remotes
+		repoState.Remotes = remotes
 
-		backups := make(map[string]Remote, len(dEnv.RepoState.Backups))
-		for n, r := range dEnv.RepoState.Backups {
+		backups := make(map[string]Remote, len(repoState.Backups))
+		for n, r := range repoState.Backups {
 			backups[n] = r
 		}
-		dEnv.RepoState.Backups = backups
+		repoState.Backups = backups
 	}
 
+	return &DoltEnv{
+		Version:    version,
+		Config:     cfg,
+		CfgLoadErr: cfgErr,
+		RepoState:  repoState,
+		RSLoadErr:  rsErr,
+		FS:         fs,
+		hdp:        hdp,
+	}
+}
+
+// Load loads the DoltEnv for the .dolt directory determined by resolving the specified urlStr with the specified Filesys.
+func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr string, version string) *DoltEnv {
+	dEnv := LoadWithoutDB(ctx, hdp, fs, version)
+
+	ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr, fs)
+
+	dEnv.DoltDB = ddb
+	dEnv.DBLoadError = dbLoadErr
+	dEnv.urlStr = urlStr
+
 	if dbLoadErr == nil && dEnv.HasDoltDir() {
 		if !dEnv.HasDoltTempTableDir() {
 			tmpDir, err := dEnv.TempTableFilesDir()
@@ -172,7 +178,7 @@ func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr s
 		}
 	}
 
-	if rsErr == nil && dbLoadErr == nil {
+	if dEnv.RSLoadErr == nil && dbLoadErr == nil {
 		// If the working set isn't present in the DB, create it from the repo state. This step can be removed post 1.0.
 		_, err := dEnv.WorkingSet(ctx)
 		if errors.Is(err, doltdb.ErrWorkingSetNotFound) {
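The environment.go change splits loading into two layers: `LoadWithoutDB` builds a `DoltEnv` from CLI config and repo state alone, and `Load` wraps it to attach the database. A hedged usage sketch, assuming dolt's usual import paths; the call shapes match the dolt.go hunks above, but the surrounding program is illustrative only:

```go
package main

import (
	"context"

	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/env"
	"github.com/dolthub/dolt/go/libraries/utils/filesys"
)

const version = "0.0.0" // placeholder; the real binary passes its build version

func main() {
	ctx := context.Background()

	// Full environment, database included (the normal dispatch path):
	dEnv := env.Load(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, doltdb.LocalDirDoltDB, version)

	// Config and repo state only; DoltDB stays nil (the send-metrics path):
	metricsEnv := env.LoadWithoutDB(ctx, env.GetCurrentUserHomeDir, filesys.LocalFS, version)

	_ = dEnv
	_ = metricsEnv
}
```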
@@ -129,26 +129,39 @@ func (j *chunkJournal) bootstrapJournalWriter(ctx context.Context) (err error) {
 		return err
 	}
 
-	var contents manifestContents
-	ok, contents, err = j.backing.ParseIfExists(ctx, &Stats{}, nil)
+	mc, err := trueUpBackingManifest(ctx, root, j.backing)
 	if err != nil {
 		return err
 	}
-
-	if ok {
-		// the journal file is the source of truth for the root hash, true-up persisted manifest
-		contents.root = root
-		contents, err = j.backing.Update(ctx, contents.lock, contents, &Stats{}, nil)
-		if err != nil {
-			return err
-		}
-	} else {
-		return fmt.Errorf("manifest not found when opening chunk journal")
-	}
-	j.contents = contents
+	j.contents = mc
 	return
 }
 
+// the journal file is the source of truth for the root hash, true-up persisted manifest
+func trueUpBackingManifest(ctx context.Context, root hash.Hash, backing manifest) (manifestContents, error) {
+	ok, mc, err := backing.ParseIfExists(ctx, &Stats{}, nil)
+	if err != nil {
+		return manifestContents{}, err
+	} else if !ok {
+		return manifestContents{}, fmt.Errorf("manifest not found when opening chunk journal")
+	}
+
+	prev := mc.lock
+	next := generateLockHash(root, mc.specs, mc.appendix)
+	mc.lock = next
+	mc.root = root
+
+	mc, err = backing.Update(ctx, prev, mc, &Stats{}, nil)
+	if err != nil {
+		return manifestContents{}, err
+	} else if mc.lock != next {
+		return manifestContents{}, errOptimisticLockFailedTables
+	} else if mc.root != root {
+		return manifestContents{}, errOptimisticLockFailedRoot
+	}
+	return mc, nil
+}
+
 // Persist implements tablePersister.
 func (j *chunkJournal) Persist(ctx context.Context, mt *memTable, haver chunkReader, stats *Stats) (chunkSource, error) {
 	if err := j.maybeInit(ctx); err != nil {
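The core of the fix is `trueUpBackingManifest`: where the old bootstrap path set `contents.root` but reused the stale `contents.lock` when updating the backing manifest, the new helper derives a fresh lock from the journal root and the existing specs, performs an optimistic update against the previous lock, and checks that the lock and root coming back are the ones it wrote. A self-contained toy sketch of that compare-and-swap pattern, with a hypothetical `manifest` type and hash function standing in for the nbs internals:

```go
package main

import (
	"crypto/sha512"
	"errors"
	"fmt"
)

// Toy manifest with an optimistic-locking update, standing in for the nbs
// backing manifest. The real code uses manifestContents, generateLockHash,
// and backing.Update.
type manifest struct {
	lock  [20]byte
	root  [20]byte
	specs []string
}

// lockHash derives a lock from the root hash and the table specs, so any
// change to either produces a different lock.
func lockHash(root [20]byte, specs []string) [20]byte {
	h := sha512.New()
	h.Write(root[:])
	for _, s := range specs {
		h.Write([]byte(s))
	}
	var out [20]byte
	copy(out[:], h.Sum(nil))
	return out
}

// update succeeds only if the caller presents the current lock (compare-and-swap).
func (m *manifest) update(prevLock [20]byte, next manifest) (manifest, error) {
	if prevLock != m.lock {
		return *m, errors.New("optimistic lock failed")
	}
	*m = next
	return *m, nil
}

// trueUp mirrors the shape of trueUpBackingManifest: the journal's root is
// authoritative, so recompute the lock for that root before updating.
func trueUp(m *manifest, journalRoot [20]byte) (manifest, error) {
	prev := m.lock
	next := *m
	next.root = journalRoot
	next.lock = lockHash(journalRoot, m.specs)

	got, err := m.update(prev, next)
	if err != nil {
		return manifest{}, err
	} else if got.lock != next.lock || got.root != journalRoot {
		return manifest{}, errors.New("manifest changed concurrently")
	}
	return got, nil
}

func main() {
	m := &manifest{specs: []string{"table1", "table2"}}
	m.lock = lockHash(m.root, m.specs)

	var journalRoot [20]byte
	copy(journalRoot[:], []byte("root-from-chunk-journal"))

	mc, err := trueUp(m, journalRoot)
	fmt.Println(mc.root == journalRoot, err)
}
```

Passing `prev` while writing a freshly computed `next` lock is what lets the backing store reject the write if anything else touched the manifest in the meantime, which is the behavior the `errOptimisticLockFailedTables` and `errOptimisticLockFailedRoot` checks in the hunk above guard against.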