Merge pull request #9999 from dolthub/aaron/nbs-no-export-chunk-journal

[no-release-notes] go/store/nbs: Small cleanup to not expose ChunkJournal() from NomsBlockStore.
Aaron Son
2025-11-18 08:52:32 -08:00
committed by GitHub
3 changed files with 34 additions and 13 deletions
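
At a glance: DoltDB gains IterateRoots and NomsBlockStore gains ChunkJournalSize, so callers outside go/store/nbs no longer need a *ChunkJournal handle. A minimal sketch of the new caller-facing pattern, assuming an already-opened *doltdb.DoltDB; the printRootHistory helper is illustrative and not part of this change:

package example

import (
	"fmt"
	"time"

	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
)

// printRootHistory walks the roots the chunk journal remembers via the new
// DoltDB.IterateRoots wrapper, instead of reaching into the NomsBlockStore's
// ChunkJournal directly. It visits an unspecified number of previous roots
// and ends with the current one; with no journal it visits nothing.
func printRootHistory(ddb *doltdb.DoltDB) error {
	return ddb.IterateRoots(func(root string, timestamp *time.Time) error {
		when := "unknown"
		if timestamp != nil {
			when = timestamp.Format(time.RFC3339)
		}
		fmt.Printf("%s  %s\n", root, when)
		return nil
	})
}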

View File

@@ -2010,8 +2010,15 @@ func (ddb *DoltDB) IsTableFileStore() bool {
 	return ok
 }
-// ChunkJournal returns the ChunkJournal for this DoltDB, if one is in use.
-func (ddb *DoltDB) ChunkJournal() *nbs.ChunkJournal {
+// Iterates an unspecified number of previous root hashes for this
+// DoltDB, including the time at which they were written. Ends by
+// visiting the current root hash.
+//
+// Only works in the case that the underlying store is a
+// NomsBlockStore instance which exposes its roots through
+// IterateRoots. Otherwise returns |nil| without visiting any roots,
+// including the current one.
+func (ddb *DoltDB) IterateRoots(cb func(root string, timestamp *time.Time) error) error {
 	cs := datas.ChunkStoreFromDatabase(ddb.db)
 	if generationalNBS, ok := cs.(*nbs.GenerationalNBS); ok {
@@ -2019,7 +2026,7 @@ func (ddb *DoltDB) ChunkJournal() *nbs.ChunkJournal {
 	}
 	if nbsStore, ok := cs.(*nbs.NomsBlockStore); ok {
-		return nbsStore.ChunkJournal()
+		return nbsStore.IterateRoots(cb)
 	} else {
 		return nil
 	}
@@ -2061,10 +2068,10 @@ func (ddb *DoltDB) StoreSizes(ctx context.Context) (StoreSizes, error) {
 		if err != nil {
 			return StoreSizes{}, err
 		}
-		journalSz := newGenNBS.ChunkJournal()
-		if journal != nil {
+		journalSz, ok := newGenNBS.ChunkJournalSize()
+		if ok {
 			return StoreSizes{
-				JournalBytes: uint64(journal.Size()),
+				JournalBytes: uint64(journalSz),
 				NewGenBytes:  newgenSz,
 				TotalBytes:   totalSz,
 			}, nil
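
The StoreSizes hunk above swaps a nil check on the journal pointer for the comma-ok result of ChunkJournalSize. A hedged sketch of the same pattern against the exported nbs API, assuming a *nbs.NomsBlockStore obtained elsewhere; reportJournalSize is an illustrative helper:

package example

import (
	"fmt"

	"github.com/dolthub/dolt/go/store/nbs"
)

// reportJournalSize mirrors the comma-ok pattern StoreSizes now uses:
// ChunkJournalSize reports (0, false) when the store is not journal-backed,
// so callers no longer branch on a *ChunkJournal pointer.
func reportJournalSize(store *nbs.NomsBlockStore) {
	if journalSz, ok := store.ChunkJournalSize(); ok {
		fmt.Printf("chunk journal: %d bytes\n", journalSz)
		return
	}
	fmt.Println("no chunk journal in use")
}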

View File

@@ -92,14 +92,10 @@ func (rltf *ReflogTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.Row
 	}
 	ddb := sqlDb.DbData().Ddb
-	journal := ddb.ChunkJournal()
-	if journal == nil {
-		return sql.RowsToRowIter(), nil
-	}
 	previousCommitsByRef := make(map[string]string)
 	rows := make([]sql.Row, 0)
-	err := journal.IterateRoots(func(root string, timestamp *time.Time) error {
+	err := ddb.IterateRoots(func(root string, timestamp *time.Time) error {
 		hashof := hash.Parse(root)
 		datasets, err := ddb.DatasetsByRootHash(ctx, hashof)
 		if err != nil {
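
With the early return gone, the reflog table function relies on DoltDB.IterateRoots visiting nothing and returning nil when no chunk journal backs the store, so the accumulated rows simply stay empty. A sketch of that accumulate-in-a-closure shape; rootEntry and collectRoots are illustrative names, not part of the change:

package example

import (
	"time"

	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
)

// rootEntry is an illustrative record of one visited root.
type rootEntry struct {
	Root string
	At   *time.Time
}

// collectRoots accumulates visited roots in a closure, the same shape the
// reflog table function uses. With no chunk journal, IterateRoots visits
// nothing and returns nil, so the result is an empty slice.
func collectRoots(ddb *doltdb.DoltDB) ([]rootEntry, error) {
	var entries []rootEntry
	err := ddb.IterateRoots(func(root string, timestamp *time.Time) error {
		entries = append(entries, rootEntry{Root: root, At: timestamp})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}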

View File

@@ -155,14 +155,32 @@ type Range struct {
 	DictLength uint32
 }
+// IterateRoots iterates over the in-memory roots tracked by the ChunkJournal, if there is one.
+func (nbs *NomsBlockStore) IterateRoots(f func(root string, timestamp *time.Time) error) error {
+	cj := nbs.chunkJournal()
+	if cj == nil {
+		return nil
+	}
+	return cj.IterateRoots(f)
+}
-// ChunkJournal returns the ChunkJournal in use by this NomsBlockStore, or nil if no ChunkJournal is being used.
-func (nbs *NomsBlockStore) ChunkJournal() *ChunkJournal {
+func (nbs *NomsBlockStore) chunkJournal() *ChunkJournal {
 	if cj, ok := nbs.persister.(*ChunkJournal); ok {
 		return cj
 	}
 	return nil
 }
+func (nbs *NomsBlockStore) ChunkJournalSize() (int64, bool) {
+	nbs.mu.Lock()
+	defer nbs.mu.Unlock()
+	if cj := nbs.chunkJournal(); cj != nil {
+		return cj.Size(), true
+	}
+	return 0, false
+}
 func (nbs *NomsBlockStore) GetChunkLocationsWithPaths(ctx context.Context, hashes hash.HashSet) (map[string]map[hash.Hash]Range, error) {
 	valctx.ValidateContext(ctx)
 	sourcesToRanges, err := nbs.getChunkLocations(ctx, hashes)
@@ -1974,7 +1992,7 @@ func (nbs *NomsBlockStore) hasLocalGCNovelty() bool {
 	if len(nbs.tables.novel) != 0 {
 		return true
 	}
-	if cj := nbs.ChunkJournal(); cj != nil && cj.wr != nil {
+	if cj := nbs.chunkJournal(); cj != nil && cj.wr != nil {
 		return true
 	}
 	return false
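
Inside the package, the shape is an unexported accessor that type-asserts the persister plus narrow exported queries that take the store lock first. A self-contained sketch of that shape using stand-in types (store, journal, and persister here are illustrative, not the real nbs types):

package main

import (
	"fmt"
	"sync"
)

// persister and journal are stand-ins for nbs's persister and ChunkJournal.
type persister interface{ name() string }

type journal struct{ size int64 }

func (j *journal) name() string { return "journal" }
func (j *journal) Size() int64  { return j.size }

// store mirrors the shape of NomsBlockStore for this sketch only.
type store struct {
	mu sync.Mutex
	p  persister
}

// chunkJournal is the unexported accessor: a type assertion that yields the
// journal when the persister happens to be one, and nil otherwise.
func (s *store) chunkJournal() *journal {
	if j, ok := s.p.(*journal); ok {
		return j
	}
	return nil
}

// ChunkJournalSize is the narrow exported query: it takes the lock, asks the
// journal if present, and reports (0, false) otherwise.
func (s *store) ChunkJournalSize() (int64, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if j := s.chunkJournal(); j != nil {
		return j.Size(), true
	}
	return 0, false
}

func main() {
	s := &store{p: &journal{size: 4096}}
	if sz, ok := s.ChunkJournalSize(); ok {
		fmt.Printf("journal size: %d bytes\n", sz)
	}
}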