go/store/nbs: Add info level logging for a store conjoin.

Author: Aaron Son
Date:   2025-04-01 13:42:47 -07:00
Parent: 11703eaa67
Commit: 027cf87b4f

4 changed files with 42 additions and 5 deletions
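For context, a minimal standalone sketch (not part of the commit) of the logrus idiom the change builds on: WithField and WithFields return a new *logrus.Entry carrying the accumulated fields, and Info emits at logrus's default level. The database name and upstream_len values below are illustrative.

	package main

	import "github.com/sirupsen/logrus"

	func main() {
		// Start from the process-wide logger and attach a static field,
		// mirroring how the block store tags its logger with pkg=store.noms.
		logger := logrus.StandardLogger().WithField("pkg", "store.noms")

		// WithFields returns a new *logrus.Entry; fields accumulate as the
		// entry is passed around and extended.
		logger = logger.WithFields(logrus.Fields{"database": "mydb", "generation": "new"})

		// Info-level message with all accumulated fields attached.
		logger.WithField("upstream_len", 12).Info("beginning conjoin of database")
	}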


@@ -23,6 +23,8 @@ import (
"path/filepath"
"sync"
"github.com/sirupsen/logrus"
"github.com/dolthub/dolt/go/libraries/doltcore/dconfig"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/datas"
@@ -51,6 +53,8 @@ const (
	StatsDir = "stats"
	ChunkJournalParam = "journal"
	DatabaseNameParam = "database_name"
)
// DoltDataDir is the directory where noms files will be stored
@@ -179,6 +183,14 @@ func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat,
	st := nbs.NewGenerationalCS(oldGenSt, newGenSt, ghostGen)
	// metrics?
	if params != nil {
		if nameV, ok := params[DatabaseNameParam]; ok && nameV != nil {
			if name, ok := nameV.(string); ok && name != "" {
				st.AppendLoggerFields(logrus.Fields{"database": name})
			}
		}
	}
	vrw := types.NewValueStore(st)
	ns := tree.NewNodeStore(st)
	ddb := datas.NewTypesDatabase(vrw, ns)
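The nested checks above guard against a missing, nil, non-string, or empty database name. A standalone sketch of the same defensive extraction, assuming nothing beyond a params map; loggerFieldsFromParams and the lowercase constant exist only in this sketch:

	package main

	import (
		"fmt"

		"github.com/sirupsen/logrus"
	)

	// databaseNameParam mirrors the new DatabaseNameParam key; the name here
	// is local to this sketch.
	const databaseNameParam = "database_name"

	// loggerFieldsFromParams returns logger fields only when the value is
	// present, non-nil, a string, and non-empty, as in the hunk above.
	func loggerFieldsFromParams(params map[string]interface{}) logrus.Fields {
		if params == nil {
			return nil
		}
		nameV, ok := params[databaseNameParam]
		if !ok || nameV == nil {
			return nil
		}
		name, ok := nameV.(string)
		if !ok || name == "" {
			return nil
		}
		return logrus.Fields{"database": name}
	}

	func main() {
		params := map[string]interface{}{databaseNameParam: "mydb"}
		fmt.Println(loggerFieldsFromParams(params)) // map[database:mydb]
	}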


@@ -120,6 +120,8 @@ func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs
}
func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys, params map[string]interface{}) (*DoltDB, error) {
	params = make(map[string]any)
	if urlStr == LocalDirDoltDB {
		exists, isDir := fs.Exists(dbfactory.DoltDataDir)
		if !exists {
@@ -135,9 +137,6 @@ func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr
		urlStr = earl.FileUrlFromPath(filepath.ToSlash(absPath), os.PathSeparator)
		if params == nil {
			params = make(map[string]any)
		}
		params[dbfactory.ChunkJournalParam] = struct{}{}
	}
@@ -146,6 +145,8 @@ func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr
	// won't work for other storage schemes though.
	name := findParentDirectory(urlStr, ".dolt")
	params[dbfactory.DatabaseNameParam] = name
	db, vrw, ns, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
	if err != nil {
		return nil, err
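findParentDirectory is not shown in this diff. As an illustration only, a helper with the rough shape the call above suggests might look like the sketch below; the real implementation in doltdb.go may differ:

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	// parentOfDotDolt is an illustrative stand-in for findParentDirectory.
	// It walks the path components from the right and returns the name of the
	// directory containing the target component, which becomes the database name.
	func parentOfDotDolt(path string, target string) string {
		parts := strings.Split(filepath.ToSlash(path), "/")
		for i := len(parts) - 1; i > 0; i-- {
			if parts[i] == target {
				return parts[i-1]
			}
		}
		return ""
	}

	func main() {
		// e.g. /Users/alice/dbs/mydb/.dolt/noms yields "mydb"
		fmt.Println(parentOfDotDolt("/Users/alice/dbs/mydb/.dolt/noms", ".dolt"))
	}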


@@ -23,6 +23,8 @@ import (
"strings"
"sync"
"github.com/sirupsen/logrus"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -58,6 +60,9 @@ func NewGenerationalCS(oldGen, newGen *NomsBlockStore, ghostGen *GhostBlockStore
		panic("oldgen and newgen chunkstore versions vary")
	}
	oldGen.AppendLoggerFields(logrus.Fields{"generation": "old"})
	newGen.AppendLoggerFields(logrus.Fields{"generation": "new"})
	return &GenerationalNBS{
		oldGen: oldGen,
		newGen: newGen,
@@ -65,6 +70,11 @@ func NewGenerationalCS(oldGen, newGen *NomsBlockStore, ghostGen *GhostBlockStore
	}
}
func (gcs *GenerationalNBS) AppendLoggerFields(fields logrus.Fields) {
	gcs.oldGen.AppendLoggerFields(fields)
	gcs.newGen.AppendLoggerFields(fields)
}
func (gcs *GenerationalNBS) NewGen() chunks.ChunkStoreGarbageCollector {
	return gcs.newGen
}
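The wrapper simply fans the call out to both generations, so a field appended once (for example the database name) shows up on the old-gen and new-gen loggers alongside their per-generation field. A standalone sketch of that pattern, with fieldAppender and generational standing in for the real store types:

	package main

	import "github.com/sirupsen/logrus"

	// fieldAppender models the small surface the generational store relies on:
	// each generation keeps its own *logrus.Entry and can extend it with fields.
	type fieldAppender struct {
		logger *logrus.Entry
	}

	func (a *fieldAppender) AppendLoggerFields(fields logrus.Fields) {
		a.logger = a.logger.WithFields(fields)
	}

	type generational struct {
		oldGen, newGen *fieldAppender
	}

	// AppendLoggerFields fans the call out to both generations, mirroring
	// GenerationalNBS.AppendLoggerFields above.
	func (g *generational) AppendLoggerFields(fields logrus.Fields) {
		g.oldGen.AppendLoggerFields(fields)
		g.newGen.AppendLoggerFields(fields)
	}

	func main() {
		base := logrus.StandardLogger().WithField("pkg", "store.noms")
		g := &generational{
			oldGen: &fieldAppender{logger: base.WithField("generation", "old")},
			newGen: &fieldAppender{logger: base.WithField("generation", "new")},
		}
		// A database name appended at the generational level reaches both entries.
		g.AppendLoggerFields(logrus.Fields{"database": "mydb"})
		g.oldGen.logger.Info("old gen logger ready")
		g.newGen.logger.Info("new gen logger ready")
	}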


@@ -38,6 +38,7 @@ import (
lru "github.com/hashicorp/golang-lru/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -123,6 +124,8 @@ type NomsBlockStore struct {
	mtSize uint64
	putCount uint64
	logger *logrus.Entry
	hasCache *lru.TwoQueueCache[hash.Hash, struct{}]
	stats *Stats
@@ -132,8 +135,8 @@ func (nbs *NomsBlockStore) PersistGhostHashes(ctx context.Context, refs hash.Has
	return fmt.Errorf("runtime error: PersistGhostHashes should never be called on the NomsBlockStore")
}
var _ chunks.TableFileStore = &NomsBlockStore{}
var _ chunks.ChunkStoreGarbageCollector = &NomsBlockStore{}
var _ chunks.TableFileStore = (*NomsBlockStore)(nil)
var _ chunks.ChunkStoreGarbageCollector = (*NomsBlockStore)(nil)
// 20-byte keys, ~2MB of key data.
//
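The rewritten assertions use the typed-nil form, which checks interface satisfaction at compile time without constructing a value. A minimal sketch of the idiom with an unrelated type:

	package main

	import "io"

	type buffered struct{}

	func (b *buffered) Write(p []byte) (int, error) { return len(p), nil }

	// Compile-time check that *buffered implements io.Writer without allocating
	// a value; the same (*T)(nil) idiom as the NomsBlockStore assertions above.
	var _ io.Writer = (*buffered)(nil)

	func main() {}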
@@ -266,19 +269,23 @@ func (nbs *NomsBlockStore) handleUnlockedRead(ctx context.Context, gcb gcBehavio
func (nbs *NomsBlockStore) conjoinIfRequired(ctx context.Context) (bool, error) {
	if nbs.c.conjoinRequired(nbs.tables) {
		nbs.logger.WithField("upstream_len", len(nbs.tables.upstream)).Info("beginning conjoin of database")
		newUpstream, cleanup, err := conjoin(ctx, nbs.c, nbs.upstream, nbs.mm, nbs.p, nbs.stats)
		if err != nil {
			nbs.logger.WithError(err).Info("conjoin of database failed")
			return false, err
		}
		newTables, err := nbs.tables.rebase(ctx, newUpstream.specs, nil, nbs.stats)
		if err != nil {
			nbs.logger.WithError(err).Info("during conjoin, updating database with new table files failed")
			return false, err
		}
		nbs.upstream = newUpstream
		oldTables := nbs.tables
		nbs.tables = newTables
		nbs.logger.WithField("new_upstream_len", len(nbs.tables.upstream)).Info("conjoin completed successfully")
		err = oldTables.close()
		if err != nil {
			return true, err
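logrus logs at InfoLevel by default, so these conjoin messages appear without extra configuration. A standalone sketch of how the success and failure paths compose their entries; the field values and the error are illustrative:

	package main

	import (
		"errors"

		"github.com/sirupsen/logrus"
	)

	func main() {
		logger := logrus.StandardLogger().WithFields(logrus.Fields{
			"pkg":      "store.noms",
			"database": "mydb", // illustrative value
		})

		// Success path: field-only Info messages, as in conjoinIfRequired.
		logger.WithField("upstream_len", 32).Info("beginning conjoin of database")
		logger.WithField("new_upstream_len", 1).Info("conjoin completed successfully")

		// Failure path: WithError attaches the error under logrus's "error" field.
		logger.WithError(errors.New("manifest update race")).Info("conjoin of database failed")
	}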
@@ -643,6 +650,7 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
		mtSize: memTableSize,
		hasCache: hasCache,
		stats: NewStats(),
		logger: logrus.StandardLogger().WithField("pkg", "store.noms"),
	}
	nbs.cond = sync.NewCond(&nbs.mu)
@@ -674,6 +682,11 @@ func newNomsBlockStore(ctx context.Context, nbfVerStr string, mm manifestManager
	return nbs, nil
}
// Sets logging fields for the logger used by this store.
func (nbs *NomsBlockStore) AppendLoggerFields(fields logrus.Fields) {
	nbs.logger = nbs.logger.WithFields(fields)
}
// WithoutConjoiner returns a new *NomsBlockStore instance that will not
// conjoin table files during manifest updates. Used in some server-side
// contexts when things like table file maintenance is done out-of-process. Not
@@ -691,6 +704,7 @@ func (nbs *NomsBlockStore) WithoutConjoiner() *NomsBlockStore {
		putCount: nbs.putCount,
		hasCache: nbs.hasCache,
		stats: nbs.stats,
		logger: nbs.logger,
	}
}