Mirror of https://github.com/XTXMarkets/ternfs.git, synced 2025-12-30 15:30:28 -06:00.
Factor out go files in more meaningful directories
Since we're going to add licenses soon
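The diff below is large but almost entirely mechanical: judging from the renames it contains, the old catch-all lib package is split into focused packages — log (Logger, XmonNCAlert, XMON_* alert types), bufpool (BufPool), timing (Timings, ReqTimeouts, RateLimit, NoTimeouts), cbcmac (CBCMAC), and log/recover (HandleRecoverChan, imported as lrecover) — and every caller is updated accordingly. As an illustrative sketch only (not part of the commit), a typical caller changes roughly like this:

package example

import (
	"xtx/ternfs/bufpool"
	"xtx/ternfs/log"
	lrecover "xtx/ternfs/log/recover"
	"xtx/ternfs/timing"
)

// work is a hypothetical function showing the new package/identifier mapping:
// lib.Logger -> log.Logger, lib.BufPool -> bufpool.BufPool,
// lib.RateLimit -> timing.RateLimit, lib.HandleRecoverChan -> lrecover.HandleRecoverChan.
func work(l *log.Logger, bufPool *bufpool.BufPool, rateLimit *timing.RateLimit, terminateChan chan any) {
	// Same panic-recovery idiom the commit rewrites throughout the cleanup code.
	defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
	_ = bufPool
	_ = rateLimit
}
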
@@ -1,70 +0,0 @@
#pragma once

#include <stddef.h>
#include <utility>
#include <unistd.h>
#include <sys/mman.h>

#include "Exception.hpp"
#include "Common.hpp"

class FDHolder {
public:
FDHolder() : _fd(-1) {}
FDHolder(int fd) : _fd(fd) {}
FDHolder(FDHolder && rhs) : _fd(-1) { std::swap(_fd, rhs._fd); }
FDHolder & operator=(FDHolder && rhs) { std::swap(_fd, rhs._fd); return *this; }
FDHolder & operator=(int fd) { return *this = FDHolder(fd); }
~FDHolder() { if (_fd >= 0) close(_fd); }
int operator*() const { return _fd; }
explicit operator bool() const { return _fd >= 0; }
void reset() { *this = FDHolder(); }
FDHolder clone() const { int ret = dup(_fd); if (ret == -1) throw SYSCALL_EXCEPTION("dup"); return ret; }

private:
int _fd;
};

template<typename Type, size_t Size>
class MMapHolder {
public:
static constexpr size_t SIZE = Size;

MMapHolder() : _ptr(MAP_FAILED) {}
MMapHolder(void * ptr) : _ptr(ptr) {}
MMapHolder(MMapHolder && rhs) : _ptr(MAP_FAILED) { std::swap(_ptr, rhs._ptr); }
MMapHolder & operator=(MMapHolder && rhs) { std::swap(_ptr, rhs._ptr); return *this; }
MMapHolder & operator=(void * ptr) { return *this = MMapHolder(ptr); }
~MMapHolder() { if (_ptr != MAP_FAILED) munmap(_ptr, Size); }
Type operator*() const { return reinterpret_cast<Type>(_ptr); }
Type operator->() const { return reinterpret_cast<Type>(_ptr); }
typename std::remove_pointer<Type>::type & operator[](size_t i) const { return reinterpret_cast<Type>(_ptr)[i]; }
explicit operator bool() const { return _ptr != MAP_FAILED; }
void reset() { *this = MMapHolder(); }

private:
void * _ptr;
};

template<typename Type>
class DynamicMMapHolder {
public:
DynamicMMapHolder() : _ptr(MAP_FAILED), _sz(0) {}
DynamicMMapHolder(void * ptr, size_t sz) : _ptr(ptr), _sz(sz) {}
DynamicMMapHolder(DynamicMMapHolder && rhs) : _ptr(MAP_FAILED), _sz(0) { std::swap(_ptr, rhs._ptr); std::swap(_sz, rhs._sz); }
DynamicMMapHolder & operator=(DynamicMMapHolder && rhs) { std::swap(_ptr, rhs._ptr); std::swap(_sz, rhs._sz); return *this; }
~DynamicMMapHolder() { if (_ptr != MAP_FAILED) munmap(_ptr, _sz); }
Type operator*() const { return reinterpret_cast<Type>(_ptr); }
Type operator->() const { return reinterpret_cast<Type>(_ptr); }
typename std::remove_pointer<Type>::type & operator[](size_t i) const { return reinterpret_cast<Type>(_ptr)[i]; }
explicit operator bool() const { return _ptr != MAP_FAILED; }
void reset() { *this = DynamicMMapHolder(); }
size_t size() const { return _sz; }

private:
void * _ptr;
size_t _sz;
};

@@ -1,4 +1,4 @@
package lib
package bufpool

import (
"sync"
@@ -1,4 +1,4 @@
package lib
package cbcmac

import (
"crypto/cipher"
@@ -1,4 +1,4 @@
package lib
package cbcmac

import (
"crypto/aes"
@@ -1,15 +0,0 @@
package cdckey

import (
"crypto/aes"
"crypto/cipher"
"fmt"
)

func CDCKey() cipher.Block {
cipher, err := aes.NewCipher([]byte("\xa1\x11\x1c\xf0\xf6+\xba\x02%\xd2f\xe7\xa6\x94\x86\xfe"))
if err != nil {
panic(fmt.Errorf("could not create AES-128 key: %w", err))
}
return cipher
}

@@ -4,7 +4,7 @@ import (
"bytes"
"crypto/cipher"
"encoding/binary"
"xtx/ternfs/lib"
"xtx/ternfs/cbcmac"
"xtx/ternfs/msgs"
)

@@ -15,7 +15,7 @@ func BlockWriteCertificate(cipher cipher.Block, blockServiceId msgs.BlockService
binary.Write(w, binary.LittleEndian, uint64(req.BlockId))
binary.Write(w, binary.LittleEndian, uint32(req.Crc))
binary.Write(w, binary.LittleEndian, uint32(req.Size))
return lib.CBCMAC(cipher, w.Bytes())
return cbcmac.CBCMAC(cipher, w.Bytes())
}

func CheckBlockWriteCertificate(cipher cipher.Block, blockServiceId msgs.BlockServiceId, req *msgs.WriteBlockReq) ([8]byte, bool) {
@@ -30,7 +30,7 @@ func BlockEraseCertificate(blockServiceId msgs.BlockServiceId, blockId msgs.Bloc
buf.Write([]byte{'e'})
binary.Write(buf, binary.LittleEndian, uint64(blockId))

return lib.CBCMAC(key, buf.Bytes())
return cbcmac.CBCMAC(key, buf.Bytes())
}

func CheckBlockEraseCertificate(blockServiceId msgs.BlockServiceId, cipher cipher.Block, req *msgs.EraseBlockReq) ([8]byte, bool) {
@@ -45,7 +45,7 @@ func BlockEraseProof(blockServiceId msgs.BlockServiceId, blockId msgs.BlockId, k
buf.Write([]byte{'E'})
binary.Write(buf, binary.LittleEndian, uint64(blockId))

return lib.CBCMAC(key, buf.Bytes())
return cbcmac.CBCMAC(key, buf.Bytes())
}

func CheckBlockEraseProof(blockServiceId msgs.BlockServiceId, cipher cipher.Block, req *msgs.EraseBlockReq) ([8]byte, bool) {

@@ -6,8 +6,10 @@ import (
"sync/atomic"
"time"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
lrecover "xtx/ternfs/log/recover"
"xtx/ternfs/msgs"
"xtx/ternfs/timing"
)

type CollectDirectoriesStats struct {
@@ -26,7 +28,7 @@ type CollectDirectoriesState struct {

// returns whether all the edges were removed
func applyPolicy(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *CollectDirectoriesStats,
dirId msgs.InodeId,
@@ -90,7 +92,7 @@ func applyPolicy(
return toCollect == len(edges), nil
}

func CollectDirectory(log *lib.Logger, c *client.Client, dirInfoCache *client.DirInfoCache, stats *CollectDirectoriesStats, dirId msgs.InodeId, minEdgeAge time.Duration) error {
func CollectDirectory(log *log.Logger, c *client.Client, dirInfoCache *client.DirInfoCache, stats *CollectDirectoriesStats, dirId msgs.InodeId, minEdgeAge time.Duration) error {
log.Debug("%v: collecting", dirId)
atomic.AddUint64(&stats.VisitedDirectories, 1)

@@ -158,10 +160,10 @@ func CollectDirectory(log *lib.Logger, c *client.Client, dirInfoCache *client.Di
}

func collectDirectoriesWorker(
log *lib.Logger,
log *log.Logger,
c *client.Client,
dirInfoCache *client.DirInfoCache,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
stats *CollectDirectoriesState,
shid msgs.ShardId,
workersChan chan msgs.InodeId,
@@ -191,7 +193,7 @@ func collectDirectoriesWorker(
}

func collectDirectoriesScraper(
log *lib.Logger,
log *log.Logger,
c *client.Client,
state *CollectDirectoriesState,
shid msgs.ShardId,
@@ -230,10 +232,10 @@ type CollectDirectoriesOpts struct {
}

func CollectDirectories(
log *lib.Logger,
log *log.Logger,
c *client.Client,
dirInfoCache *client.DirInfoCache,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
opts *CollectDirectoriesOpts,
state *CollectDirectoriesState,
shid msgs.ShardId,
@@ -249,7 +251,7 @@ func CollectDirectories(
workerChan := make(chan msgs.InodeId, opts.WorkersQueueSize)

go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
collectDirectoriesScraper(log, c, state, shid, workerChan, terminateChan)
}()

@@ -257,7 +259,7 @@ func CollectDirectories(
workersWg.Add(opts.NumWorkersPerShard)
for j := 0; j < opts.NumWorkersPerShard; j++ {
go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
collectDirectoriesWorker(log, c, dirInfoCache, rateLimit, state, shid, workerChan, terminateChan, minEdgeAge)
workersWg.Done()
}()
@@ -279,10 +281,10 @@ func CollectDirectories(
}

func CollectDirectoriesInAllShards(
log *lib.Logger,
log *log.Logger,
c *client.Client,
dirInfoCache *client.DirInfoCache,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
opts *CollectDirectoriesOpts,
state *CollectDirectoriesState,
minEdgeAge time.Duration,
@@ -294,7 +296,7 @@ func CollectDirectoriesInAllShards(
for i := 0; i < 256; i++ {
shid := msgs.ShardId(i)
go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
if err := CollectDirectories(log, c, dirInfoCache, rateLimit, opts, state, shid, minEdgeAge); err != nil {
panic(err)
}

@@ -8,10 +8,11 @@ import (
"path"
"sync/atomic"
"time"
"xtx/ternfs/bufpool"
"xtx/ternfs/cleanup/scratch"
"xtx/ternfs/client"
"xtx/ternfs/crc32c"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -27,7 +28,7 @@ type DefragStats struct {
DefraggedPhysicalBytesAfter uint64
}

func defragPrintStatsLastReport(log *lib.Logger, c *client.Client, stats *DefragStats, timeStats *timeStats, progressReportAlert *lib.XmonNCAlert, lastReport int64, now int64) {
func defragPrintStatsLastReport(log *log.Logger, c *client.Client, stats *DefragStats, timeStats *timeStats, progressReportAlert *log.XmonNCAlert, lastReport int64, now int64) {
timeSinceStart := time.Duration(now - atomic.LoadInt64(&timeStats.startedAt))
physicalDeltaMB := (float64(stats.DefraggedPhysicalBytesAfter) - float64(stats.DefraggedPhysicalBytesBefore)) / 1e6
physicalDeltaMBs := 1000.0 * physicalDeltaMB / float64(timeSinceStart.Milliseconds())
@@ -44,12 +45,12 @@ func defragPrintStatsLastReport(log *lib.Logger, c *client.Client, stats *Defrag
}

func defragFileInternal(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
stats *DefragStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
timeStats *timeStats,
parent msgs.InodeId,
fileId msgs.InodeId,
@@ -173,12 +174,12 @@ func defragFileInternal(
}

func DefragFile(
log *lib.Logger,
log *log.Logger,
client *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
stats *DefragStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
parent msgs.InodeId,
fileId msgs.InodeId,
filePath string,
@@ -195,12 +196,12 @@ type DefragOptions struct {
}

func DefragFiles(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
stats *DefragStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
options *DefragOptions,
root string,
) error {
@@ -234,12 +235,12 @@ type DefragSpansStats struct {

// Replaces a file with another, identical one.
func defragFileReplace(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
stats *DefragSpansStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
timeStats *timeStats,
parent msgs.InodeId,
fileId msgs.InodeId,
@@ -311,12 +312,12 @@ func defragFileReplace(
}

func DefragSpans(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
stats *DefragSpansStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
root string,
) error {
timeStats := newTimeStats()

@@ -5,7 +5,8 @@ import (
"sync"
"sync/atomic"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
lrecover "xtx/ternfs/log/recover"
"xtx/ternfs/msgs"
)

@@ -33,7 +34,7 @@ func (c CouldNotEraseBlocksInBlockServices) Error() string {
}

func DestructFile(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *DestructFilesStats,
id msgs.InodeId,
@@ -138,7 +139,7 @@ type destructFileRequest struct {
}

func destructFilesWorker(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *DestructFilesState,
shid msgs.ShardId,
@@ -170,7 +171,7 @@ func destructFilesWorker(
}

func destructFilesScraper(
log *lib.Logger,
log *log.Logger,
c *client.Client,
state *DestructFilesState,
terminateChan chan<- any,
@@ -222,7 +223,7 @@ type DestructFilesOptions struct {
}

func DestructFiles(
log *lib.Logger,
log *log.Logger,
c *client.Client,
opts *DestructFilesOptions,
stats *DestructFilesState,
@@ -238,7 +239,7 @@ func DestructFiles(
log.Info("destructing files in shard %v", shid)

go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
destructFilesScraper(log, c, stats, terminateChan, shid, workersChan)
}()

@@ -248,7 +249,7 @@ func DestructFiles(
for j := 0; j < opts.NumWorkersPerShard; j++ {
go func() {
defer workersWg.Done()
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
destructFilesWorker(log, c, stats, shid, workersChan, terminateChan)
}()
}
@@ -278,7 +279,7 @@ func DestructFiles(
}

func DestructFilesInAllShards(
log *lib.Logger,
log *log.Logger,
c *client.Client,
opts *DestructFilesOptions,
stats *DestructFilesState,
@@ -290,7 +291,7 @@ func DestructFilesInAllShards(
for i := 0; i < 256; i++ {
shid := msgs.ShardId(i)
go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
if err := DestructFiles(log, c, opts, stats, shid); err != nil {
panic(err)
}

@@ -21,13 +21,15 @@ import (
"sync"
"sync/atomic"
"time"
"xtx/ternfs/bufpool"
"xtx/ternfs/cleanup/scratch"
"xtx/ternfs/client"
"xtx/ternfs/crc32c"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/parity"
"xtx/ternfs/rs"
"xtx/ternfs/timing"
)

type MigrateStats struct {
@@ -42,7 +44,7 @@ type MigrateState struct {
}

func fetchBlock(
log *lib.Logger,
log *log.Logger,
c *client.Client,
fileId msgs.InodeId,
blockServices []msgs.BlockService,
@@ -51,7 +53,7 @@ func fetchBlock(
) (*bytes.Buffer, error) {
blockService := &blockServices[block.BlockServiceIx]
// fail immediately to other block services
data, err := c.FetchBlock(log, &lib.NoTimeouts, blockService, block.BlockId, 0, blockSize, block.Crc)
data, err := c.FetchBlock(log, &timing.NoTimeouts, blockService, block.BlockId, 0, blockSize, block.Crc)
if err != nil {
log.Info("couldn't fetch block %v in file %v in block service %v: %v", block.BlockId, fileId, blockService, err)
return nil, err
@@ -68,7 +70,7 @@ func fetchBlock(
}

func writeBlock(
log *lib.Logger,
log *log.Logger,
c *client.Client,
scratch scratch.ScratchFile,
file msgs.InodeId,
@@ -140,7 +142,7 @@ func writeBlock(

// the bool is whether we found an error that we can retry
func copyBlock(
log *lib.Logger,
log *log.Logger,
c *client.Client,
scratch scratch.ScratchFile,
file msgs.InodeId,
@@ -161,9 +163,9 @@ func copyBlock(
}

func reconstructBlock(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
fileId msgs.InodeId,
scratchFile scratch.ScratchFile,
blockServices []msgs.BlockService,
@@ -239,7 +241,7 @@ func newTimeStats() *timeStats {
return &timeStats{startedAt: now, lastReportAt: now}
}

func printStatsLastReport(log *lib.Logger, what string, c *client.Client, stats *MigrateStats, timeStats *timeStats, progressReportAlert *lib.XmonNCAlert, lastReport int64, now int64) {
func printStatsLastReport(log *log.Logger, what string, c *client.Client, stats *MigrateStats, timeStats *timeStats, progressReportAlert *log.XmonNCAlert, lastReport int64, now int64) {
timeSinceLastReport := time.Duration(now - lastReport)
timeSinceStart := time.Duration(now - atomic.LoadInt64(&timeStats.startedAt))
overallMB := float64(stats.MigratedBytes) / 1e6
@@ -251,19 +253,19 @@ func printStatsLastReport(log *lib.Logger, what string, c *client.Client, stats
timeStats.lastReportBytes = stats.MigratedBytes
}

func printMigrateStats(log *lib.Logger, what string, c *client.Client, stats *MigrateStats, timeStats *timeStats, progressReportAlert *lib.XmonNCAlert) {
func printMigrateStats(log *log.Logger, what string, c *client.Client, stats *MigrateStats, timeStats *timeStats, progressReportAlert *log.XmonNCAlert) {
printStatsLastReport(log, what, c, stats, timeStats, progressReportAlert, atomic.LoadInt64(&timeStats.lastReportAt), time.Now().UnixNano())
}

// We reuse this functionality for scrubbing, they're basically doing the same
// thing.
func migrateBlocksInFileGeneric(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
stats *MigrateStats,
timeStats *timeStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
what string,
badBlock func(blockService *msgs.BlockService, blockSize uint32, block *msgs.FetchedBlock) (bool, error),
scratchFile scratch.ScratchFile,
@@ -454,10 +456,10 @@ func migrateBlocksInFileGeneric(
// If the source block service it's still healthy, it'll just copy the block over, otherwise
// it'll be recovered from the other. If possible, anyway.
func MigrateBlocksInFile(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *MigrateStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
blockServiceId msgs.BlockServiceId,
fileId msgs.InodeId,
) error {
@@ -466,18 +468,18 @@ func MigrateBlocksInFile(
badBlock := func(blockService *msgs.BlockService, blockSize uint32, block *msgs.FetchedBlock) (bool, error) {
return blockService.Id == blockServiceId, nil
}
return migrateBlocksInFileGeneric(log, c, lib.NewBufPool(), stats, newTimeStats(), progressReportAlert, fmt.Sprintf("%v: migrated", blockServiceId), badBlock, scratchFile, fileId)
return migrateBlocksInFileGeneric(log, c, bufpool.NewBufPool(), stats, newTimeStats(), progressReportAlert, fmt.Sprintf("%v: migrated", blockServiceId), badBlock, scratchFile, fileId)
}

// Tries to migrate as many blocks as possible from that block service in a certain
// shard.
func migrateBlocksInternal(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
stats *MigrateStats,
timeStats *timeStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
shid msgs.ShardId,
blockServiceId msgs.BlockServiceId,
) error {
@@ -521,15 +523,15 @@ func migrateBlocksInternal(
}

func MigrateBlocks(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *MigrateStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
shid msgs.ShardId,
blockServiceId msgs.BlockServiceId,
) error {
timeStats := newTimeStats()
bufPool := lib.NewBufPool()
bufPool := bufpool.NewBufPool()
if err := migrateBlocksInternal(log, c, bufPool, stats, timeStats, progressReportAlert, shid, blockServiceId); err != nil {
return err
}
@@ -539,14 +541,14 @@ func MigrateBlocks(
}

func MigrateBlocksInAllShards(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *MigrateStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
blockServiceId msgs.BlockServiceId,
) error {
timeStats := newTimeStats()
bufPool := lib.NewBufPool()
bufPool := bufpool.NewBufPool()
var wg sync.WaitGroup
wg.Add(256)
failed := int32(0)
@@ -577,7 +579,7 @@ type fileMigrationResult struct {

type migrator struct {
shuckleAddress string
log *lib.Logger
log *log.Logger
client *client.Client
numMigrators uint64
migratorIdx uint64
@@ -597,7 +599,7 @@ type migrator struct {
failureDomainFilter string
}

func Migrator(shuckleAddress string, log *lib.Logger, client *client.Client, numMigrators uint64, migratorIdx uint64, numFilesPerShard int, logOnly bool, failureDomain string) *migrator {
func Migrator(shuckleAddress string, log *log.Logger, client *client.Client, numMigrators uint64, migratorIdx uint64, numFilesPerShard int, logOnly bool, failureDomain string) *migrator {
res := migrator{
shuckleAddress,
log,
@@ -633,7 +635,7 @@ func (m *migrator) Run() {
m.runFileAggregator(&aggregatorWaitGroup)
m.runFileMigrators(&migratorsWaitGroup)
shuckleResponseAlert := m.log.NewNCAlert(5 * time.Minute)
shuckleResponseAlert.SetAppType(lib.XMON_DAYTIME)
shuckleResponseAlert.SetAppType(log.XMON_DAYTIME)
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
OUT:
@@ -826,7 +828,7 @@ func (m *migrator) runFileAggregator(wg *sync.WaitGroup) {
}
}
inProgressAlert := m.log.NewNCAlert(1 * time.Minute)
inProgressAlert.SetAppType(lib.XMON_NEVER)
inProgressAlert.SetAppType(log.XMON_NEVER)
for {
select {
case newFileId, ok := <-m.fileAggregatorNewFile:
@@ -906,7 +908,7 @@ func (m *migrator) runFileMigrators(wg *sync.WaitGroup) {
_, ok := m.scheduledBlockServices[blockService.Id]
return ok, nil
}
bufPool := lib.NewBufPool()
bufPool := bufpool.NewBufPool()
for i := 0; i < len(m.fileMigratorsNewFile); i++ {
for j := 0; j < m.numFilesPerShard; j++ {
wg.Add(1)

@@ -5,7 +5,7 @@ import (
"sync"
"time"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -16,7 +16,7 @@ type ScratchFile interface {
FileId() msgs.InodeId
}

func NewScratchFile(log *lib.Logger, c *client.Client, shard msgs.ShardId, note string) ScratchFile {
func NewScratchFile(log *log.Logger, c *client.Client, shard msgs.ShardId, note string) ScratchFile {
scratch := &scratchFile{
log: log,
c: c,
@@ -186,7 +186,7 @@ func (f *scratchFile) FileId() msgs.InodeId {
}

type scratchFile struct {
log *lib.Logger
log *log.Logger
c *client.Client
shard msgs.ShardId
note string

@@ -5,10 +5,13 @@ import (
"sync"
"sync/atomic"
"time"
"xtx/ternfs/bufpool"
"xtx/ternfs/cleanup/scratch"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
lrecover "xtx/ternfs/log/recover"
"xtx/ternfs/msgs"
"xtx/ternfs/timing"
)

type ScrubState struct {
@@ -32,12 +35,12 @@ func badBlockError(err error) bool {
}

func scrubFileInternal(
log *lib.Logger,
log *log.Logger,
c *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
stats *ScrubState,
timeStats *timeStats,
progressReportAlert *lib.XmonNCAlert,
progressReportAlert *log.XmonNCAlert,
scratchFile scratch.ScratchFile,
file msgs.InodeId,
) error {
@@ -63,11 +66,11 @@ type scrubRequest struct {
}

func scrubWorker(
log *lib.Logger,
log *log.Logger,
c *client.Client,
opts *ScrubOptions,
stats *ScrubState,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
shid msgs.ShardId,
workerChan chan *scrubRequest,
terminateChan chan any,
@@ -76,7 +79,7 @@ func scrubWorker(
migratingFiles map[msgs.InodeId]struct{},
migratingFilesMu *sync.RWMutex,
) {
bufPool := lib.NewBufPool()
bufPool := bufpool.NewBufPool()
blockNotFoundAlert := log.NewNCAlert(0)
defer log.ClearNC(blockNotFoundAlert)
for {
@@ -131,7 +134,7 @@ func scrubWorker(
}

func migrateFileOnError(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *ScrubState,
shid msgs.ShardId,
@@ -141,8 +144,8 @@ func migrateFileOnError(
migratingFilesMu *sync.RWMutex,
req *scrubRequest,
err error,
bufPool *lib.BufPool,
blockNotFoundAlert *lib.XmonNCAlert,
bufPool *bufpool.BufPool,
blockNotFoundAlert *log.XmonNCAlert,
) bool {
migratingFilesMu.Lock()
_, ok := migratingFiles[req.file]
@@ -184,7 +187,7 @@ func migrateFileOnError(
}

func scrubScraper(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *ScrubState,
shid msgs.ShardId,
@@ -251,12 +254,12 @@ func scrubScraper(
}

func ScrubFile(
log *lib.Logger,
log *log.Logger,
c *client.Client,
stats *ScrubState,
file msgs.InodeId,
) error {
bufPool := lib.NewBufPool()
bufPool := bufpool.NewBufPool()
scratchFile := scratch.NewScratchFile(log, c, file.Shard(), fmt.Sprintf("scrub file %v", file))
defer scratchFile.Close()

@@ -264,10 +267,10 @@ func ScrubFile(
}

func ScrubFiles(
log *lib.Logger,
log *log.Logger,
c *client.Client,
opts *ScrubOptions,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
stats *ScrubState,
shid msgs.ShardId,
) error {
@@ -279,7 +282,7 @@ func ScrubFiles(
sendChan := make(chan *scrubRequest, opts.WorkersQueueSize)

go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
scrubScraper(log, c, stats, shid, terminateChan, sendChan)
}()

@@ -291,7 +294,7 @@ func ScrubFiles(

for i := 0; i < opts.NumWorkersPerShard; i++ {
go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
scratchFile := scratch.NewScratchFile(log, c, shid, fmt.Sprintf("scrubbing shard %v worked %d", shid, i))
defer scratchFile.Close()
scrubWorker(log, c, opts, stats, rateLimit, shid, sendChan, terminateChan, scratchFile, &scrubbingMu, migratingFiles, &migratingFilesMu)
@@ -317,10 +320,10 @@ func ScrubFiles(
}

func ScrubFilesInAllShards(
log *lib.Logger,
log *log.Logger,
c *client.Client,
opts *ScrubOptions,
rateLimit *lib.RateLimit,
rateLimit *timing.RateLimit,
state *ScrubState,
) error {
terminateChan := make(chan any, 1)
@@ -330,7 +333,7 @@ func ScrubFilesInAllShards(
for i := 0; i < 256; i++ {
shid := msgs.ShardId(i)
go func() {
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
if err := ScrubFiles(log, c, opts, rateLimit, state, shid); err != nil {
panic(err)
}

@@ -4,7 +4,7 @@ import (
"fmt"
"sync/atomic"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -13,7 +13,7 @@ type ZeroBlockServiceFilesStats struct {
ZeroBlockServiceFilesRemoved uint64
}

func CollectZeroBlockServiceFiles(log *lib.Logger, c *client.Client, stats *ZeroBlockServiceFilesStats) error {
func CollectZeroBlockServiceFiles(log *log.Logger, c *client.Client, stats *ZeroBlockServiceFilesStats) error {
log.Info("starting to collect block services files")
reqs := make([]msgs.RemoveZeroBlockServiceFilesReq, 256)
resps := make([]msgs.RemoveZeroBlockServiceFilesResp, 256)

@@ -6,14 +6,14 @@ import (
"io"
"math/rand"
"net"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

// A low-level utility for directly communication with block services.
//
// Currently this is not used by the main [Client] library at all.
func BlockServiceConnection(log *lib.Logger, addrs msgs.AddrsInfo) (*net.TCPConn, error) {
func BlockServiceConnection(log *log.Logger, addrs msgs.AddrsInfo) (*net.TCPConn, error) {
if addrs.Addr1.Port == 0 {
panic(fmt.Errorf("ip1/port1 must be provided"))
}
@@ -47,7 +47,7 @@ func BlockServiceConnection(log *lib.Logger, addrs msgs.AddrsInfo) (*net.TCPConn
panic("impossible")
}

func writeBlocksRequest(log *lib.Logger, w io.Writer, blockServiceId msgs.BlockServiceId, req msgs.BlocksRequest) error {
func writeBlocksRequest(log *log.Logger, w io.Writer, blockServiceId msgs.BlockServiceId, req msgs.BlocksRequest) error {
// log.Debug("writing blocks request %v for block service id %v: %+v", req.BlocksRequestKind(), blockServiceId, req)
if err := binary.Write(w, binary.LittleEndian, msgs.BLOCKS_REQ_PROTOCOL_VERSION); err != nil {
return err
@@ -65,7 +65,7 @@ func writeBlocksRequest(log *lib.Logger, w io.Writer, blockServiceId msgs.BlockS
}

func readBlocksResponse(
log *lib.Logger,
log *log.Logger,
r io.Reader,
resp msgs.BlocksResponse,
) error {
@@ -102,7 +102,7 @@ func readBlocksResponse(
}

func WriteBlock(
logger *lib.Logger,
logger *log.Logger,
conn interface {
io.ReaderFrom
io.Reader
@@ -147,7 +147,7 @@ func WriteBlock(
// Note that this function will _not_ check the CRC of the block! You should probably do that
// before using the block in any meaningful way.
func FetchBlock(
logger *lib.Logger,
logger *log.Logger,
conn interface {
io.Reader
io.Writer
@@ -174,7 +174,7 @@ func FetchBlock(
}

func EraseBlock(
logger *lib.Logger,
logger *log.Logger,
conn interface {
io.Writer
io.Reader
@@ -199,7 +199,7 @@ func EraseBlock(
}

func TestWrite(
logger *lib.Logger,
logger *log.Logger,
conn interface {
io.ReaderFrom
io.Reader

@@ -3,12 +3,12 @@ package client
import (
"fmt"
"net"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

func (c *Client) checkRepeatedCDCRequestError(
logger *lib.Logger,
logger *log.Logger,
// these are already filled in by now
reqBody msgs.CDCRequest,
resp msgs.CDCResponse,
@@ -66,7 +66,7 @@ func (c *Client) checkRepeatedCDCRequestError(
}

func (c *Client) CDCRequest(
logger *lib.Logger,
logger *log.Logger,
reqBody msgs.CDCRequest,
// Result will be written in here. If an error is returned, no guarantees
// are made regarding the contents of `respBody`.

@@ -27,12 +27,13 @@ import (
"unsafe"
"xtx/ternfs/bincode"
"xtx/ternfs/crc32c"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/timing"
)

type ReqCounters struct {
Timings lib.Timings
Timings timing.Timings
Attempts uint64
}

@@ -60,19 +61,19 @@ func NewClientCounters() *ClientCounters {
var shards [256]ReqCounters
counters.Shard[uint8(k)] = &shards
for i := 0; i < 256; i++ {
shards[i].Timings = *lib.NewTimings(40, time.Microsecond*10, 1.5)
shards[i].Timings = *timing.NewTimings(40, time.Microsecond*10, 1.5)
}
}
for _, k := range msgs.AllCDCMessageKind {
// max = ~2min
counters.CDC[uint8(k)] = &ReqCounters{
Timings: *lib.NewTimings(35, time.Millisecond, 1.5),
Timings: *timing.NewTimings(35, time.Millisecond, 1.5),
}
}
return &counters
}

func (counters *ClientCounters) Log(log *lib.Logger) {
func (counters *ClientCounters) Log(log *log.Logger) {
formatCounters := func(c *ReqCounters) {
totalCount := uint64(0)
for _, bin := range c.Timings.Histogram() {
@@ -137,7 +138,7 @@ func (counters *ClientCounters) Log(log *lib.Logger) {

}

var DefaultShardTimeout = lib.ReqTimeouts{
var DefaultShardTimeout = timing.ReqTimeouts{
Initial: 100 * time.Millisecond,
Max: 2 * time.Second,
Overall: 10 * time.Second,
@@ -145,7 +146,7 @@ var DefaultShardTimeout = lib.ReqTimeouts{
Jitter: 0.1,
}

var DefaultCDCTimeout = lib.ReqTimeouts{
var DefaultCDCTimeout = timing.ReqTimeouts{
Initial: time.Second,
Max: 10 * time.Second,
Overall: time.Minute,
@@ -153,7 +154,7 @@ var DefaultCDCTimeout = lib.ReqTimeouts{
Jitter: 0.1,
}

var DefaultBlockTimeout = lib.ReqTimeouts{
var DefaultBlockTimeout = timing.ReqTimeouts{
Initial: time.Second,
Max: 10 * time.Second,
Overall: 5 * time.Minute,
@@ -239,7 +240,7 @@ type clientMetadata struct {

var whichMetadatataAddr int

func (cm *clientMetadata) init(log *lib.Logger, client *Client) error {
func (cm *clientMetadata) init(log *log.Logger, client *Client) error {
log.Debug("initiating clientMetadata")
defer log.Debug("finished initializing clientMetadata")
cm.client = client
@@ -282,7 +283,7 @@ func (cm *clientMetadata) close() {
}

// terminates when cm.incoming gets nil
func (cm *clientMetadata) processRequests(log *lib.Logger) {
func (cm *clientMetadata) processRequests(log *log.Logger) {
buf := bytes.NewBuffer([]byte{})
for req := range cm.incoming {
dontWait := req.resp == nil
@@ -361,7 +362,7 @@ func (cm *clientMetadata) processRequests(log *lib.Logger) {
log.Debug("got close request in request processor, winding down")
}

func (cm *clientMetadata) parseResponse(log *lib.Logger, req *metadataProcessorRequest, rawResp *rawMetadataResponse, dischargeBuf bool) {
func (cm *clientMetadata) parseResponse(log *log.Logger, req *metadataProcessorRequest, rawResp *rawMetadataResponse, dischargeBuf bool) {
defer func() {
if !dischargeBuf {
return
@@ -453,7 +454,7 @@ func (cm *clientMetadata) parseResponse(log *lib.Logger, req *metadataProcessorR
}
}

func (cm *clientMetadata) processRawResponse(log *lib.Logger, rawResp *rawMetadataResponse) {
func (cm *clientMetadata) processRawResponse(log *log.Logger, rawResp *rawMetadataResponse) {
if rawResp.buf != nil {
if req, found := cm.requestsById[rawResp.requestId]; found {
// Common case, the request is already there
@@ -506,7 +507,7 @@ func (cm *clientMetadata) processRawResponse(log *lib.Logger, rawResp *rawMetada
}

// terminates when `cm.quitResponseProcessor` gets a message
func (cm *clientMetadata) processResponses(log *lib.Logger) {
func (cm *clientMetadata) processResponses(log *log.Logger) {
for {
// prioritize responses to requests, we want to get
// them soon to not get spurious timeouts
@@ -546,7 +547,7 @@ func (cm *clientMetadata) processResponses(log *lib.Logger) {
}

// terminates when the socket is closed
func (cm *clientMetadata) drainSocket(log *lib.Logger) {
func (cm *clientMetadata) drainSocket(log *log.Logger) {
for {
buf := <-cm.responsesBufs
cm.sock.SetReadDeadline(time.Now().Add(DefaultShardTimeout.Initial / 2))
@@ -600,7 +601,7 @@ type clientBlockResponse struct {
completionChan chan *blockCompletion
}

func (resp *clientBlockResponse) done(log *lib.Logger, addr1 *net.TCPAddr, addr2 *net.TCPAddr, extra any, err error) {
func (resp *clientBlockResponse) done(log *log.Logger, addr1 *net.TCPAddr, addr2 *net.TCPAddr, extra any, err error) {
if resp.err == nil && err != nil {
log.InfoStack(1, "failing request %T %+v addr1=%+v addr2=%+v extra=%+v: %v", resp.req, resp.req, addr1, addr2, extra, err)
resp.err = err
@@ -638,7 +639,7 @@ type blocksProcessor struct {
sourceAddr1 *net.TCPAddr
sourceAddr2 *net.TCPAddr
what string
timeout **lib.ReqTimeouts
timeout **timing.ReqTimeouts
_conn *blocksProcessorConn // this must be loaded through loadConn
}

@@ -659,7 +660,7 @@ func (proc *blocksProcessor) storeConn(conn *net.TCPConn) *blocksProcessorConn {
var whichBlockIp uint64
var whichSourceIp uint64

func (proc *blocksProcessor) connect(log *lib.Logger) (*net.TCPConn, error) {
func (proc *blocksProcessor) connect(log *log.Logger) (*net.TCPConn, error) {
var err error
sourceIpSelector := atomic.AddUint64(&whichSourceIp, 1)
var sourceAddr *net.TCPAddr
@@ -724,7 +725,7 @@ func connCheck(conn *net.TCPConn) error {
return sysErr
}

func (proc *blocksProcessor) processRequests(log *lib.Logger) {
func (proc *blocksProcessor) processRequests(log *log.Logger) {
log.Debug("%v: starting request processor for addr1=%v addr2=%v", proc.what, proc.addr1, proc.addr2)
// one iteration = one request
for {
@@ -792,7 +793,7 @@ func (proc *blocksProcessor) processRequests(log *lib.Logger) {
}
}

func (proc *blocksProcessor) processResponses(log *lib.Logger) {
func (proc *blocksProcessor) processResponses(log *log.Logger) {
log.Debug("%v: starting response processor for addr1=%v addr2=%v", proc.what, proc.addr1, proc.addr2)
// one iteration = one request
for resp := range proc.inFlightReqChan {
@@ -868,7 +869,7 @@ type blocksProcessorKey struct {

type blocksProcessors struct {
what string
timeouts **lib.ReqTimeouts
timeouts **timing.ReqTimeouts
// how many bits of the block service id to use for blockServiceKey
blockServiceBits uint8
// blocksProcessorKey -> *blocksProcessor
@@ -877,7 +878,7 @@ type blocksProcessors struct {
sourceAddr2 net.TCPAddr
}

func (procs *blocksProcessors) init(what string, timeouts **lib.ReqTimeouts, localAddresses msgs.AddrsInfo) {
func (procs *blocksProcessors) init(what string, timeouts **timing.ReqTimeouts, localAddresses msgs.AddrsInfo) {
procs.what = what
procs.timeouts = timeouts
if localAddresses.Addr1.Addrs[0] != 0 {
@@ -901,7 +902,7 @@ type sendArgs struct {
// This currently never fails (everything network related happens in
// the processor loops), keeping error since it might fail in the future
func (procs *blocksProcessors) send(
log *lib.Logger,
log *log.Logger,
args *sendArgs,
completionChan chan *blockCompletion,
) error {
@@ -973,9 +974,9 @@ type Client struct {
fetchBlockBufs sync.Pool
eraseBlockProcessors blocksProcessors
checkBlockProcessors blocksProcessors
shardTimeout *lib.ReqTimeouts
cdcTimeout *lib.ReqTimeouts
blockTimeout *lib.ReqTimeouts
shardTimeout *timing.ReqTimeouts
cdcTimeout *timing.ReqTimeouts
blockTimeout *timing.ReqTimeouts
requestIdCounter uint64
shuckleAddress string
addrsRefreshTicker *time.Ticker
@@ -987,7 +988,7 @@ type Client struct {
blockServiceToFailureDomain map[msgs.BlockServiceId]msgs.FailureDomain
}

func (c *Client) refreshAddrs(log *lib.Logger) error {
func (c *Client) refreshAddrs(log *log.Logger) error {
var shardAddrs [256]msgs.AddrsInfo
var cdcAddrs msgs.AddrsInfo
{
@@ -1054,7 +1055,7 @@ func (c *Client) refreshAddrs(log *lib.Logger) error {
// Create a new client by acquiring the CDC and Shard connection details from Shuckle. It
// also refreshes the shard/cdc infos every minute.
func NewClient(
log *lib.Logger,
log *log.Logger,
shuckleTimeout *ShuckleTimeouts,
shuckleAddress string,
localAddresses msgs.AddrsInfo,
@@ -1099,19 +1100,19 @@ func (c *Client) SetCounters(counters *ClientCounters) {

// Override the shard timeout parameters.
// This is only safe to use during initialization.
func (c *Client) SetShardTimeouts(t *lib.ReqTimeouts) {
func (c *Client) SetShardTimeouts(t *timing.ReqTimeouts) {
c.shardTimeout = t
}

// Override the CDC timeout parameters.
// This is only safe to use during initialization.
func (c *Client) SetCDCTimeouts(t *lib.ReqTimeouts) {
func (c *Client) SetCDCTimeouts(t *timing.ReqTimeouts) {
c.cdcTimeout = t
}

// Override the block timeout parameters.
// This is only safe to use during initialization.
func (c *Client) SetBlockTimeout(t *lib.ReqTimeouts) {
func (c *Client) SetBlockTimeout(t *timing.ReqTimeouts) {
c.blockTimeout = t
}

@@ -1134,7 +1135,7 @@ func SetMTU(mtu uint64) {
}

func NewClientDirectNoAddrs(
log *lib.Logger,
log *log.Logger,
localAddresses msgs.AddrsInfo,
) (c *Client, err error) {
c = &Client{
@@ -1246,7 +1247,7 @@ func (c *Client) Close() {
}

// Not atomic between the read/write
func (c *Client) MergeDirectoryInfo(log *lib.Logger, id msgs.InodeId, entry msgs.IsDirectoryInfoEntry) error {
func (c *Client) MergeDirectoryInfo(log *log.Logger, id msgs.InodeId, entry msgs.IsDirectoryInfoEntry) error {
packedEntry := msgs.DirectoryInfoEntry{
Body: bincode.Pack(entry),
Tag: entry.Tag(),
@@ -1274,7 +1275,7 @@ func (c *Client) MergeDirectoryInfo(log *lib.Logger, id msgs.InodeId, entry msgs
}

// Not atomic between the read/write
func (c *Client) RemoveDirectoryInfoEntry(log *lib.Logger, id msgs.InodeId, tag msgs.DirectoryInfoTag) error {
func (c *Client) RemoveDirectoryInfoEntry(log *log.Logger, id msgs.InodeId, tag msgs.DirectoryInfoTag) error {
statResp := msgs.StatDirectoryResp{}
if err := c.ShardRequest(log, id.Shard(), &msgs.StatDirectoryReq{Id: id}, &statResp); err != nil {
return err
@@ -1293,7 +1294,7 @@ func (c *Client) RemoveDirectoryInfoEntry(log *lib.Logger, id msgs.InodeId, tag
}

func (c *Client) ResolveDirectoryInfoEntry(
log *lib.Logger,
log *log.Logger,
dirInfoCache *DirInfoCache,
dirId msgs.InodeId,
entry msgs.IsDirectoryInfoEntry, // output will be stored in here
@@ -1341,7 +1342,7 @@ TraverseDirectories:
}

// High-level helper function to take a string path and return the inode and parent inode
func (c *Client) ResolvePathWithParent(log *lib.Logger, path string) (id msgs.InodeId, creationTime msgs.TernTime, parent msgs.InodeId, err error) {
func (c *Client) ResolvePathWithParent(log *log.Logger, path string) (id msgs.InodeId, creationTime msgs.TernTime, parent msgs.InodeId, err error) {
if !filepath.IsAbs(path) {
return msgs.NULL_INODE_ID, 0, msgs.NULL_INODE_ID, fmt.Errorf("expected absolute path, got '%v'", path)
}
@@ -1363,7 +1364,7 @@ func (c *Client) ResolvePathWithParent(log *lib.Logger, path string) (id msgs.In
}

// High-level helper function to take a string path and return the inode
func (c *Client) ResolvePath(log *lib.Logger, path string) (msgs.InodeId, error) {
func (c *Client) ResolvePath(log *log.Logger, path string) (msgs.InodeId, error) {
id, _, _, err := c.ResolvePathWithParent(log, path)
return id, err
}
@@ -1386,7 +1387,7 @@ func writeBlockSendArgs(block *msgs.AddSpanInitiateBlockInfo, r io.ReadSeeker, s
}

// An asynchronous version of [StartBlock] that is currently unused.
func (c *Client) StartWriteBlock(log *lib.Logger, block *msgs.AddSpanInitiateBlockInfo, r io.ReadSeeker, size uint32, crc msgs.Crc, extra any, completion chan *blockCompletion) error {
func (c *Client) StartWriteBlock(log *log.Logger, block *msgs.AddSpanInitiateBlockInfo, r io.ReadSeeker, size uint32, crc msgs.Crc, extra any, completion chan *blockCompletion) error {
return c.writeBlockProcessors.send(log, writeBlockSendArgs(block, r, size, crc, extra), completion)
}

@@ -1394,7 +1395,7 @@ func retriableBlockError(err error) bool {
return errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed)
}

func (c *Client) singleBlockReq(log *lib.Logger, timeouts *lib.ReqTimeouts, processor *blocksProcessors, args *sendArgs) (msgs.BlocksResponse, error) {
func (c *Client) singleBlockReq(log *log.Logger, timeouts *timing.ReqTimeouts, processor *blocksProcessors, args *sendArgs) (msgs.BlocksResponse, error) {
if timeouts == nil {
timeouts = c.blockTimeout
}
@@ -1434,7 +1435,7 @@ func (c *Client) singleBlockReq(log *lib.Logger, timeouts *lib.ReqTimeouts, proc
}
}

func (c *Client) WriteBlock(log *lib.Logger, timeouts *lib.ReqTimeouts, block *msgs.AddSpanInitiateBlockInfo, r io.ReadSeeker, size uint32, crc msgs.Crc) (proof [8]byte, err error) {
func (c *Client) WriteBlock(log *log.Logger, timeouts *timing.ReqTimeouts, block *msgs.AddSpanInitiateBlockInfo, r io.ReadSeeker, size uint32, crc msgs.Crc) (proof [8]byte, err error) {
resp, err := c.singleBlockReq(log, timeouts, &c.writeBlockProcessors, writeBlockSendArgs(block, r, size, crc, nil))
if err != nil {
return proof, err
@@ -1460,7 +1461,7 @@ func fetchBlockSendArgs(blockService *msgs.BlockService, blockId msgs.BlockId, o
}

// An asynchronous version of [FetchBlock] that is currently unused.
func (c *Client) StartFetchBlock(log *lib.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, offset uint32, count uint32, w io.ReaderFrom, extra any, completion chan *blockCompletion, crc msgs.Crc) error {
func (c *Client) StartFetchBlock(log *log.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, offset uint32, count uint32, w io.ReaderFrom, extra any, completion chan *blockCompletion, crc msgs.Crc) error {
return c.fetchBlockProcessors.send(log, fetchBlockSendArgs(blockService, blockId, offset, count, w, extra, crc), completion)
}

@@ -1477,7 +1478,7 @@ func (c *Client) PutFetchedBlock(body *bytes.Buffer) {
//
// The function returns a buffer that is allocated from an internal pool. Once you have finished with this buffer it must
// be returned via the [PutFetchedBlock] function.
func (c *Client) FetchBlock(log *lib.Logger, timeouts *lib.ReqTimeouts, blockService *msgs.BlockService, blockId msgs.BlockId, offset uint32, count uint32, crc msgs.Crc) (body *bytes.Buffer, err error) {
func (c *Client) FetchBlock(log *log.Logger, timeouts *timing.ReqTimeouts, blockService *msgs.BlockService, blockId msgs.BlockId, offset uint32, count uint32, crc msgs.Crc) (body *bytes.Buffer, err error) {
buf := c.fetchBlockBufs.Get().(*bytes.Buffer)
buf.Reset()

@@ -1504,11 +1505,11 @@ func eraseBlockSendArgs(block *msgs.RemoveSpanInitiateBlockInfo, extra any) *sen
}

// An asynchronous version of [EraseBlock] that is currently unused.
func (c *Client) StartEraseBlock(log *lib.Logger, block *msgs.RemoveSpanInitiateBlockInfo, extra any, completion chan *blockCompletion) error {
func (c *Client) StartEraseBlock(log *log.Logger, block *msgs.RemoveSpanInitiateBlockInfo, extra any, completion chan *blockCompletion) error {
return c.eraseBlockProcessors.send(log, eraseBlockSendArgs(block, extra), completion)
}

func (c *Client) EraseBlock(log *lib.Logger, block *msgs.RemoveSpanInitiateBlockInfo) (proof [8]byte, err error) {
func (c *Client) EraseBlock(log *log.Logger, block *msgs.RemoveSpanInitiateBlockInfo) (proof [8]byte, err error) {
resp, err := c.singleBlockReq(log, nil, &c.eraseBlockProcessors, eraseBlockSendArgs(block, nil))
if err != nil {
return proof, err
@@ -1545,11 +1546,11 @@ func checkBlockSendArgs(blockService *msgs.BlockService, blockId msgs.BlockId, s
}

// An asynchronous version of [CheckBlock] that is currently unused.
func (c *Client) StartCheckBlock(log *lib.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, size uint32, crc msgs.Crc, extra any, completion chan *blockCompletion) error {
func (c *Client) StartCheckBlock(log *log.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, size uint32, crc msgs.Crc, extra any, completion chan *blockCompletion) error {
return c.checkBlockProcessors.send(log, checkBlockSendArgs(blockService, blockId, size, crc, extra), completion)
}

func (c *Client) CheckBlock(log *lib.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, size uint32, crc msgs.Crc) error {
func (c *Client) CheckBlock(log *log.Logger, blockService *msgs.BlockService, blockId msgs.BlockId, size uint32, crc msgs.Crc) error {
_, err := c.singleBlockReq(log, nil, &c.checkBlockProcessors, checkBlockSendArgs(blockService, blockId, size, crc, nil))
return err
}

@@ -4,7 +4,7 @@ import (
"sync/atomic"
"time"
"xtx/ternfs/bincode"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -14,7 +14,7 @@ func (c *Client) newRequestId() uint64 {
}

func (c *Client) metadataRequest(
log *lib.Logger,
log *log.Logger,
shid int16, // -1 for cdc
reqBody bincode.Packable,
respBody bincode.Unpackable,

@@ -8,7 +8,7 @@ import (
"fmt"
"path"
"sync"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -26,7 +26,7 @@ type parwalkEnv struct {
}

func (env *parwalkEnv) visit(
log *lib.Logger,
log *log.Logger,
homeShid msgs.ShardId,
parent msgs.InodeId,
parentPath string,
@@ -69,7 +69,7 @@ func (env *parwalkEnv) visit(
}

func (env *parwalkEnv) process(
log *lib.Logger,
log *log.Logger,
homeShid msgs.ShardId,
id msgs.InodeId,
path string,
@@ -132,7 +132,7 @@ type ParwalkOptions struct {
}

func Parwalk(
log *lib.Logger,
log *log.Logger,
client *Client,
options *ParwalkOptions,
root string,

@@ -3,12 +3,12 @@ package client
import (
"fmt"
"net"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

func (c *Client) checkDeletedEdge(
logger *lib.Logger,
logger *log.Logger,
dirId msgs.InodeId,
targetId msgs.InodeId,
name string,
@@ -46,7 +46,7 @@ func (c *Client) checkDeletedEdge(
}

func (c *Client) checkNewEdgeAfterRename(
logger *lib.Logger,
logger *log.Logger,
dirId msgs.InodeId,
targetId msgs.InodeId,
name string,
@@ -68,7 +68,7 @@ func (c *Client) checkNewEdgeAfterRename(
}

func (c *Client) checkRepeatedShardRequestError(
logger *lib.Logger,
logger *log.Logger,
// these are already filled in by now
reqBody msgs.ShardRequest,
resp msgs.ShardResponse,
@@ -108,7 +108,7 @@ func (c *Client) checkRepeatedShardRequestError(
}

func (c *Client) shardRequestInternal(
logger *lib.Logger,
logger *log.Logger,
shid msgs.ShardId,
reqBody msgs.ShardRequest,
// Result will be written in here. If an error is returned, no guarantees
@@ -145,7 +145,7 @@ func (c *Client) shardRequestInternal(
}

func (c *Client) ShardRequestDontWait(
logger *lib.Logger,
logger *log.Logger,
shid msgs.ShardId,
reqBody msgs.ShardRequest,
) error {
@@ -154,7 +154,7 @@ func (c *Client) ShardRequestDontWait(

// This function will set the mtu field for requests that have it with whatever is in `SetMTU`
func (c *Client) ShardRequest(
logger *lib.Logger,
logger *log.Logger,
shid msgs.ShardId,
reqBody msgs.ShardRequest,
// Result will be written in here. If an error is returned, no guarantees

@@ -9,11 +9,12 @@ import (
"syscall"
"time"
"xtx/ternfs/bincode"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/timing"
)

func writeShuckleRequest(log *lib.Logger, w io.Writer, req msgs.ShuckleRequest) error {
func writeShuckleRequest(log *log.Logger, w io.Writer, req msgs.ShuckleRequest) error {
log.Debug("sending request %v to shuckle", req.ShuckleRequestKind())
// serialize
bytes := bincode.Pack(req)
@@ -34,7 +35,7 @@ func writeShuckleRequest(log *lib.Logger, w io.Writer, req msgs.ShuckleRequest)
}

func readShuckleResponse(
log *lib.Logger,
log *log.Logger,
r io.Reader,
) (msgs.ShuckleResponse, error) {
log.Debug("reading response from shuckle")
@@ -128,12 +129,12 @@ func readShuckleResponse(
}

type ShuckleTimeouts struct {
ReconnectTimeout lib.ReqTimeouts
ReconnectTimeout timing.ReqTimeouts
RequestTimeout time.Duration
}

var DefaultShuckleTimeout = ShuckleTimeouts{
ReconnectTimeout: lib.ReqTimeouts{
ReconnectTimeout: timing.ReqTimeouts{
Initial: 100 * time.Millisecond,
Max: 1 * time.Second,
Overall: 10 * time.Second,
@@ -144,7 +145,7 @@ var DefaultShuckleTimeout = ShuckleTimeouts{
}

func ShuckleRequest(
log *lib.Logger,
log *log.Logger,
timeout *ShuckleTimeouts,
shuckleAddress string,
req msgs.ShuckleRequest,
@@ -166,7 +167,7 @@ type shuckReq struct {
}

type ShuckleConn struct {
log *lib.Logger
log *log.Logger
timeout ShuckleTimeouts
shuckleAddress string
reqChan chan shuckReq
@@ -174,7 +175,7 @@ type ShuckleConn struct {
}

func MakeShuckleConn(
log *lib.Logger,
log *log.Logger,
timeout *ShuckleTimeouts,
shuckleAddress string,
numHandlers uint,

@@ -8,11 +8,13 @@ import (
"path/filepath"
"sort"
"sync"
"xtx/ternfs/bufpool"
"xtx/ternfs/crc32c"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/parity"
"xtx/ternfs/rs"
"xtx/ternfs/timing"
)

type blockReader struct {
@@ -54,7 +56,7 @@ func (r *blockReader) Seek(offset int64, whence int) (int64, error) {
}

func (c *Client) createInlineSpan(
log *lib.Logger,
log *log.Logger,
id msgs.InodeId,
cookie [8]byte,
offset uint64,
@@ -255,7 +257,7 @@ func mkBlockReader(
// Return which block ids were created for the span, this is needed in defragmentation
// so we return it immediately here
func (c *Client) CreateSpan(
log *lib.Logger,
log *log.Logger,
blacklist []msgs.BlacklistEntry,
spanPolicies *msgs.SpanPolicy,
blockPolicies *msgs.BlockPolicy,
@@ -319,7 +321,7 @@ func (c *Client) CreateSpan(
var proof [8]byte
blockCrc, blockReader := mkBlockReader(&initiateReq.Req, *data, i)
// fail immediately to other block services
proof, err = c.WriteBlock(log, &lib.NoTimeouts, &block, blockReader, initiateReq.Req.CellSize*uint32(initiateReq.Req.Stripes), blockCrc)
proof, err = c.WriteBlock(log, &timing.NoTimeouts, &block, blockReader, initiateReq.Req.CellSize*uint32(initiateReq.Req.Stripes), blockCrc)
if err != nil {
initiateReq.Req.Blacklist = append(initiateReq.Req.Blacklist, msgs.BlacklistEntry{FailureDomain: block.BlockServiceFailureDomain})
log.Info("failed to write block to %+v: %v, might retry without failure domain %q", block, err, string(block.BlockServiceFailureDomain.Name[:]))
@@ -362,8 +364,8 @@ func (c *Client) CreateSpan(
}

func (c *Client) WriteFile(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
dirInfoCache *DirInfoCache,
dirId msgs.InodeId, // to get policies
fileId msgs.InodeId,
@@ -412,8 +414,8 @@ func (c *Client) WriteFile(
}

func (c *Client) CreateFile(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
dirInfoCache *DirInfoCache,
path string, // must be absolute
r io.Reader,
@@ -446,12 +448,12 @@ func (c *Client) CreateFile(
}

type FetchedStripe struct {
Buf *lib.Buf
Buf *bufpool.Buf
Start uint64
owned bool
}

func (fs *FetchedStripe) Put(bufPool *lib.BufPool) {
func (fs *FetchedStripe) Put(bufPool *bufpool.BufPool) {
if !fs.owned {
return
}
@@ -460,13 +462,13 @@ func (fs *FetchedStripe) Put(bufPool *lib.BufPool) {
}

func (c *Client) fetchCell(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
blockServices []msgs.BlockService,
body *msgs.FetchedBlocksSpan,
blockIx uint8,
cell uint8,
) (buf *lib.Buf, err error) {
) (buf *bufpool.Buf, err error) {
buf = bufPool.Get(int(body.CellSize))
defer func() {
if err != nil {
@@ -477,7 +479,7 @@ func (c *Client) fetchCell(
blockService := &blockServices[block.BlockServiceIx]
var data *bytes.Buffer
// fail immediately to other block services
data, err = c.FetchBlock(log, &lib.NoTimeouts, blockService, block.BlockId, uint32(cell)*body.CellSize, body.CellSize, block.Crc)
data, err = c.FetchBlock(log, &timing.NoTimeouts, blockService, block.BlockId, uint32(cell)*body.CellSize, body.CellSize, block.Crc)
if err != nil {
log.Info("could not fetch block from block service %+v: %+v", blockService, err)
return nil, err
@@ -490,13 +492,13 @@ func (c *Client) fetchCell(
}

func (c *Client) fetchMirroredStripe(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
body *msgs.FetchedBlocksSpan,
offset uint64,
) (start uint64, buf *lib.Buf, err error) {
) (start uint64, buf *bufpool.Buf, err error) {
spanOffset := uint32(offset - span.Header.ByteOffset)
cell := spanOffset / body.CellSize
B := body.Parity.Blocks()
@@ -531,18 +533,18 @@ func (c *Client) fetchMirroredStripe(
}

func (c *Client) fetchRsStripe(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
fileId msgs.InodeId,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
body *msgs.FetchedBlocksSpan,
offset uint64,
) (start uint64, buf *lib.Buf, err error) {
) (start uint64, buf *bufpool.Buf, err error) {
D := body.Parity.DataBlocks()
B := body.Parity.Blocks()
spanOffset := uint32(offset - span.Header.ByteOffset)
blocks := make([]*lib.Buf, B)
blocks := make([]*bufpool.Buf, B)
defer func() {
for i := range blocks {
bufPool.Put(blocks[i])
@@ -609,8 +611,8 @@ func (c *Client) fetchRsStripe(
}

func (c *Client) FetchStripeFromSpan(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
fileId msgs.InodeId,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
@@ -629,7 +631,7 @@ func (c *Client) FetchStripeFromSpan(
panic(fmt.Errorf("header CRC for inline span is %v, but data is %v", span.Header.Crc, dataCrc))
}
stripe := &FetchedStripe{
Buf: lib.NewBuf(&span.Body.(*msgs.FetchedInlineSpan).Body),
Buf: bufpool.NewBuf(&span.Body.(*msgs.FetchedInlineSpan).Body),
Start: span.Header.ByteOffset,
}
log.Debug("fetched inline span")
@@ -659,7 +661,7 @@ func (c *Client) FetchStripeFromSpan(

// otherwise just fetch
var start uint64
var buf *lib.Buf
var buf *bufpool.Buf
var err error
if D == 1 {
start, buf, err = c.fetchMirroredStripe(log, bufPool, blockServices, span, body, offset)
@@ -692,19 +694,19 @@ func (c *Client) FetchStripeFromSpan(
func (c *Client) fetchInlineSpan(
inlineSpan *msgs.FetchedInlineSpan,
crc msgs.Crc,
) (*lib.Buf, error) {
) (*bufpool.Buf, error) {
dataCrc := msgs.Crc(crc32c.Sum(0, inlineSpan.Body))
if dataCrc != crc {
return nil, fmt.Errorf("header CRC for inline span is %v, but data is %v", crc, dataCrc)
}
return lib.NewBuf(&inlineSpan.Body), nil
return bufpool.NewBuf(&inlineSpan.Body), nil
}

func (c *Client) fetchMirroredSpan(log *lib.Logger,
bufPool *lib.BufPool,
func (c *Client) fetchMirroredSpan(log *log.Logger,
bufPool *bufpool.BufPool,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
) (*lib.Buf, error){
) (*bufpool.Buf, error){
body := span.Body.(*msgs.FetchedBlocksSpan)
blockSize := uint32(body.Stripes)*uint32(body.CellSize)

@@ -730,11 +732,11 @@ func (c *Client) fetchMirroredSpan(log *lib.Logger,
return nil, fmt.Errorf("could not find any suitable blocks")
}

func (c *Client) fetchRsSpan(log *lib.Logger,
bufPool *lib.BufPool,
func (c *Client) fetchRsSpan(log *log.Logger,
bufPool *bufpool.BufPool,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
) (*lib.Buf, error){
) (*bufpool.Buf, error){
body := span.Body.(*msgs.FetchedBlocksSpan)
blockSize := uint32(body.Stripes)*uint32(body.CellSize)

@@ -836,12 +838,12 @@ scheduleMoreBlocks:

// The buf we get out must be returned to the bufPool.
func (c *Client) FetchSpan(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
fileId msgs.InodeId,
blockServices []msgs.BlockService,
span *msgs.FetchedSpan,
) (*lib.Buf, error) {
) (*bufpool.Buf, error) {
switch {
// inline storage
case span.Header.StorageClass == msgs.INLINE_STORAGE:
@@ -858,8 +860,8 @@ func (c *Client) FetchSpan(
// Returns nil, nil if span or stripe cannot be found.
// Stripe might not be found because
func (c *Client) FetchStripe(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
fileId msgs.InodeId,
blockServices []msgs.BlockService,
spans []msgs.FetchedSpan,
@@ -883,7 +885,7 @@ func (c *Client) FetchStripe(
}

func (c *Client) FetchSpans(
log *lib.Logger,
log *log.Logger,
fileId msgs.InodeId,
) (blockServices []msgs.BlockService, spans []msgs.FetchedSpan, err error) {
req := msgs.LocalFileSpansReq{FileId: fileId}
@@ -929,8 +931,8 @@ func (c *Client) FetchSpans(

type fileReader struct {
client *Client
log *lib.Logger
bufPool *lib.BufPool
log *log.Logger
bufPool *bufpool.BufPool
fileId msgs.InodeId
fileSize int64 // if -1, we haven't initialized this yet
blockServices []msgs.BlockService
@@ -985,8 +987,8 @@ func (f *fileReader) Seek(offset int64, whence int) (int64, error) {
}

func (c *Client) ReadFile(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
id msgs.InodeId,
) (io.ReadSeekCloser, error) {
blockServices, spans, err := c.FetchSpans(log, id)
@@ -1006,15 +1008,15 @@ func (c *Client) ReadFile(
}

func (c *Client) FetchFile(
log *lib.Logger,
bufPool *lib.BufPool,
log *log.Logger,
bufPool *bufpool.BufPool,
id msgs.InodeId,
) (*lib.Buf, error) {
) (*bufpool.Buf, error) {
blockServices, spans, err := c.FetchSpans(log, id)
if err != nil {
return nil, err
}
bufs := make([]*lib.Buf, len(spans))
bufs := make([]*bufpool.Buf, len(spans))
defer func() {
for _, b := range bufs {
if b != nil {
@@ -3,11 +3,11 @@ package client
import (
"fmt"
"time"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

func WaitForBlockServices(ll *lib.Logger, shuckleAddress string, expectedBlockServices int, waitCurrentServicesCalcuation bool, timeout time.Duration) []msgs.BlockServiceDeprecatedInfo {
func WaitForBlockServices(ll *log.Logger, shuckleAddress string, expectedBlockServices int, waitCurrentServicesCalcuation bool, timeout time.Duration) []msgs.BlockServiceDeprecatedInfo {
var err error
for {
var resp msgs.ShuckleResponse
@@ -37,7 +37,7 @@ func WaitForBlockServices(ll *lib.Logger, shuckleAddress string, expectedBlockSe
}
}

func WaitForShuckle(ll *lib.Logger, shuckleAddress string, timeout time.Duration) error {
func WaitForShuckle(ll *log.Logger, shuckleAddress string, timeout time.Duration) error {
t0 := time.Now()
for {
_, err := ShuckleRequest(ll, nil, shuckleAddress, &msgs.InfoReq{})
@@ -52,7 +52,7 @@ func WaitForShuckle(ll *lib.Logger, shuckleAddress string, timeout time.Duration
}

// getting a client implies having all shards and cdc.
func WaitForClient(log *lib.Logger, shuckleAddress string, timeout time.Duration) {
func WaitForClient(log *log.Logger, shuckleAddress string, timeout time.Duration) {
t0 := time.Now()
var err error
var client *Client

@@ -1,4 +1,4 @@
package lib
package flags

import (
"fmt"
@@ -1,4 +1,4 @@
package lib
package flags

import "fmt"

@@ -1,17 +0,0 @@
package lib

import (
"encoding/binary"
"xtx/ternfs/msgs"
)

func BlockServiceIdFromKey(secretKey [16]byte) msgs.BlockServiceId {
// we don't really care about leaking part or all of the key -- the whole key business is
// to defend against bugs, not malicious agents.
//
// That said, we prefer to encode the knowledge of how to generate block service ids once,
// in the block service.
//
// Also, we remove the highest bit for the sake of SQLite, at least for now
return msgs.BlockServiceId(binary.LittleEndian.Uint64(secretKey[:8]) & uint64(0x7FFFFFFFFFFFFFFF))
}
@@ -1,4 +1,4 @@
package lib
package log

import (
"bytes"
@@ -1,4 +1,4 @@
package lib
package log

import (
"bytes"
@@ -1,6 +1,6 @@
// See <https://docs.influxdata.com/influxdb/v2/reference/syntax/line-protocol/>
// for docs.
package lib
package log

import (
"bytes"
@@ -1,4 +1,4 @@
package lib
package recover

import (
"fmt"
@@ -6,11 +6,12 @@ import (
"runtime/debug"
"strings"
"sync"
"xtx/ternfs/log"
)

var stacktraceLock sync.Mutex

func HandleRecoverChan(log *Logger, terminateChan chan any, err any) {
func HandleRecoverChan(log *log.Logger, terminateChan chan any, err any) {
if err != nil {
log.RaiseAlert(fmt.Sprintf("caught stray error: %v", err))
stacktraceLock.Lock()
@@ -23,7 +24,7 @@ func HandleRecoverChan(log *Logger, terminateChan chan any, err any) {
}
}

func HandleRecoverPanic(log *Logger, err any) {
func HandleRecoverPanic(log *log.Logger, err any) {
if err != nil {
log.RaiseAlert(fmt.Sprintf("caught stray error: %v", err))
stacktraceLock.Lock()
@@ -1,4 +1,4 @@
package lib
package log

import (
"bytes"
@@ -17,7 +17,7 @@ import (
"sync/atomic"
"syscall"
"time"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
)

@@ -119,7 +119,7 @@ func closeOut(out io.Writer) {

type ManagedProcessId uint64

func (procs *ManagedProcesses) Start(ll *lib.Logger, args *ManagedProcessArgs) ManagedProcessId {
func (procs *ManagedProcesses) Start(ll *log.Logger, args *ManagedProcessArgs) ManagedProcessId {
exitedChan := make(chan struct{}, 1)

procs.mu.Lock()
@@ -269,7 +269,7 @@ type BlockServiceOpts struct {
FailureDomain string
Location msgs.Location
FutureCutoff *time.Duration
LogLevel lib.LogLevel
LogLevel log.LogLevel
ShuckleAddress string
Profile bool
Xmon string
@@ -284,7 +284,7 @@ func createDataDir(dir string) {
}
}

func (procs *ManagedProcesses) StartBlockService(ll *lib.Logger, opts *BlockServiceOpts) ManagedProcessId {
func (procs *ManagedProcesses) StartBlockService(ll *log.Logger, opts *BlockServiceOpts) ManagedProcessId {
createDataDir(opts.Path)
args := []string{
"-failure-domain", opts.FailureDomain,
@@ -298,10 +298,10 @@ func (procs *ManagedProcesses) StartBlockService(ll *lib.Logger, opts *BlockServ
if opts.FutureCutoff != nil {
args = append(args, "-future-cutoff", opts.FutureCutoff.String())
}
if opts.LogLevel == lib.DEBUG {
if opts.LogLevel == log.DEBUG {
args = append(args, "-verbose")
}
if opts.LogLevel == lib.TRACE {
if opts.LogLevel == log.TRACE {
args = append(args, "-trace")
}
if opts.ShuckleAddress != "" {
@@ -332,7 +332,7 @@ func (procs *ManagedProcesses) StartBlockService(ll *lib.Logger, opts *BlockServ
type FuseOpts struct {
Exe string
Path string
LogLevel lib.LogLevel
LogLevel log.LogLevel
Wait bool
ShuckleAddress string
Profile bool
@@ -340,7 +340,7 @@ type FuseOpts struct {
InitialCDCTimeout time.Duration
}

func (procs *ManagedProcesses) StartFuse(ll *lib.Logger, opts *FuseOpts) string {
func (procs *ManagedProcesses) StartFuse(ll *log.Logger, opts *FuseOpts) string {
createDataDir(opts.Path)
mountPoint := path.Join(opts.Path, "mnt")
createDataDir(mountPoint)
@@ -354,10 +354,10 @@ func (procs *ManagedProcesses) StartFuse(ll *lib.Logger, opts *FuseOpts) string
signalChan = make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGUSR1)
}
if opts.LogLevel == lib.DEBUG {
if opts.LogLevel == log.DEBUG {
args = append(args, "-verbose")
}
if opts.LogLevel == lib.TRACE {
if opts.LogLevel == log.TRACE {
args = append(args, "-trace")
}
if opts.Profile {
@@ -389,7 +389,7 @@ func (procs *ManagedProcesses) StartFuse(ll *lib.Logger, opts *FuseOpts) string
type ShuckleOpts struct {
Exe string
Dir string
LogLevel lib.LogLevel
LogLevel log.LogLevel
HttpPort uint16
Stale time.Duration
Xmon string
@@ -398,7 +398,7 @@ type ShuckleOpts struct {
Addr2 string
}

func (procs *ManagedProcesses) StartShuckle(ll *lib.Logger, opts *ShuckleOpts) {
func (procs *ManagedProcesses) StartShuckle(ll *log.Logger, opts *ShuckleOpts) {
createDataDir(opts.Dir)
args := []string{
"-http-port", fmt.Sprintf("%d", opts.HttpPort),
@@ -406,10 +406,10 @@ func (procs *ManagedProcesses) StartShuckle(ll *lib.Logger, opts *ShuckleOpts) {
"-data-dir", opts.Dir,
"-addr", opts.Addr1,
}
if opts.LogLevel == lib.DEBUG {
if opts.LogLevel == log.DEBUG {
args = append(args, "-verbose")
}
if opts.LogLevel == lib.TRACE {
if opts.LogLevel == log.TRACE {
args = append(args, "-trace")
}
if opts.Stale != 0 {
@@ -437,7 +437,7 @@ func (procs *ManagedProcesses) StartShuckle(ll *lib.Logger, opts *ShuckleOpts) {
type ShuckleProxyOpts struct {
Exe string
Dir string
LogLevel lib.LogLevel
LogLevel log.LogLevel
Xmon string
Addr1 string
Addr2 string
@@ -445,7 +445,7 @@ type ShuckleProxyOpts struct {
Location msgs.Location
}

func (procs *ManagedProcesses) StartShuckleProxy(ll *lib.Logger, opts *ShuckleProxyOpts) {
func (procs *ManagedProcesses) StartShuckleProxy(ll *log.Logger, opts *ShuckleProxyOpts) {
createDataDir(opts.Dir)
args := []string{
"-log-file", path.Join(opts.Dir, "log"),
@@ -453,10 +453,10 @@ func (procs *ManagedProcesses) StartShuckleProxy(ll *lib.Logger, opts *ShucklePr
"-shuckle-address", opts.ShuckleAddress,
"-location", fmt.Sprintf("%d",opts.Location),
}
if opts.LogLevel == lib.DEBUG {
if opts.LogLevel == log.DEBUG {
args = append(args, "-verbose")
}
if opts.LogLevel == lib.TRACE {
if opts.LogLevel == log.TRACE {
args = append(args, "-trace")
}
if opts.Xmon != "" {
@@ -482,7 +482,7 @@ type GoExes struct {
ShuckleProxyExe string
}

func BuildGoExes(ll *lib.Logger, repoDir string, race bool) *GoExes {
func BuildGoExes(ll *log.Logger, repoDir string, race bool) *GoExes {
args := []string{"ternshuckle", "ternblocks", "ternfuse"}
if race {
args = append(args, "--race")
@@ -507,7 +507,7 @@ type ShardOpts struct {
Exe string
Dir string
Shrid msgs.ShardReplicaId
LogLevel lib.LogLevel
LogLevel log.LogLevel
Valgrind bool
Perf bool
OutgoingPacketDrop float64
@@ -520,7 +520,7 @@ type ShardOpts struct {
LogsDBFlags []string
}

func (procs *ManagedProcesses) StartShard(ll *lib.Logger, repoDir string, opts *ShardOpts) {
func (procs *ManagedProcesses) StartShard(ll *log.Logger, repoDir string, opts *ShardOpts) {
if opts.Valgrind && opts.Perf {
panic(fmt.Errorf("cannot do valgrind and perf together"))
}
@@ -544,13 +544,13 @@ func (procs *ManagedProcesses) StartShard(ll *lib.Logger, repoDir string, opts *
args = append(args, opts.LogsDBFlags...)
}
switch opts.LogLevel {
case lib.TRACE:
case log.TRACE:
args = append(args, "-log-level", "trace")
case lib.DEBUG:
case log.DEBUG:
args = append(args, "-log-level", "debug")
case lib.INFO:
case log.INFO:
args = append(args, "-log-level", "info")
case lib.ERROR:
case log.ERROR:
args = append(args, "-log-level", "error")
}
args = append(args,
@@ -604,7 +604,7 @@ type CDCOpts struct {
Exe string
Dir string
ReplicaId msgs.ReplicaId
LogLevel lib.LogLevel
LogLevel log.LogLevel
Valgrind bool
Perf bool
ShuckleAddress string
@@ -615,7 +615,7 @@ type CDCOpts struct {
LogsDBFlags []string
}

func (procs *ManagedProcesses) StartCDC(ll *lib.Logger, repoDir string, opts *CDCOpts) {
func (procs *ManagedProcesses) StartCDC(ll *log.Logger, repoDir string, opts *CDCOpts) {
if opts.Valgrind && opts.Perf {
panic(fmt.Errorf("cannot do valgrind and perf together"))
}
@@ -638,13 +638,13 @@ func (procs *ManagedProcesses) StartCDC(ll *lib.Logger, repoDir string, opts *CD
args = append(args, opts.LogsDBFlags...)
}
switch opts.LogLevel {
case lib.TRACE:
case log.TRACE:
args = append(args, "-log-level", "trace")
case lib.DEBUG:
case log.DEBUG:
args = append(args, "-log-level", "debug")
case lib.INFO:
case log.INFO:
args = append(args, "-log-level", "info")
case lib.ERROR:
case log.ERROR:
args = append(args, "-log-level", "error")
}
args = append(args, opts.Dir, fmt.Sprintf("%d", int(opts.ReplicaId)))
@@ -695,7 +695,7 @@ type BuildCppOpts struct {
}

// Returns build dir
func buildCpp(ll *lib.Logger, repoDir string, buildType string, targets []string) string {
func buildCpp(ll *log.Logger, repoDir string, buildType string, targets []string) string {
cppDir := cppDir(repoDir)
buildArgs := append([]string{buildType}, targets...)
buildCmd := exec.Command("./build.py", buildArgs...)
@@ -715,7 +715,7 @@ type CppExes struct {
DBToolsExe string
}

func BuildCppExes(ll *lib.Logger, repoDir string, buildType string) *CppExes {
func BuildCppExes(ll *log.Logger, repoDir string, buildType string) *CppExes {
buildDir := buildCpp(ll, repoDir, buildType, []string{"shard/ternshard", "cdc/terncdc", "dbtools/terndbtools"})
return &CppExes{
ShardExe: path.Join(buildDir, "shard/ternshard"),

@@ -27,7 +27,7 @@ func TestComputeParity(t *testing.T) {
for i := range blocks {
blocks[i] = data[i*blockSize : (i+1)*blockSize]
}
rs := Get(MkParity(uint8(numData), uint8(numParity)))
rs := Get(parity.MkParity(uint8(numData), uint8(numParity)))
rs.ComputeParityInto(blocks[:numData], blocks[numData:])
// Verify that the first parity block is the XOR of all the data blocks
expectedParity0 := make([]byte, blockSize)
13 go/s3/s3.go
@@ -14,8 +14,9 @@ import (
"strings"
"sync"
"time"
"xtx/ternfs/bufpool"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"

"golang.org/x/sync/errgroup"
@@ -151,18 +152,18 @@ func parseObjectPath(str string) *objectPath {

// --- Handler Implementation ---
type S3Server struct {
log *lib.Logger
log *log.Logger
c *client.Client
bufPool *lib.BufPool
bufPool *bufpool.BufPool
dirInfoCache *client.DirInfoCache
bucketPaths map[string]string
virtualHost string
}

func NewS3Server(
log *lib.Logger,
log *log.Logger,
client *client.Client,
bufPool *lib.BufPool,
bufPool *bufpool.BufPool,
dirInfoCache *client.DirInfoCache,
buckets map[string]string, // mapping from bucket to paths
virtualHost string, // if not present, path-style bucket resolution will be used
@@ -879,7 +880,7 @@ func (s *S3Server) handleDeleteObjects(ctx context.Context, w http.ResponseWrite
}

// lookupPath is a helper to resolve a full path from the filesystem root to an InodeId.
func lookupPath(ctx context.Context, log *lib.Logger, c *client.Client, path *objectPath) (msgs.InodeId, error) {
func lookupPath(ctx context.Context, log *log.Logger, c *client.Client, path *objectPath) (msgs.InodeId, error) {
current := msgs.ROOT_DIR_INODE_ID
for _, segment := range path.segments {
lookupResp := &msgs.LookupResp{}
@@ -27,11 +27,16 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/cbcmac"
|
||||
"xtx/ternfs/certificate"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/crc32c"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/flags"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/timing"
|
||||
"xtx/ternfs/wyhash"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -117,9 +122,9 @@ type blockServiceStats struct {
|
||||
blockConversionDiscarded uint64
|
||||
}
|
||||
type env struct {
|
||||
bufPool *lib.BufPool
|
||||
bufPool *bufpool.BufPool
|
||||
stats map[msgs.BlockServiceId]*blockServiceStats
|
||||
counters map[msgs.BlocksMessageKind]*lib.Timings
|
||||
counters map[msgs.BlocksMessageKind]*timing.Timings
|
||||
eraseLocks map[msgs.BlockServiceId]*sync.Mutex
|
||||
shuckleConn *client.ShuckleConn
|
||||
failureDomain string
|
||||
@@ -135,10 +140,10 @@ func BlockWriteProof(blockServiceId msgs.BlockServiceId, blockId msgs.BlockId, k
|
||||
binary.Write(buf, binary.LittleEndian, uint64(blockServiceId))
|
||||
buf.Write([]byte{'W'})
|
||||
binary.Write(buf, binary.LittleEndian, uint64(blockId))
|
||||
return lib.CBCMAC(key, buf.Bytes())
|
||||
return cbcmac.CBCMAC(key, buf.Bytes())
|
||||
}
|
||||
|
||||
func raiseAlertAndHardwareEvent(logger *lib.Logger, hostname string, blockServiceId string, msg string) {
|
||||
func raiseAlertAndHardwareEvent(logger *log.Logger, hostname string, blockServiceId string, msg string) {
|
||||
logger.RaiseHardwareEvent(hostname, blockServiceId, msg)
|
||||
}
|
||||
|
||||
@@ -171,7 +176,7 @@ func countBlocks(basePath string) (uint64, error) {
|
||||
}
|
||||
|
||||
func updateBlockServiceInfoCapacity(
|
||||
_ *lib.Logger,
|
||||
_ *log.Logger,
|
||||
blockService *blockService,
|
||||
reservedStorage uint64,
|
||||
) error {
|
||||
@@ -199,7 +204,7 @@ func updateBlockServiceInfoCapacity(
|
||||
|
||||
// either updates `blockService`, or returns an error.
|
||||
func updateBlockServiceInfoBlocks(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blockService *blockService,
|
||||
) error {
|
||||
t := time.Now()
|
||||
@@ -216,7 +221,7 @@ func updateBlockServiceInfoBlocks(
|
||||
|
||||
func initBlockServicesInfo(
|
||||
env *env,
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
locationId msgs.Location,
|
||||
addrs msgs.AddrsInfo,
|
||||
failureDomain [16]byte,
|
||||
@@ -264,7 +269,7 @@ var maximumRegisterInterval time.Duration = minimumRegisterInterval * 2
|
||||
var variantRegisterInterval time.Duration = maximumRegisterInterval - minimumRegisterInterval
|
||||
|
||||
func registerPeriodically(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blockServices map[msgs.BlockServiceId]*blockService,
|
||||
env *env,
|
||||
) {
|
||||
@@ -293,7 +298,7 @@ func registerPeriodically(
|
||||
}
|
||||
|
||||
func updateBlockServiceInfoBlocksForever(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blockServices map[msgs.BlockServiceId]*blockService,
|
||||
) {
|
||||
for {
|
||||
@@ -311,7 +316,7 @@ func updateBlockServiceInfoBlocksForever(
|
||||
}
|
||||
|
||||
func updateBlockServiceInfoCapacityForever(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blockServices map[msgs.BlockServiceId]*blockService,
|
||||
reservedStorage uint64,
|
||||
) {
|
||||
@@ -329,7 +334,7 @@ func updateBlockServiceInfoCapacityForever(
|
||||
}
|
||||
}
|
||||
|
||||
func checkEraseCertificate(log *lib.Logger, blockServiceId msgs.BlockServiceId, cipher cipher.Block, req *msgs.EraseBlockReq) error {
|
||||
func checkEraseCertificate(log *log.Logger, blockServiceId msgs.BlockServiceId, cipher cipher.Block, req *msgs.EraseBlockReq) error {
|
||||
expectedMac, good := certificate.CheckBlockEraseCertificate(blockServiceId, cipher, req)
|
||||
if !good {
|
||||
log.RaiseAlert("bad MAC, got %v, expected %v", req.Certificate, expectedMac)
|
||||
@@ -338,7 +343,7 @@ func checkEraseCertificate(log *lib.Logger, blockServiceId msgs.BlockServiceId,
|
||||
return nil
|
||||
}
|
||||
|
||||
func eraseBlock(log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId) error {
|
||||
func eraseBlock(log *log.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId) error {
|
||||
m := env.eraseLocks[blockServiceId]
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
@@ -352,7 +357,7 @@ func eraseBlock(log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, b
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeBlocksResponse(log *lib.Logger, w io.Writer, resp msgs.BlocksResponse) error {
|
||||
func writeBlocksResponse(log *log.Logger, w io.Writer, resp msgs.BlocksResponse) error {
|
||||
log.Trace("writing response %T %+v", resp, resp)
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
if err := binary.Write(buf, binary.LittleEndian, msgs.BLOCKS_RESP_PROTOCOL_VERSION); err != nil {
|
||||
@@ -370,7 +375,7 @@ func writeBlocksResponse(log *lib.Logger, w io.Writer, resp msgs.BlocksResponse)
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeBlocksResponseError(log *lib.Logger, w io.Writer, err msgs.TernError) error {
|
||||
func writeBlocksResponseError(log *log.Logger, w io.Writer, err msgs.TernError) error {
|
||||
log.Debug("writing blocks error %v", err)
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
if err := binary.Write(buf, binary.LittleEndian, msgs.BLOCKS_RESP_PROTOCOL_VERSION); err != nil {
|
||||
@@ -389,7 +394,7 @@ func writeBlocksResponseError(log *lib.Logger, w io.Writer, err msgs.TernError)
|
||||
}
|
||||
|
||||
type newToOldReadConverter struct {
|
||||
log *lib.Logger
|
||||
log *log.Logger
|
||||
r io.Reader
|
||||
b []byte
|
||||
totalRead int
|
||||
@@ -441,7 +446,7 @@ func (c *newToOldReadConverter) Read(p []byte) (int, error) {
|
||||
return read, nil
|
||||
}
|
||||
|
||||
func sendFetchBlock(log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId, offset uint32, count uint32, conn *net.TCPConn, withCrc bool, fileId msgs.InodeId) error {
|
||||
func sendFetchBlock(log *log.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId, offset uint32, count uint32, conn *net.TCPConn, withCrc bool, fileId msgs.InodeId) error {
|
||||
if offset%msgs.TERN_PAGE_SIZE != 0 {
|
||||
log.RaiseAlert("trying to read from offset other than page boundary")
|
||||
return msgs.BLOCK_FETCH_OUT_OF_BOUNDS
|
||||
@@ -569,7 +574,7 @@ func getPhysicalBlockSize(path string) (int, error) {
|
||||
return int(fs.Bsize), nil
|
||||
}
|
||||
|
||||
func checkBlock(log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId, expectedSize uint32, crc msgs.Crc, conn *net.TCPConn) error {
|
||||
func checkBlock(log *log.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, blockId msgs.BlockId, expectedSize uint32, crc msgs.Crc, conn *net.TCPConn) error {
|
||||
blockPath := path.Join(basePath, blockId.Path())
|
||||
log.Debug("checking block id %v at path %v", blockId, blockPath)
|
||||
|
||||
@@ -632,7 +637,7 @@ func checkBlock(log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, b
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkWriteCertificate(log *lib.Logger, cipher cipher.Block, blockServiceId msgs.BlockServiceId, req *msgs.WriteBlockReq) error {
|
||||
func checkWriteCertificate(log *log.Logger, cipher cipher.Block, blockServiceId msgs.BlockServiceId, req *msgs.WriteBlockReq) error {
|
||||
expectedMac, good := certificate.CheckBlockWriteCertificate(cipher, blockServiceId, req)
|
||||
if !good {
|
||||
log.Debug("mac computed for %v %v %v %v", blockServiceId, req.BlockId, req.Crc, req.Size)
|
||||
@@ -642,7 +647,7 @@ func checkWriteCertificate(log *lib.Logger, cipher cipher.Block, blockServiceId
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeToBuf(log *lib.Logger, env *env, reader io.Reader, size int64) (*lib.Buf, error) {
|
||||
func writeToBuf(log *log.Logger, env *env, reader io.Reader, size int64) (*bufpool.Buf, error) {
|
||||
readBufPtr := env.bufPool.Get(1 << 20)
|
||||
defer env.bufPool.Put(readBufPtr)
|
||||
readBuffer := readBufPtr.Bytes()
|
||||
@@ -707,7 +712,7 @@ func writeToBuf(log *lib.Logger, env *env, reader io.Reader, size int64) (*lib.B
|
||||
}
|
||||
|
||||
func writeBlockInternal(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
env *env,
|
||||
reader io.LimitedReader,
|
||||
blockServiceId msgs.BlockServiceId,
|
||||
@@ -743,7 +748,7 @@ func writeBlockInternal(
|
||||
}
|
||||
|
||||
func writeBlock(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
env *env,
|
||||
blockServiceId msgs.BlockServiceId, cipher cipher.Block, basePath string,
|
||||
blockId msgs.BlockId, expectedCrc msgs.Crc, size uint32, conn *net.TCPConn,
|
||||
@@ -765,7 +770,7 @@ func writeBlock(
|
||||
}
|
||||
|
||||
func testWrite(
|
||||
log *lib.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, size uint64, conn *net.TCPConn,
|
||||
log *log.Logger, env *env, blockServiceId msgs.BlockServiceId, basePath string, size uint64, conn *net.TCPConn,
|
||||
) error {
|
||||
filePath := path.Join(basePath, fmt.Sprintf("tmp.test-write%d", rand.Int63()))
|
||||
defer os.Remove(filePath)
|
||||
@@ -788,7 +793,7 @@ const MAX_OBJECT_SIZE uint32 = 100 << 20
|
||||
|
||||
// The bool is whether we should keep going
|
||||
func handleRequestError(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blockServices map[msgs.BlockServiceId]*blockService,
|
||||
deadBlockServices map[msgs.BlockServiceId]deadBlockService,
|
||||
conn *net.TCPConn,
|
||||
@@ -892,7 +897,7 @@ func handleRequestError(
|
||||
type deadBlockService struct{}
|
||||
|
||||
func readBlocksRequest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
r io.Reader,
|
||||
) (msgs.BlockServiceId, msgs.BlocksRequest, error) {
|
||||
var protocol uint32
|
||||
@@ -938,7 +943,7 @@ func readBlocksRequest(
|
||||
|
||||
// The bool tells us whether we should keep going
|
||||
func handleSingleRequest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
env *env,
|
||||
_ chan any,
|
||||
lastError *error,
|
||||
@@ -1052,7 +1057,7 @@ func handleSingleRequest(
|
||||
}
|
||||
|
||||
func handleRequest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
env *env,
|
||||
terminateChan chan any,
|
||||
blockServices map[msgs.BlockServiceId]*blockService,
|
||||
@@ -1089,7 +1094,7 @@ Options:`
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func retrieveOrCreateKey(log *lib.Logger, dir string) [16]byte {
|
||||
func retrieveOrCreateKey(log *log.Logger, dir string) [16]byte {
|
||||
var err error
|
||||
var keyFile *os.File
|
||||
keyFilePath := path.Join(dir, "secret.key")
|
||||
@@ -1147,7 +1152,7 @@ type diskStats struct {
|
||||
weightedIoMs uint64
|
||||
}
|
||||
|
||||
func getDiskStats(log *lib.Logger, statsPath string) (map[string]diskStats, error) {
|
||||
func getDiskStats(log *log.Logger, statsPath string) (map[string]diskStats, error) {
|
||||
file, err := os.Open(statsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1187,7 +1192,7 @@ func getDiskStats(log *lib.Logger, statsPath string) (map[string]diskStats, erro
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func raiseAlerts(log *lib.Logger, env *env, blockServices map[msgs.BlockServiceId]*blockService) {
|
||||
func raiseAlerts(log *log.Logger, env *env, blockServices map[msgs.BlockServiceId]*blockService) {
|
||||
for {
|
||||
for bsId, bs := range blockServices {
|
||||
ioErrors := bs.lastIoErrors
|
||||
@@ -1208,17 +1213,17 @@ func raiseAlerts(log *lib.Logger, env *env, blockServices map[msgs.BlockServiceI
|
||||
}
|
||||
}
|
||||
|
||||
func sendMetrics(log *lib.Logger, env *env, influxDB *lib.InfluxDB, blockServices map[msgs.BlockServiceId]*blockService, failureDomain string) {
|
||||
metrics := lib.MetricsBuilder{}
|
||||
func sendMetrics(l *log.Logger, env *env, influxDB *log.InfluxDB, blockServices map[msgs.BlockServiceId]*blockService, failureDomain string) {
|
||||
metrics := log.MetricsBuilder{}
|
||||
rand := wyhash.New(mrand.Uint64())
|
||||
alert := log.NewNCAlert(10 * time.Second)
|
||||
alert := l.NewNCAlert(10 * time.Second)
|
||||
failureDomainEscaped := strings.ReplaceAll(failureDomain, " ", "-")
|
||||
for {
|
||||
diskMetrics, err := getDiskStats(log, "/proc/diskstats")
|
||||
diskMetrics, err := getDiskStats(l, "/proc/diskstats")
|
||||
if err != nil {
|
||||
log.RaiseAlert("failed reading diskstats: %v", err)
|
||||
l.RaiseAlert("failed reading diskstats: %v", err)
|
||||
}
|
||||
log.Info("sending metrics")
|
||||
l.Info("sending metrics")
|
||||
metrics.Reset()
|
||||
now := time.Now()
|
||||
for bsId, bsStats := range env.stats {
|
||||
@@ -1273,12 +1278,12 @@ func sendMetrics(log *lib.Logger, env *env, influxDB *lib.InfluxDB, blockService
|
||||
}
|
||||
err = influxDB.SendMetrics(metrics.Payload())
|
||||
if err == nil {
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
sleepFor := time.Minute + time.Duration(rand.Uint64() & ^(uint64(1)<<63))%time.Minute
|
||||
log.Info("metrics sent, sleeping for %v", sleepFor)
|
||||
l.Info("metrics sent, sleeping for %v", sleepFor)
|
||||
time.Sleep(sleepFor)
|
||||
} else {
|
||||
log.RaiseNC(alert, "failed to send metrics, will try again in a second: %v", err)
|
||||
l.RaiseNC(alert, "failed to send metrics, will try again in a second: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
@@ -1292,17 +1297,17 @@ type blockService struct {
|
||||
storageClass msgs.StorageClass
|
||||
cachedInfo msgs.RegisterBlockServiceInfo
|
||||
couldNotUpdateInfoBlocks bool
|
||||
couldNotUpdateInfoBlocksAlert lib.XmonNCAlert
|
||||
couldNotUpdateInfoBlocksAlert log.XmonNCAlert
|
||||
couldNotUpdateInfoCapacity bool
|
||||
couldNotUpdateInfoCapacityAlert lib.XmonNCAlert
|
||||
ioErrorsAlert lib.XmonNCAlert
|
||||
couldNotUpdateInfoCapacityAlert log.XmonNCAlert
|
||||
ioErrorsAlert log.XmonNCAlert
|
||||
ioErrors uint64
|
||||
requests uint64
|
||||
lastIoErrors uint64
|
||||
lastRequests uint64
|
||||
}
|
||||
|
||||
func getMountsInfo(log *lib.Logger, mountsPath string) (map[string]string, error) {
|
||||
func getMountsInfo(log *log.Logger, mountsPath string) (map[string]string, error) {
|
||||
file, err := os.Open(mountsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1332,7 +1337,7 @@ func main() {
|
||||
pathPrefixStr := flag.String("path-prefix", "", "We filter our block service not only by failure domain but also by path prefix")
|
||||
|
||||
futureCutoff := flag.Duration("future-cutoff", DEFAULT_FUTURE_CUTOFF, "")
|
||||
var addresses lib.StringArrayFlags
|
||||
var addresses flags.StringArrayFlags
|
||||
flag.Var(&addresses, "addr", "Addresses (up to two) to bind to, and that will be advertised to shuckle.")
|
||||
verbose := flag.Bool("verbose", false, "")
|
||||
xmon := flag.String("xmon", "", "Xmon address (empty for no xmon)")
|
||||
@@ -1396,7 +1401,7 @@ func main() {
|
||||
flagErrors = true
|
||||
}
|
||||
|
||||
var influxDB *lib.InfluxDB
|
||||
var influxDB *log.InfluxDB
|
||||
if *influxDBOrigin == "" {
|
||||
if *influxDBOrg != "" || *influxDBBucket != "" {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
@@ -1407,7 +1412,7 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
flagErrors = true
|
||||
}
|
||||
influxDB = &lib.InfluxDB{
|
||||
influxDB = &log.InfluxDB{
|
||||
Origin: *influxDBOrigin,
|
||||
Org: *influxDBOrg,
|
||||
Bucket: *influxDBBucket,
|
||||
@@ -1419,14 +1424,14 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
ownIp1, port1, err := lib.ParseIPV4Addr(addresses[0])
|
||||
ownIp1, port1, err := flags.ParseIPV4Addr(addresses[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var ownIp2 [4]byte
|
||||
var port2 uint16
|
||||
if len(addresses) == 2 {
|
||||
ownIp2, port2, err = lib.ParseIPV4Addr(addresses[1])
|
||||
ownIp2, port2, err = flags.ParseIPV4Addr(addresses[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -1457,14 +1462,14 @@ func main() {
|
||||
}
|
||||
defer logOut.Close()
|
||||
}
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
log := lib.NewLogger(logOut, &lib.LoggerOptions{
|
||||
l := log.NewLogger(logOut, &log.LoggerOptions{
|
||||
Level: level,
|
||||
Syslog: *syslog,
|
||||
XmonAddr: *xmon,
|
||||
@@ -1482,7 +1487,7 @@ func main() {
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
stopCpuProfile := func() {
|
||||
log.Info("stopping cpu profile")
|
||||
l.Info("stopping cpu profile")
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
defer stopCpuProfile()
|
||||
@@ -1497,20 +1502,20 @@ func main() {
|
||||
}()
|
||||
}
|
||||
|
||||
log.Info("Running block service with options:")
|
||||
log.Info(" locationId = %v", *locationId)
|
||||
log.Info(" failureDomain = %v", *failureDomainStr)
|
||||
log.Info(" pathPrefix = %v", *pathPrefixStr)
|
||||
log.Info(" futureCutoff = %v", *futureCutoff)
|
||||
log.Info(" addr = '%v'", addresses)
|
||||
log.Info(" logLevel = %v", level)
|
||||
log.Info(" logFile = '%v'", *logFile)
|
||||
log.Info(" shuckleAddress = '%v'", *shuckleAddress)
|
||||
log.Info(" connectionTimeout = %v", *connectionTimeout)
|
||||
log.Info(" reservedStorage = %v", *reservedStorage)
|
||||
log.Info(" shuckleConnectionTimeout = %v", *shuckleConnectionTimeout)
|
||||
l.Info("Running block service with options:")
|
||||
l.Info(" locationId = %v", *locationId)
|
||||
l.Info(" failureDomain = %v", *failureDomainStr)
|
||||
l.Info(" pathPrefix = %v", *pathPrefixStr)
|
||||
l.Info(" futureCutoff = %v", *futureCutoff)
|
||||
l.Info(" addr = '%v'", addresses)
|
||||
l.Info(" logLevel = %v", level)
|
||||
l.Info(" logFile = '%v'", *logFile)
|
||||
l.Info(" shuckleAddress = '%v'", *shuckleAddress)
|
||||
l.Info(" connectionTimeout = %v", *connectionTimeout)
|
||||
l.Info(" reservedStorage = %v", *reservedStorage)
|
||||
l.Info(" shuckleConnectionTimeout = %v", *shuckleConnectionTimeout)
|
||||
|
||||
bufPool := lib.NewBufPool()
|
||||
bufPool := bufpool.NewBufPool()
|
||||
env := &env{
|
||||
bufPool: bufPool,
|
||||
stats: make(map[msgs.BlockServiceId]*blockServiceStats),
|
||||
@@ -1519,12 +1524,12 @@ func main() {
|
||||
failureDomain: *failureDomainStr,
|
||||
pathPrefix: *pathPrefixStr,
|
||||
ioAlertPercent: uint8(*ioAlertPercent),
|
||||
shuckleConn: client.MakeShuckleConn(log, nil, *shuckleAddress, 1),
|
||||
shuckleConn: client.MakeShuckleConn(l, nil, *shuckleAddress, 1),
|
||||
}
|
||||
|
||||
mountsInfo, err := getMountsInfo(log, "/proc/self/mountinfo")
|
||||
mountsInfo, err := getMountsInfo(l, "/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
log.RaiseAlert("Disk stats for mounted paths will not be collected due to failure collecting mount info: %v", err)
|
||||
l.RaiseAlert("Disk stats for mounted paths will not be collected due to failure collecting mount info: %v", err)
|
||||
}
|
||||
|
||||
blockServices := make(map[msgs.BlockServiceId]*blockService)
|
||||
@@ -1535,7 +1540,7 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Storage class cannot be EMPTY/INLINE")
|
||||
os.Exit(2)
|
||||
}
|
||||
key := retrieveOrCreateKey(log, dir)
|
||||
key := retrieveOrCreateKey(l, dir)
|
||||
id := blockServiceIdFromKey(key)
|
||||
cipher, err := aes.NewCipher(key[:])
|
||||
if err != nil {
|
||||
@@ -1551,13 +1556,13 @@ func main() {
|
||||
key: key,
|
||||
cipher: cipher,
|
||||
storageClass: storageClass,
|
||||
couldNotUpdateInfoBlocksAlert: *log.NewNCAlert(time.Second),
|
||||
couldNotUpdateInfoCapacityAlert: *log.NewNCAlert(time.Second),
|
||||
ioErrorsAlert: *log.NewNCAlert(time.Second),
|
||||
couldNotUpdateInfoBlocksAlert: *l.NewNCAlert(time.Second),
|
||||
couldNotUpdateInfoCapacityAlert: *l.NewNCAlert(time.Second),
|
||||
ioErrorsAlert: *l.NewNCAlert(time.Second),
|
||||
}
|
||||
}
|
||||
for id, blockService := range blockServices {
|
||||
log.Info("block service %v at %v, storage class %v", id, blockService.path, blockService.storageClass)
|
||||
l.Info("block service %v at %v, storage class %v", id, blockService.path, blockService.storageClass)
|
||||
}
|
||||
|
||||
if len(blockServices) != flag.NArg()/2 {
|
||||
@@ -1570,14 +1575,14 @@ func main() {
|
||||
{
|
||||
var shuckleBlockServices []msgs.BlockServiceDeprecatedInfo
|
||||
{
|
||||
alert := log.NewNCAlert(0)
|
||||
log.RaiseNC(alert, "fetching block services")
|
||||
alert := l.NewNCAlert(0)
|
||||
l.RaiseNC(alert, "fetching block services")
|
||||
|
||||
resp, err := env.shuckleConn.Request(&msgs.AllBlockServicesDeprecatedReq{})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("could not request block services from shuckle: %v", err))
|
||||
}
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
shuckleBlockServices = resp.(*msgs.AllBlockServicesDeprecatedResp).BlockServices
|
||||
}
|
||||
for i := range shuckleBlockServices {
|
||||
@@ -1607,7 +1612,7 @@ func main() {
|
||||
}
|
||||
// we can't have a decommissioned block service
|
||||
if weHaveBs && isDecommissioned {
|
||||
log.ErrorNoAlert("We have block service %v, which is decommissioned according to shuckle. We will treat it as if it doesn't exist.", bs.Id)
|
||||
l.ErrorNoAlert("We have block service %v, which is decommissioned according to shuckle. We will treat it as if it doesn't exist.", bs.Id)
|
||||
delete(blockServices, bs.Id)
|
||||
deadBlockServices[bs.Id] = deadBlockService{}
|
||||
}
|
||||
@@ -1629,7 +1634,7 @@ func main() {
|
||||
}
|
||||
defer listener1.Close()
|
||||
|
||||
log.Info("running 1 on %v", listener1.Addr())
|
||||
l.Info("running 1 on %v", listener1.Addr())
|
||||
actualPort1 := uint16(listener1.Addr().(*net.TCPAddr).Port)
|
||||
|
||||
var listener2 net.Listener
|
||||
@@ -1641,12 +1646,12 @@ func main() {
|
||||
}
|
||||
defer listener2.Close()
|
||||
|
||||
log.Info("running 2 on %v", listener2.Addr())
|
||||
l.Info("running 2 on %v", listener2.Addr())
|
||||
actualPort2 = uint16(listener2.Addr().(*net.TCPAddr).Port)
|
||||
}
|
||||
|
||||
initBlockServicesInfo(env, log, msgs.Location(*locationId), msgs.AddrsInfo{Addr1: msgs.IpPort{Addrs: ownIp1, Port: actualPort1}, Addr2: msgs.IpPort{Addrs: ownIp2, Port: actualPort2}}, failureDomain, blockServices, *reservedStorage)
|
||||
log.Info("finished updating block service info, will now start")
|
||||
initBlockServicesInfo(env, l, msgs.Location(*locationId), msgs.AddrsInfo{Addr1: msgs.IpPort{Addrs: ownIp1, Port: actualPort1}, Addr2: msgs.IpPort{Addrs: ownIp2, Port: actualPort2}}, failureDomain, blockServices, *reservedStorage)
|
||||
l.Info("finished updating block service info, will now start")
|
||||
|
||||
terminateChan := make(chan any)
|
||||
|
||||
@@ -1658,66 +1663,66 @@ func main() {
|
||||
env.stats[bsId] = &blockServiceStats{}
|
||||
env.eraseLocks[bsId] = &sync.Mutex{}
|
||||
}
|
||||
env.counters = make(map[msgs.BlocksMessageKind]*lib.Timings)
|
||||
env.counters = make(map[msgs.BlocksMessageKind]*timing.Timings)
|
||||
for _, k := range msgs.AllBlocksMessageKind {
|
||||
env.counters[k] = lib.NewTimings(40, 100*time.Microsecond, 1.5)
|
||||
env.counters[k] = timing.NewTimings(40, 100*time.Microsecond, 1.5)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
registerPeriodically(log, blockServices, env)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
registerPeriodically(l, blockServices, env)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
updateBlockServiceInfoBlocksForever(log, blockServices)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
updateBlockServiceInfoBlocksForever(l, blockServices)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
updateBlockServiceInfoCapacityForever(log, blockServices, *reservedStorage)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
updateBlockServiceInfoCapacityForever(l, blockServices, *reservedStorage)
|
||||
}()
|
||||
|
||||
if influxDB != nil {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
sendMetrics(log, env, influxDB, blockServices, *failureDomainStr)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
sendMetrics(l, env, influxDB, blockServices, *failureDomainStr)
|
||||
}()
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
raiseAlerts(log, env, blockServices)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
raiseAlerts(l, env, blockServices)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
conn, err := listener1.Accept()
|
||||
log.Trace("new conn %+v", conn)
|
||||
l.Trace("new conn %+v", conn)
|
||||
if err != nil {
|
||||
terminateChan <- err
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
handleRequest(log, env, terminateChan, blockServices, deadBlockServices, conn.(*net.TCPConn), *futureCutoff, *connectionTimeout)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
handleRequest(l, env, terminateChan, blockServices, deadBlockServices, conn.(*net.TCPConn), *futureCutoff, *connectionTimeout)
|
||||
}()
|
||||
}
|
||||
}()
|
||||
if listener2 != nil {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
conn, err := listener2.Accept()
|
||||
log.Trace("new conn %+v", conn)
|
||||
l.Trace("new conn %+v", conn)
|
||||
if err != nil {
|
||||
terminateChan <- err
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
handleRequest(log, env, terminateChan, blockServices, deadBlockServices, conn.(*net.TCPConn), *futureCutoff, *connectionTimeout)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
handleRequest(l, env, terminateChan, blockServices, deadBlockServices, conn.(*net.TCPConn), *futureCutoff, *connectionTimeout)
|
||||
}()
|
||||
}
|
||||
}()
|
||||
@@ -1774,7 +1779,7 @@ func writeBufToTemp(statBytes *uint64, basePath string, buf []byte) (string, err
|
||||
return tmpName, err
|
||||
}
|
||||
|
||||
func verifyCrcFile(log *lib.Logger, readBuffer []byte, path string, expectedSize int64, expectedCrc msgs.Crc) error {
|
||||
func verifyCrcFile(log *log.Logger, readBuffer []byte, path string, expectedSize int64, expectedCrc msgs.Crc) error {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Debug("failed opening file %s with error: %v", path, err)
|
||||
@@ -1793,7 +1798,7 @@ func verifyCrcFile(log *lib.Logger, readBuffer []byte, path string, expectedSize
|
||||
return verifyCrcReader(log, readBuffer, f, expectedCrc)
|
||||
}
|
||||
|
||||
func verifyCrcReader(log *lib.Logger, readBuffer []byte, r io.Reader, expectedCrc msgs.Crc) error {
|
||||
func verifyCrcReader(log *log.Logger, readBuffer []byte, r io.Reader, expectedCrc msgs.Crc) error {
|
||||
cursor := uint32(0)
|
||||
remainingData := 0
|
||||
actualCrc := uint32(0)
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
)
|
||||
|
||||
@@ -22,7 +22,7 @@ type PathResolver interface {
|
||||
}
|
||||
|
||||
// Returns a thread-safe PathResolver that can be used concurrently in multiple goroutines.
|
||||
func NewPathResolver(cl *client.Client, logger *lib.Logger) PathResolver {
|
||||
func NewPathResolver(cl *client.Client, logger *log.Logger) PathResolver {
|
||||
return &resolver{
|
||||
ternClient: cl,
|
||||
logger: logger,
|
||||
@@ -33,7 +33,7 @@ func NewPathResolver(cl *client.Client, logger *lib.Logger) PathResolver {
|
||||
|
||||
type resolver struct {
|
||||
ternClient *client.Client
|
||||
logger *lib.Logger
|
||||
logger *log.Logger
|
||||
// Mapping of inode ID to directory name. Used to avoid duplicate lookups for the same inode.
|
||||
inodeToDir map[msgs.InodeId]string
|
||||
// Used to handle concurrent access to resolver internal data.
|
||||
|
||||
@@ -21,13 +21,16 @@ import (
"sync"
"sync/atomic"
"time"
"xtx/ternfs/bufpool"
"xtx/ternfs/certificate"
"xtx/ternfs/cleanup"
"xtx/ternfs/client"
"xtx/ternfs/crc32c"
"xtx/ternfs/lib"
"xtx/ternfs/flags"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/terncli/filesamples"
"xtx/ternfs/timing"
)

type commandSpec struct {
@@ -55,7 +58,7 @@ func usage() {
flag.PrintDefaults()
}

func outputFullFileSizes(log *lib.Logger, c *client.Client) {
func outputFullFileSizes(log *log.Logger, c *client.Client) {
var examinedDirs uint64
var examinedFiles uint64
err := client.Parwalk(
@@ -112,10 +115,10 @@ func outputFullFileSizes(log *lib.Logger, c *client.Client) {
}
}

func outputBriefFileSizes(log *lib.Logger, c *client.Client) {
func outputBriefFileSizes(log *log.Logger, c *client.Client) {
// histogram
histoBins := 256
histo := lib.NewHistogram(histoBins, 1024, 1.1)
histo := timing.NewHistogram(histoBins, 1024, 1.1)
var histoSizes [256][]uint64
var wg sync.WaitGroup
wg.Add(256)
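outputBriefFileSizes builds its size distribution with NewHistogram(bins, firstBound, growth), which reads like geometrically growing buckets: 256 bins starting at 1 KiB, each bound 1.1x the previous. That semantics is an assumption about xtx/ternfs/timing, not its actual code; a self-contained sketch of such a histogram:

package main

import "fmt"

type histogram struct {
	upperBounds []float64
	counts      []uint64
}

// newHistogram creates bins whose upper bounds grow geometrically:
// firstBound, firstBound*growth, firstBound*growth^2, ...
func newHistogram(bins int, firstBound, growth float64) *histogram {
	h := &histogram{
		upperBounds: make([]float64, bins),
		counts:      make([]uint64, bins),
	}
	bound := firstBound
	for i := range h.upperBounds {
		h.upperBounds[i] = bound
		bound *= growth
	}
	return h
}

// add counts x in the first bin whose upper bound covers it; anything past
// the last bound lands in the last bin.
func (h *histogram) add(x float64) {
	for i, ub := range h.upperBounds {
		if x <= ub {
			h.counts[i]++
			return
		}
	}
	h.counts[len(h.counts)-1]++
}

func main() {
	h := newHistogram(256, 1024, 1.1)
	h.add(512)
	h.add(4096)
	fmt.Println(h.counts[0], h.counts[15]) // 4096 lands around the 15th bin
}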
@@ -182,7 +185,7 @@ func formatSize(bytes uint64) string {
func main() {
flag.Usage = usage
shuckleAddress := flag.String("shuckle", "", "Shuckle address (host:port).")
var addresses lib.StringArrayFlags
var addresses flags.StringArrayFlags
flag.Var(&addresses, "addr", "Local addresses (up to two) to connect from.")
mtu := flag.String("mtu", "", "MTU to use, either an integer or \"max\"")
shardInitialTimeout := flag.Duration("shard-initial-timeout", 0, "")
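StringArrayFlags, now relocated from lib to the new flags package, is the usual flag.Value trick for a flag that may be passed more than once, as -addr is above. A minimal stand-alone version of the pattern; the real type may differ in detail:

package main

import (
	"flag"
	"fmt"
	"strings"
)

// stringArrayFlags collects every occurrence of a repeated flag.
type stringArrayFlags []string

func (s *stringArrayFlags) String() string { return strings.Join(*s, ",") }

func (s *stringArrayFlags) Set(value string) error {
	*s = append(*s, value)
	return nil
}

func main() {
	var addresses stringArrayFlags
	flag.Var(&addresses, "addr", "Local addresses (up to two) to connect from.")
	flag.Parse()
	fmt.Println(addresses) // -addr 10.0.0.1:0 -addr 10.0.0.2:0 -> [10.0.0.1:0 10.0.0.2:0]
}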
@@ -194,13 +197,13 @@ func main() {
verbose := flag.Bool("verbose", false, "")
trace := flag.Bool("trace", false, "")

var log *lib.Logger
var l *log.Logger
var mbClient *client.Client
var clientMu sync.RWMutex

var localAddresses msgs.AddrsInfo
if len(addresses) > 0 {
ownIp1, port1, err := lib.ParseIPV4Addr(addresses[0])
ownIp1, port1, err := flags.ParseIPV4Addr(addresses[0])
if err != nil {
panic(err)
}
@@ -208,7 +211,7 @@ func main() {
var ownIp2 [4]byte
var port2 uint16
if len(addresses) == 2 {
ownIp2, port2, err = lib.ParseIPV4Addr(addresses[1])
ownIp2, port2, err = flags.ParseIPV4Addr(addresses[1])
if err != nil {
panic(err)
}
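Both local address flags are parsed into a four-byte IP plus port with ParseIPV4Addr, also moved into the flags package. A hedged sketch of what such a helper can look like using only the standard library; the actual implementation may behave differently:

package main

import (
	"fmt"
	"net"
	"strconv"
)

// parseIPV4Addr splits "host:port" and returns the IPv4 address as four
// bytes plus the port, matching the shape used by the callers above.
func parseIPV4Addr(addr string) (ip [4]byte, port uint16, err error) {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return ip, 0, err
	}
	parsed := net.ParseIP(host).To4()
	if parsed == nil {
		return ip, 0, fmt.Errorf("%q is not an IPv4 address", host)
	}
	copy(ip[:], parsed)
	p, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return ip, 0, err
	}
	return ip, uint16(p), nil
}

func main() {
	ip, port, err := parseIPV4Addr("127.0.0.1:10000")
	fmt.Println(ip, port, err)
}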
@@ -239,7 +242,7 @@ func main() {
|
||||
panic("You need to specify -shuckle (or -prod).\n")
|
||||
}
|
||||
var err error
|
||||
c, err := client.NewClient(log, nil, *shuckleAddress, localAddresses)
|
||||
c, err := client.NewClient(l, nil, *shuckleAddress, localAddresses)
|
||||
if err != nil {
|
||||
clientMu.Unlock()
|
||||
panic(fmt.Errorf("could not create client: %v", err))
|
||||
@@ -276,8 +279,8 @@ func main() {
|
||||
}
|
||||
mbClient.SetCDCTimeouts(&cdcTimeouts)
|
||||
if printTimeouts {
|
||||
log.Info("shard timeouts: %+v", shardTimeouts)
|
||||
log.Info("CDC timeouts: %+v", cdcTimeouts)
|
||||
l.Info("shard timeouts: %+v", shardTimeouts)
|
||||
l.Info("CDC timeouts: %+v", cdcTimeouts)
|
||||
}
|
||||
clientMu.Unlock()
|
||||
return mbClient
|
||||
@@ -292,7 +295,7 @@ func main() {
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
if *collectDirIdU64 == 0 {
|
||||
state := &cleanup.CollectDirectoriesState{}
|
||||
if err := cleanup.CollectDirectoriesInAllShards(log, getClient(), dirInfoCache, nil, &cleanup.CollectDirectoriesOpts{NumWorkersPerShard: 2, WorkersQueueSize: 100}, state, *collectDirMinEdgeAge); err != nil {
|
||||
if err := cleanup.CollectDirectoriesInAllShards(l, getClient(), dirInfoCache, nil, &cleanup.CollectDirectoriesOpts{NumWorkersPerShard: 2, WorkersQueueSize: 100}, state, *collectDirMinEdgeAge); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
@@ -301,10 +304,10 @@ func main() {
|
||||
panic(fmt.Errorf("inode id %v is not a directory", dirId))
|
||||
}
|
||||
var stats cleanup.CollectDirectoriesStats
|
||||
if err := cleanup.CollectDirectory(log, getClient(), dirInfoCache, &stats, dirId, *collectDirMinEdgeAge); err != nil {
|
||||
if err := cleanup.CollectDirectory(l, getClient(), dirInfoCache, &stats, dirId, *collectDirMinEdgeAge); err != nil {
|
||||
panic(fmt.Errorf("could not collect %v, stats: %+v, err: %v", dirId, stats, err))
|
||||
}
|
||||
log.Info("finished collecting %v, stats: %+v", dirId, stats)
|
||||
l.Info("finished collecting %v, stats: %+v", dirId, stats)
|
||||
}
|
||||
}
|
||||
commands["collect"] = commandSpec{
|
||||
@@ -321,11 +324,11 @@ func main() {
|
||||
state := &cleanup.DestructFilesState{}
|
||||
opts := &cleanup.DestructFilesOptions{NumWorkersPerShard: 10, WorkersQueueSize: 100}
|
||||
if *destrutcFileShardId < 0 {
|
||||
if err := cleanup.DestructFilesInAllShards(log, getClient(), opts, state); err != nil {
|
||||
if err := cleanup.DestructFilesInAllShards(l, getClient(), opts, state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if err := cleanup.DestructFiles(log, getClient(), opts, state, msgs.ShardId(*destrutcFileShardId)); err != nil {
|
||||
if err := cleanup.DestructFiles(l, getClient(), opts, state, msgs.ShardId(*destrutcFileShardId)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -337,10 +340,10 @@ func main() {
|
||||
stats := cleanup.DestructFilesStats{}
|
||||
var destructFileCookie [8]byte
|
||||
binary.LittleEndian.PutUint64(destructFileCookie[:], *destructFileCookieU64)
|
||||
if err := cleanup.DestructFile(log, getClient(), &stats, fileId, 0, destructFileCookie); err != nil {
|
||||
if err := cleanup.DestructFile(l, getClient(), &stats, fileId, 0, destructFileCookie); err != nil {
|
||||
panic(fmt.Errorf("could not destruct %v, stats: %+v, err: %v", fileId, stats, err))
|
||||
}
|
||||
log.Info("finished destructing %v, stats: %+v", fileId, stats)
|
||||
l.Info("finished destructing %v, stats: %+v", fileId, stats)
|
||||
}
|
||||
}
|
||||
commands["destruct"] = commandSpec{
|
||||
@@ -368,8 +371,8 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Can't provide the same flag both in -flags and -no-flags\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
log.Info("requesting block services")
|
||||
blockServicesResp, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
l.Info("requesting block services")
|
||||
blockServicesResp, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -398,10 +401,10 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Invalid shard %v.\n", *migrateShard)
|
||||
os.Exit(2)
|
||||
}
|
||||
log.Info("will migrate in %v block services:", numBlockServicesToMigrate)
|
||||
l.Info("will migrate in %v block services:", numBlockServicesToMigrate)
|
||||
for failureDomain, bss := range blockServicesToMigrate {
|
||||
for _, blockServiceId := range *bss {
|
||||
log.Info("%v, %v", failureDomain, blockServiceId)
|
||||
l.Info("%v, %v", failureDomain, blockServiceId)
|
||||
}
|
||||
}
|
||||
for {
|
||||
@@ -417,30 +420,30 @@ func main() {
|
||||
}
|
||||
}
|
||||
stats := cleanup.MigrateStats{}
|
||||
progressReportAlert := log.NewNCAlert(10 * time.Second)
|
||||
progressReportAlert := l.NewNCAlert(10 * time.Second)
|
||||
for failureDomain, bss := range blockServicesToMigrate {
|
||||
for _, blockServiceId := range *bss {
|
||||
log.Info("migrating block service %v, %v", blockServiceId, failureDomain)
|
||||
l.Info("migrating block service %v, %v", blockServiceId, failureDomain)
|
||||
if *migrateFileIdU64 == 0 && *migrateShard < 0 {
|
||||
if err := cleanup.MigrateBlocksInAllShards(log, getClient(), &stats, progressReportAlert, blockServiceId); err != nil {
|
||||
if err := cleanup.MigrateBlocksInAllShards(l, getClient(), &stats, progressReportAlert, blockServiceId); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else if *migrateFileIdU64 != 0 {
|
||||
fileId := msgs.InodeId(*migrateFileIdU64)
|
||||
if err := cleanup.MigrateBlocksInFile(log, getClient(), &stats, progressReportAlert, blockServiceId, fileId); err != nil {
|
||||
if err := cleanup.MigrateBlocksInFile(l, getClient(), &stats, progressReportAlert, blockServiceId, fileId); err != nil {
|
||||
panic(fmt.Errorf("error while migrating file %v away from block service %v: %v", fileId, blockServiceId, err))
|
||||
}
|
||||
} else {
|
||||
shid := msgs.ShardId(*migrateShard)
|
||||
if err := cleanup.MigrateBlocks(log, getClient(), &stats, progressReportAlert, shid, blockServiceId); err != nil {
|
||||
if err := cleanup.MigrateBlocks(l, getClient(), &stats, progressReportAlert, shid, blockServiceId); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
log.Info("finished migrating blocks away from block service %v, stats so far: %+v", blockServiceId, stats)
|
||||
l.Info("finished migrating blocks away from block service %v, stats so far: %+v", blockServiceId, stats)
|
||||
}
|
||||
}
|
||||
log.Info("finished migrating away from all block services, stats: %+v", stats)
|
||||
log.ClearNC(progressReportAlert)
|
||||
l.Info("finished migrating away from all block services, stats: %+v", stats)
|
||||
l.ClearNC(progressReportAlert)
|
||||
}
|
||||
commands["migrate"] = commandSpec{
|
||||
flags: migrateCmd,
|
||||
@@ -476,7 +479,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := getClient().ShardRequest(log, shard, req, resp); err != nil {
|
||||
if err := getClient().ShardRequest(l, shard, req, resp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
out, err := json.MarshalIndent(resp, "", " ")
|
||||
@@ -515,7 +518,7 @@ func main() {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
if err := getClient().CDCRequest(log, req, resp); err != nil {
|
||||
if err := getClient().CDCRequest(l, req, resp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
out, err := json.MarshalIndent(resp, "", " ")
|
||||
@@ -554,7 +557,7 @@ func main() {
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
if err := getClient().MergeDirectoryInfo(log, id, entry); err != nil {
|
||||
if err := getClient().MergeDirectoryInfo(l, id, entry); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -568,7 +571,7 @@ func main() {
|
||||
removeDirInfoTag := removeDirInfoCmd.String("tag", "", "One of SNAPSHOT|SPAN|BLOCK")
|
||||
removeDirInfoRun := func() {
|
||||
id := msgs.InodeId(*removeDirInfoU64)
|
||||
if err := getClient().RemoveDirectoryInfoEntry(log, id, msgs.DirInfoTagFromName(*removeDirInfoTag)); err != nil {
|
||||
if err := getClient().RemoveDirectoryInfoEntry(l, id, msgs.DirInfoTagFromName(*removeDirInfoTag)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -592,12 +595,12 @@ func main() {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
bufPool := lib.NewBufPool()
|
||||
fileId, err := getClient().CreateFile(log, bufPool, client.NewDirInfoCache(), path, input)
|
||||
bufPool := bufpool.NewBufPool()
|
||||
fileId, err := getClient().CreateFile(l, bufPool, client.NewDirInfoCache(), path, input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("File created as %v", fileId)
|
||||
l.Info("File created as %v", fileId)
|
||||
}
|
||||
commands["cp-into"] = commandSpec{
|
||||
flags: cpIntoCmd,
|
||||
@@ -627,14 +630,14 @@ func main() {
|
||||
id = msgs.InodeId(*cpOutofId)
|
||||
} else {
|
||||
var err error
|
||||
id, err = getClient().ResolvePath(log, *cpOutofInput)
|
||||
id, err = getClient().ResolvePath(l, *cpOutofInput)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
bufPool := lib.NewBufPool()
|
||||
r, err := getClient().FetchFile(log, bufPool, id)
|
||||
bufPool := bufpool.NewBufPool()
|
||||
r, err := getClient().FetchFile(l, bufPool, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -653,7 +656,7 @@ func main() {
|
||||
blockReqBlockService := blockReqCmd.Uint64("bs", 0, "Block service")
|
||||
blockReqFile := blockReqCmd.String("file", "", "")
|
||||
blockReqRun := func() {
|
||||
resp, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
resp, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -679,7 +682,7 @@ func main() {
|
||||
Size: uint32(len(fileContents)),
|
||||
}
|
||||
req.Certificate = certificate.BlockWriteCertificate(cipher, blockServiceInfo.Id, &req)
|
||||
log.Info("request: %+v", req)
|
||||
l.Info("request: %+v", req)
|
||||
}
|
||||
commands["write-block-req"] = commandSpec{
|
||||
flags: blockReqCmd,
|
||||
@@ -690,7 +693,7 @@ func main() {
|
||||
testBlockWriteBlockService := testBlockWriteCmd.String("bs", "", "Block service. If comma-separated, they'll be written in parallel to the specified ones.")
|
||||
testBlockWriteSize := testBlockWriteCmd.Uint("size", 0, "Size (must fit in u32)")
|
||||
testBlockWriteRun := func() {
|
||||
resp, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
resp, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -715,7 +718,7 @@ func main() {
|
||||
}
|
||||
conns := make([]*net.TCPConn, len(bsInfos))
|
||||
for i := 0; i < len(conns); i++ {
|
||||
conn, err := client.BlockServiceConnection(log, bsInfos[i].Addrs)
|
||||
conn, err := client.BlockServiceConnection(l, bsInfos[i].Addrs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -729,7 +732,7 @@ func main() {
|
||||
conn := conns[i]
|
||||
bsId := bsInfos[i].Id
|
||||
go func() {
|
||||
thisErr := client.TestWrite(log, conn, bsId, bytes.NewReader(contents), uint64(len(contents)))
|
||||
thisErr := client.TestWrite(l, conn, bsId, bytes.NewReader(contents), uint64(len(contents)))
|
||||
if thisErr != nil {
|
||||
err = thisErr
|
||||
}
|
||||
@@ -741,7 +744,7 @@ func main() {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("writing %v bytes to %v block services took %v (%fGB/s)", *testBlockWriteSize, len(conns), time.Since(t), (float64(*testBlockWriteSize*uint(len(conns)))/1e9)/elapsed.Seconds())
|
||||
l.Info("writing %v bytes to %v block services took %v (%fGB/s)", *testBlockWriteSize, len(conns), time.Since(t), (float64(*testBlockWriteSize*uint(len(conns)))/1e9)/elapsed.Seconds())
|
||||
}
|
||||
commands["test-block-write"] = commandSpec{
|
||||
flags: testBlockWriteCmd,
|
||||
@@ -778,8 +781,8 @@ func main() {
|
||||
blockServiceIds = append(blockServiceIds, msgs.BlockServiceId(*blockserviceFlagsId))
|
||||
}
|
||||
if *blockserviceFlagsFailureDomain != "" || *blockserviceFlagsPathPrefix != "" {
|
||||
log.Info("requesting block services")
|
||||
blockServicesResp, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
l.Info("requesting block services")
|
||||
blockServicesResp, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.AllBlockServicesDeprecatedReq{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -817,10 +820,10 @@ func main() {
|
||||
}
|
||||
mask = uint8(flagMask)
|
||||
}
|
||||
conn := client.MakeShuckleConn(log, nil, *shuckleAddress, 1)
|
||||
conn := client.MakeShuckleConn(l, nil, *shuckleAddress, 1)
|
||||
defer conn.Close()
|
||||
for _, bsId := range blockServiceIds {
|
||||
log.Info("setting flags %v with mask %v for block service %v", flag, msgs.BlockServiceFlags(mask), bsId)
|
||||
l.Info("setting flags %v with mask %v for block service %v", flag, msgs.BlockServiceFlags(mask), bsId)
|
||||
_, err := conn.Request(&msgs.SetBlockServiceFlagsReq{
|
||||
Id: bsId,
|
||||
Flags: flag,
|
||||
@@ -844,8 +847,8 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
bsId := msgs.BlockServiceId(*decommissionBlockserviceId)
|
||||
log.Info("decommissioning block service %v using dedicated rate-limited endpoint", bsId)
|
||||
_, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.DecommissionBlockServiceReq{
|
||||
l.Info("decommissioning block service %v using dedicated rate-limited endpoint", bsId)
|
||||
_, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.DecommissionBlockServiceReq{
|
||||
Id: bsId,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -870,8 +873,8 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
bsId := msgs.BlockServiceId(*updateBlockservicePathId)
|
||||
log.Info("setting path to %s for block service %v", *updateBlockserviceNewPath, bsId)
|
||||
_, err := client.ShuckleRequest(log, nil, *shuckleAddress, &msgs.UpdateBlockServicePathReq{
|
||||
l.Info("setting path to %s for block service %v", *updateBlockserviceNewPath, bsId)
|
||||
_, err := client.ShuckleRequest(l, nil, *shuckleAddress, &msgs.UpdateBlockServicePathReq{
|
||||
Id: bsId,
|
||||
NewPath: *updateBlockserviceNewPath,
|
||||
})
|
||||
@@ -888,9 +891,9 @@ func main() {
|
||||
fileSizesBrief := fileSizesCmd.Bool("brief", false, "")
|
||||
fileSizesRun := func() {
|
||||
if *fileSizesBrief {
|
||||
outputBriefFileSizes(log, getClient())
|
||||
outputBriefFileSizes(l, getClient())
|
||||
} else {
|
||||
outputFullFileSizes(log, getClient())
|
||||
outputFullFileSizes(l, getClient())
|
||||
}
|
||||
}
|
||||
commands["file-sizes"] = commandSpec{
|
||||
@@ -912,13 +915,13 @@ func main() {
|
||||
req := msgs.VisitFilesReq{}
|
||||
resp := msgs.VisitFilesResp{}
|
||||
for {
|
||||
if err := getClient().ShardRequest(log, shid, &req, &resp); err != nil {
|
||||
if err := getClient().ShardRequest(l, shid, &req, &resp); err != nil {
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
atomic.AddUint64(&numFiles, uint64(len(resp.Ids)))
|
||||
if atomic.AddUint64(&numReqs, 1)%uint64(1_000_000) == 0 {
|
||||
log.Info("went through %v files, %v reqs (%0.2f files/s, %0.2f req/s)", numFiles, numReqs, float64(numFiles)/float64(time.Since(startedAt).Seconds()), float64(numReqs)/float64(time.Since(startedAt).Seconds()))
|
||||
l.Info("went through %v files, %v reqs (%0.2f files/s, %0.2f req/s)", numFiles, numReqs, float64(numFiles)/float64(time.Since(startedAt).Seconds()), float64(numReqs)/float64(time.Since(startedAt).Seconds()))
|
||||
}
|
||||
req.BeginId = resp.NextId
|
||||
if req.BeginId == 0 {
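The count-files loop pages through each shard with a cursor: BeginId starts at zero, every VisitFiles response carries NextId, and the scan is finished once the cursor wraps back to zero. The pattern in isolation, with a stubbed page fetch standing in for the ShardRequest round trip:

package main

import "fmt"

// visitPage stands in for one request/response: it returns a page of ids plus
// the cursor to resume from, where zero means the scan is complete.
func visitPage(begin uint64) (ids []uint64, next uint64) {
	const total = 10
	for i := begin; i < begin+4 && i < total; i++ {
		ids = append(ids, i)
	}
	if begin+4 < total {
		next = begin + 4
	}
	return ids, next
}

func main() {
	numFiles := 0
	begin := uint64(0)
	for {
		ids, next := visitPage(begin)
		numFiles += len(ids)
		begin = next
		if begin == 0 {
			break
		}
	}
	fmt.Println("found", numFiles, "files")
}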
@@ -936,7 +939,7 @@ func main() {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("found %v files", numFiles)
|
||||
l.Info("found %v files", numFiles)
|
||||
}
|
||||
commands["count-files"] = commandSpec{
|
||||
flags: countFilesCmd,
|
||||
@@ -964,7 +967,7 @@ func main() {
|
||||
var numSnapshotFiles uint64
|
||||
var totalSnapshotLogicalSize uint64
|
||||
var totalSnapshotPhysicalSize uint64
|
||||
histogram := lib.NewHistogram(256, 255, 1.15) // max: ~900PB
|
||||
histogram := timing.NewHistogram(256, 255, 1.15) // max: ~900PB
|
||||
histoLogicalSizeBins := make([]uint64, 256)
|
||||
histoPhysicalSizeBins := make([]uint64, 256)
|
||||
histoCountBins := make([]uint64, 256)
|
||||
@@ -973,20 +976,20 @@ func main() {
|
||||
printReport := func() {
|
||||
if *duSnapshot {
|
||||
if *duPhysical {
|
||||
log.Info("went through %v files (%v current logical, %v current physical, %v snapshot logical, %v snapshot physical, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalPhysicalSize), formatSize(totalSnapshotLogicalSize), formatSize(totalSnapshotPhysicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
l.Info("went through %v files (%v current logical, %v current physical, %v snapshot logical, %v snapshot physical, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalPhysicalSize), formatSize(totalSnapshotLogicalSize), formatSize(totalSnapshotPhysicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
} else {
|
||||
log.Info("went through %v files (%v current, %v snapshot, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalSnapshotLogicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
l.Info("went through %v files (%v current, %v snapshot, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalSnapshotLogicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
}
|
||||
} else {
|
||||
if *duPhysical {
|
||||
log.Info("went through %v files (%v logical, %v physical, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalPhysicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
l.Info("went through %v files (%v logical, %v physical, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), formatSize(totalPhysicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
} else {
|
||||
log.Info("went through %v files (%v, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
l.Info("went through %v files (%v, %0.2f files/s), %v directories", numFiles, formatSize(totalLogicalSize), float64(numFiles)/float64(time.Since(startedAt).Seconds()), numDirectories)
|
||||
}
|
||||
}
|
||||
}
|
||||
err = client.Parwalk(
|
||||
log,
|
||||
l,
|
||||
c,
|
||||
&client.ParwalkOptions{
|
||||
WorkersPerShard: *duWorkersPerSshard,
|
||||
@@ -1007,7 +1010,7 @@ func main() {
|
||||
}
|
||||
atomic.AddUint64(&numFiles, 1)
|
||||
resp := msgs.StatFileResp{}
|
||||
if err := c.ShardRequest(log, id.Shard(), &msgs.StatFileReq{Id: id}, &resp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &msgs.StatFileReq{Id: id}, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
if current {
|
||||
@@ -1026,7 +1029,7 @@ func main() {
|
||||
fileSpansResp := msgs.FileSpansResp{}
|
||||
physicalSize := uint64(0)
|
||||
for {
|
||||
if err := c.ShardRequest(log, id.Shard(), &fileSpansReq, &fileSpansResp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &fileSpansReq, &fileSpansResp); err != nil {
|
||||
return err
|
||||
}
|
||||
for spanIx := range fileSpansResp.Spans {
|
||||
@@ -1071,7 +1074,7 @@ func main() {
|
||||
}
|
||||
printReport()
|
||||
if *duHisto != "" {
|
||||
log.Info("writing size histogram to %q", *duHisto)
|
||||
l.Info("writing size histogram to %q", *duHisto)
|
||||
histoCsvBuf := bytes.NewBuffer([]byte{})
|
||||
if *duPhysical {
|
||||
fmt.Fprintf(histoCsvBuf, "logical_upper_bound,file_count,total_logical_size,total_physical_size\n")
|
||||
@@ -1086,7 +1089,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
if err := os.WriteFile(*duHisto, histoCsvBuf.Bytes(), 0644); err != nil {
|
||||
log.ErrorNoAlert("could not write histo file %q, will print histogram here: %v", *duHisto, err)
|
||||
l.ErrorNoAlert("could not write histo file %q, will print histogram here: %v", *duHisto, err)
|
||||
fmt.Print(histoCsvBuf.Bytes())
|
||||
panic(err)
|
||||
}
|
||||
@@ -1110,7 +1113,7 @@ func main() {
|
||||
locationSize := make(map[msgs.Location]uint64)
|
||||
|
||||
for {
|
||||
if err := c.ShardRequest(log, id.Shard(), &fileSpansReq, &fileSpansResp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &fileSpansReq, &fileSpansResp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for spanIx := range fileSpansResp.Spans {
|
||||
@@ -1128,9 +1131,9 @@ func main() {
|
||||
}
|
||||
fileSpansReq.ByteOffset = fileSpansResp.NextOffset
|
||||
}
|
||||
log.Info("Done fetching locations for file %v", id)
|
||||
l.Info("Done fetching locations for file %v", id)
|
||||
for locId, size := range locationSize {
|
||||
log.Info("Location %v has size %v", locId, size)
|
||||
l.Info("Location %v has size %v", locId, size)
|
||||
}
|
||||
}
|
||||
commands["file-locations"] = commandSpec{
|
||||
@@ -1169,7 +1172,7 @@ func main() {
|
||||
}
|
||||
c := getClient()
|
||||
err := client.Parwalk(
|
||||
log,
|
||||
l,
|
||||
c,
|
||||
&client.ParwalkOptions{
|
||||
WorkersPerShard: *findWorkersPerShard,
|
||||
@@ -1195,10 +1198,10 @@ func main() {
|
||||
}
|
||||
statReq := msgs.StatFileReq{Id: id}
|
||||
statResp := msgs.StatFileResp{}
|
||||
if err := c.ShardRequest(log, id.Shard(), &statReq, &statResp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &statReq, &statResp); err != nil {
|
||||
if err == msgs.FILE_NOT_FOUND {
|
||||
// could get collected
|
||||
log.Info("file %q disappeared", path.Join(parentPath, name))
|
||||
l.Info("file %q disappeared", path.Join(parentPath, name))
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
@@ -1217,7 +1220,7 @@ func main() {
|
||||
resp := msgs.LocalFileSpansResp{}
|
||||
found := false
|
||||
for {
|
||||
if err := c.ShardRequest(log, id.Shard(), &req, &resp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &req, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, span := range resp.Spans {
|
||||
@@ -1253,7 +1256,7 @@ func main() {
|
||||
}
|
||||
resp := msgs.LocalFileSpansResp{}
|
||||
for {
|
||||
if err := c.ShardRequest(log, id.Shard(), &req, &resp); err != nil {
|
||||
if err := c.ShardRequest(l, id.Shard(), &req, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, span := range resp.Spans {
|
||||
@@ -1263,8 +1266,8 @@ func main() {
|
||||
body := span.Body.(*msgs.FetchedBlocksSpan)
|
||||
for _, block := range body.Blocks {
|
||||
blockService := &resp.BlockServices[block.BlockServiceIx]
|
||||
if err := c.CheckBlock(log, blockService, block.BlockId, body.CellSize*uint32(body.Stripes), block.Crc); err != nil {
|
||||
log.ErrorNoAlert("while checking block %v in file %v got error %v", block.BlockId, path.Join(parentPath, name), err)
|
||||
if err := c.CheckBlock(l, blockService, block.BlockId, body.CellSize*uint32(body.Stripes), block.Crc); err != nil {
|
||||
l.ErrorNoAlert("while checking block %v in file %v got error %v", block.BlockId, path.Join(parentPath, name), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1274,7 +1277,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("%v %q", id, path.Join(parentPath, name))
|
||||
l.Info("%v %q", id, path.Join(parentPath, name))
|
||||
return nil
|
||||
},
|
||||
)
|
||||
@@ -1292,10 +1295,10 @@ func main() {
|
||||
scrubFileRun := func() {
|
||||
file := msgs.InodeId(*scrubFileId)
|
||||
stats := &cleanup.ScrubState{}
|
||||
if err := cleanup.ScrubFile(log, getClient(), stats, file); err != nil {
|
||||
if err := cleanup.ScrubFile(l, getClient(), stats, file); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("scrub stats: %+v", stats)
|
||||
l.Info("scrub stats: %+v", stats)
|
||||
}
|
||||
commands["scrub-file"] = commandSpec{
|
||||
flags: scrubFileCmd,
|
||||
@@ -1305,7 +1308,7 @@ func main() {
|
||||
scrubCmd := flag.NewFlagSet("scrub", flag.ExitOnError)
|
||||
scrubRun := func() {
|
||||
stats := cleanup.ScrubState{}
|
||||
if err := cleanup.ScrubFilesInAllShards(log, getClient(), &cleanup.ScrubOptions{NumWorkersPerShard: 10}, nil, &stats); err != nil {
|
||||
if err := cleanup.ScrubFilesInAllShards(l, getClient(), &cleanup.ScrubOptions{NumWorkersPerShard: 10}, nil, &stats); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -1327,7 +1330,7 @@ func main() {
|
||||
panic(err)
|
||||
}
|
||||
for _, c := range counters {
|
||||
log.Info("%v: Success=%v Attempts=%v Timeouts=%v Failures=%v NetFailures=%v", msgs.ShardMessageKind(c.Kind), c.Success, c.Attempts, c.Timeouts, c.Failures, c.NetFailures)
|
||||
l.Info("%v: Success=%v Attempts=%v Timeouts=%v Failures=%v NetFailures=%v", msgs.ShardMessageKind(c.Kind), c.Success, c.Attempts, c.Timeouts, c.Failures, c.NetFailures)
|
||||
}
|
||||
}
|
||||
{
|
||||
@@ -1340,7 +1343,7 @@ func main() {
|
||||
panic(err)
|
||||
}
|
||||
for _, c := range counters {
|
||||
log.Info("%v: Success=%v Attempts=%v Timeouts=%v Failures=%v NetFailures=%v", msgs.CDCMessageKind(c.Kind), c.Success, c.Attempts, c.Timeouts, c.Failures, c.NetFailures)
|
||||
l.Info("%v: Success=%v Attempts=%v Timeouts=%v Failures=%v NetFailures=%v", msgs.CDCMessageKind(c.Kind), c.Success, c.Attempts, c.Timeouts, c.Failures, c.NetFailures)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1380,7 +1383,7 @@ func main() {
|
||||
panic(err)
|
||||
}
|
||||
for i := range latencies {
|
||||
log.Info("%v: p50=%v p90=%v p99=%v", msgs.ShardMessageKind(latencies[i].Kind), p(&header, &latencies[i], 0.5), p(&header, &latencies[i], 0.9), p(&header, &latencies[i], 0.99))
|
||||
l.Info("%v: p50=%v p90=%v p99=%v", msgs.ShardMessageKind(latencies[i].Kind), p(&header, &latencies[i], 0.5), p(&header, &latencies[i], 0.9), p(&header, &latencies[i], 0.99))
|
||||
}
|
||||
}
|
||||
{
|
||||
@@ -1393,7 +1396,7 @@ func main() {
|
||||
panic(err)
|
||||
}
|
||||
for i := range latencies {
|
||||
log.Info("%v: p50=%v p90=%v p99=%v", msgs.CDCMessageKind(latencies[i].Kind), p(&header, &latencies[i], 0.5), p(&header, &latencies[i], 0.9), p(&header, &latencies[i], 0.99))
|
||||
l.Info("%v: p50=%v p90=%v p99=%v", msgs.CDCMessageKind(latencies[i].Kind), p(&header, &latencies[i], 0.5), p(&header, &latencies[i], 0.9), p(&header, &latencies[i], 0.99))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1408,11 +1411,11 @@ func main() {
|
||||
defragFileRun := func() {
|
||||
c := getClient()
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
bufPool := lib.NewBufPool()
|
||||
bufPool := bufpool.NewBufPool()
|
||||
stats := &cleanup.DefragStats{}
|
||||
alert := log.NewNCAlert(0)
|
||||
alert.SetAppType(lib.XMON_NEVER)
|
||||
id, _, parent, err := c.ResolvePathWithParent(log, *defragFilePath)
|
||||
alert := l.NewNCAlert(0)
|
||||
alert.SetAppType(log.XMON_NEVER)
|
||||
id, _, parent, err := c.ResolvePathWithParent(l, *defragFilePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -1429,18 +1432,18 @@ func main() {
|
||||
WorkersPerShard: 5,
|
||||
StartFrom: startTime,
|
||||
}
|
||||
if err := cleanup.DefragFiles(log, c, bufPool, dirInfoCache, stats, alert, &options, *defragFilePath); err != nil {
|
||||
if err := cleanup.DefragFiles(l, c, bufPool, dirInfoCache, stats, alert, &options, *defragFilePath); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
if *defragFileFrom != "" {
|
||||
panic(fmt.Errorf("cannot provide -from with a file -path"))
|
||||
}
|
||||
if err := cleanup.DefragFile(log, c, bufPool, dirInfoCache, stats, alert, parent, id, *defragFilePath); err != nil {
|
||||
if err := cleanup.DefragFile(l, c, bufPool, dirInfoCache, stats, alert, parent, id, *defragFilePath); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
log.Info("defrag stats: %+v", stats)
|
||||
l.Info("defrag stats: %+v", stats)
|
||||
}
|
||||
commands["defrag"] = commandSpec{
|
||||
flags: defragFileCmd,
|
||||
@@ -1452,14 +1455,14 @@ func main() {
|
||||
defragSpansRun := func() {
|
||||
c := getClient()
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
bufPool := lib.NewBufPool()
|
||||
bufPool := bufpool.NewBufPool()
|
||||
stats := &cleanup.DefragSpansStats{}
|
||||
alert := log.NewNCAlert(0)
|
||||
alert.SetAppType(lib.XMON_NEVER)
|
||||
if err := cleanup.DefragSpans(log, c, bufPool, dirInfoCache, stats, alert, *defragSpansPath); err != nil {
|
||||
alert := l.NewNCAlert(0)
|
||||
alert.SetAppType(log.XMON_NEVER)
|
||||
if err := cleanup.DefragSpans(l, c, bufPool, dirInfoCache, stats, alert, *defragSpansPath); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("defrag stats: %+v", stats)
|
||||
l.Info("defrag stats: %+v", stats)
|
||||
}
|
||||
commands["defrag-spans"] = commandSpec{
|
||||
flags: defragSpansCmd,
|
||||
@@ -1493,7 +1496,7 @@ func main() {
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
dirId, err := c.ResolvePath(log, path.Dir(p))
|
||||
dirId, err := c.ResolvePath(l, path.Dir(p))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -1503,25 +1506,25 @@ func main() {
|
||||
StartName: path.Base(p),
|
||||
}
|
||||
resp := msgs.FullReadDirResp{}
|
||||
if err := c.ShardRequest(log, dirId.Shard(), &req, &resp); err != nil {
|
||||
if err := c.ShardRequest(l, dirId.Shard(), &req, &resp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if len(resp.Results) < 2 {
|
||||
log.Info("%q: found < 2 edges, skipping: %+v", p, resp.Results)
|
||||
l.Info("%q: found < 2 edges, skipping: %+v", p, resp.Results)
|
||||
continue
|
||||
}
|
||||
// if we already have a current edge, no need to do anything
|
||||
if resp.Results[0].Current {
|
||||
log.Info("%q: a current edge already exists, skipping", p)
|
||||
l.Info("%q: a current edge already exists, skipping", p)
|
||||
continue
|
||||
}
|
||||
// otherwise, we expect a deleted edge, and then an owned edge
|
||||
if resp.Results[0].TargetId.Id() != msgs.NULL_INODE_ID {
|
||||
log.Info("%q: last edge is not a deletion edge, skipping: %+v", p, resp.Results[0])
|
||||
l.Info("%q: last edge is not a deletion edge, skipping: %+v", p, resp.Results[0])
|
||||
continue
|
||||
}
|
||||
if !resp.Results[1].TargetId.Extra() {
|
||||
log.Info("%q: second to last edge is not an owned edge, skipping: %+v", p, resp.Results[1])
|
||||
l.Info("%q: second to last edge is not an owned edge, skipping: %+v", p, resp.Results[1])
|
||||
continue
|
||||
}
|
||||
// We've got everything we need, do the resurrection
|
||||
@@ -1532,10 +1535,10 @@ func main() {
|
||||
OldCreationTime: resp.Results[1].CreationTime,
|
||||
NewName: path.Base(p),
|
||||
}
|
||||
if err := c.ShardRequest(log, dirId.Shard(), &resurrectReq, &msgs.SameDirectoryRenameSnapshotResp{}); err != nil {
|
||||
if err := c.ShardRequest(l, dirId.Shard(), &resurrectReq, &msgs.SameDirectoryRenameSnapshotResp{}); err != nil {
|
||||
panic(fmt.Errorf("could not resurrect %q: %w", p, err))
|
||||
}
|
||||
log.Info("%q: resurrected", p)
|
||||
l.Info("%q: resurrected", p)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -1552,7 +1555,7 @@ func main() {
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
if seenFiles%30_000 == 0 {
|
||||
log.Info("Went through %v files (%0.2f files/sec)", seenFiles, 1000.0*float64(seenFiles)/float64(time.Since(t0).Milliseconds()))
|
||||
l.Info("Went through %v files (%0.2f files/sec)", seenFiles, 1000.0*float64(seenFiles)/float64(time.Since(t0).Milliseconds()))
|
||||
}
|
||||
seenFiles++
|
||||
if strings.TrimSpace(scanner.Text()) == "" {
|
||||
@@ -1571,7 +1574,7 @@ func main() {
|
||||
|
||||
resolveSamplePathsCmd := flag.NewFlagSet("resolve-sample-paths", flag.ExitOnError)
|
||||
resolveSamplePathsRun := func() {
|
||||
resolver := filesamples.NewPathResolver(getClient(), log)
|
||||
resolver := filesamples.NewPathResolver(getClient(), l)
|
||||
resolver.ResolveFilePaths(os.Stdin, os.Stdout)
|
||||
}
|
||||
commands["resolve-sample-paths"] = commandSpec{
|
||||
@@ -1600,14 +1603,14 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
log = lib.NewLogger(os.Stderr, &lib.LoggerOptions{Level: level})
|
||||
l = log.NewLogger(os.Stderr, &log.LoggerOptions{Level: level})
|
||||
|
||||
spec, found := commands[flag.Args()[0]]
|
||||
if !found {
|
||||
|
||||
@@ -6,15 +6,17 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
golog "log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/flags"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fs"
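With the logging code now living in xtx/ternfs/log, the short name log belongs to the project package in this file, so the standard library logger is imported under the golog alias. The same constructor call the fuse options use further down, reachable through the alias:

package main

import (
	golog "log"
	"os"
)

func main() {
	// Identical flags to the fuseOptions.Logger line in the diff; the alias
	// just frees the name "log" for the project's own package.
	l := golog.New(os.Stderr, "fuse", golog.Ldate|golog.Ltime|golog.Lmicroseconds|golog.Lshortfile)
	l.Println("fuse mount ready")
}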
@@ -22,9 +24,9 @@ import (
|
||||
)
|
||||
|
||||
var c *client.Client
|
||||
var logger *lib.Logger
|
||||
var logger *log.Logger
|
||||
var dirInfoCache *client.DirInfoCache
|
||||
var bufPool *lib.BufPool
|
||||
var bufPool *bufpool.BufPool
|
||||
|
||||
func ternErrToErrno(err error) syscall.Errno {
|
||||
switch err {
|
||||
@@ -291,7 +293,7 @@ type transientFile struct {
|
||||
cookie [8]byte
|
||||
dir msgs.InodeId
|
||||
name string
|
||||
data *lib.Buf // null if it has been flushed
|
||||
data *bufpool.Buf // null if it has been flushed
|
||||
size uint64
|
||||
writeErr syscall.Errno
|
||||
}
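transientFile now holds a *bufpool.Buf, and the mount sets up a shared bufpool.BufPool further down. A rough sketch of what a sync.Pool-backed buffer pool with those two names can look like; the internals here are guesses, not the actual xtx/ternfs/bufpool code:

package main

import (
	"fmt"
	"sync"
)

// Buf is a reusable byte buffer handed out by BufPool.
type Buf struct {
	data []byte
}

func (b *Buf) Bytes() []byte { return b.data }

type BufPool struct {
	pool sync.Pool
}

func NewBufPool() *BufPool {
	return &BufPool{pool: sync.Pool{New: func() any { return &Buf{} }}}
}

// Get returns a buffer resized to n bytes, reusing capacity when possible.
func (p *BufPool) Get(n int) *Buf {
	b := p.pool.Get().(*Buf)
	if cap(b.data) < n {
		b.data = make([]byte, n)
	}
	b.data = b.data[:n]
	return b
}

// Put hands the buffer back for reuse once the caller is done with it.
func (p *BufPool) Put(b *Buf) {
	p.pool.Put(b)
}

func main() {
	pool := NewBufPool()
	buf := pool.Get(1 << 20)
	fmt.Println(len(buf.Bytes())) // 1048576
	pool.Put(buf)
}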
@@ -685,7 +687,7 @@ func main() {
|
||||
logFile := flag.String("log-file", "", "Redirect logging output to given file.")
|
||||
signalParent := flag.Bool("signal-parent", false, "If passed, will send USR1 to parent when ready -- useful for tests.")
|
||||
shuckleAddress := flag.String("shuckle", "", "Shuckle address (host:port).")
|
||||
var addresses lib.StringArrayFlags
|
||||
var addresses flags.StringArrayFlags
|
||||
flag.Var(&addresses, "addr", "Local addresses (up to two) to connect from.")
|
||||
profileFile := flag.String("profile-file", "", "If set, will write CPU profile here.")
|
||||
syslog := flag.Bool("syslog", false, "")
|
||||
@@ -726,14 +728,14 @@ func main() {
|
||||
}
|
||||
defer logOut.Close()
|
||||
}
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
logger = lib.NewLogger(logOut, &lib.LoggerOptions{Level: level, Syslog: *syslog, PrintQuietAlerts: true})
|
||||
logger = log.NewLogger(logOut, &log.LoggerOptions{Level: level, Syslog: *syslog, PrintQuietAlerts: true})
|
||||
|
||||
if *profileFile != "" {
|
||||
f, err := os.Create(*profileFile)
|
||||
@@ -746,7 +748,7 @@ func main() {
|
||||
|
||||
var localAddresses msgs.AddrsInfo
|
||||
if len(addresses) > 0 {
|
||||
ownIp1, port1, err := lib.ParseIPV4Addr(addresses[0])
|
||||
ownIp1, port1, err := flags.ParseIPV4Addr(addresses[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -754,7 +756,7 @@ func main() {
|
||||
var ownIp2 [4]byte
|
||||
var port2 uint16
|
||||
if len(addresses) == 2 {
|
||||
ownIp2, port2, err = lib.ParseIPV4Addr(addresses[1])
|
||||
ownIp2, port2, err = flags.ParseIPV4Addr(addresses[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -784,13 +786,13 @@ func main() {
|
||||
|
||||
dirInfoCache = client.NewDirInfoCache()
|
||||
|
||||
bufPool = lib.NewBufPool()
|
||||
bufPool = bufpool.NewBufPool()
|
||||
|
||||
root := ternNode{
|
||||
id: msgs.ROOT_DIR_INODE_ID,
|
||||
}
|
||||
fuseOptions := &fs.Options{
|
||||
Logger: log.New(os.Stderr, "fuse", log.Ldate|log.Ltime|log.Lmicroseconds|log.Lshortfile),
|
||||
Logger: golog.New(os.Stderr, "fuse", golog.Ldate|golog.Ltime|golog.Lmicroseconds|golog.Lshortfile),
|
||||
AttrTimeout: fileAttrCacheTimeFlag,
|
||||
EntryTimeout: dirAttrCacheTimeFlag,
|
||||
MountOptions: fuse.MountOptions{
|
||||
|
||||
@@ -11,10 +11,14 @@ import (
|
||||
"path"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/cleanup"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/flags"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/timing"
|
||||
"xtx/ternfs/wyhash"
|
||||
|
||||
"net/http"
|
||||
@@ -80,7 +84,7 @@ func main() {
|
||||
trace := flag.Bool("trace", false, "Enables debug logging.")
|
||||
logFile := flag.String("log-file", "", "File to log to, stdout if not provided.")
|
||||
shuckleAddress := flag.String("shuckle", "", "Shuckle address (host:port).")
|
||||
var addresses lib.StringArrayFlags
|
||||
var addresses flags.StringArrayFlags
|
||||
flag.Var(&addresses, "addr", "Local addresses (up to two) to connect from.")
|
||||
numShuckleHandlers := flag.Uint("num-shuckle-handlers", 10, "Number of parallel shuckle requests")
|
||||
syslog := flag.Bool("syslog", false, "")
|
||||
@@ -121,7 +125,7 @@ func main() {
|
||||
|
||||
var localAddresses msgs.AddrsInfo
|
||||
if len(addresses) > 0 {
|
||||
ownIp1, port1, err := lib.ParseIPV4Addr(addresses[0])
|
||||
ownIp1, port1, err := flags.ParseIPV4Addr(addresses[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -129,7 +133,7 @@ func main() {
|
||||
var ownIp2 [4]byte
|
||||
var port2 uint16
|
||||
if len(addresses) == 2 {
|
||||
ownIp2, port2, err = lib.ParseIPV4Addr(addresses[1])
|
||||
ownIp2, port2, err = flags.ParseIPV4Addr(addresses[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -152,7 +156,7 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
var influxDB *lib.InfluxDB
|
||||
var influxDB *log.InfluxDB
|
||||
if *influxDBOrigin == "" {
|
||||
if *influxDBOrg != "" || *influxDBBucket != "" {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
@@ -163,7 +167,7 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
influxDB = &lib.InfluxDB{
|
||||
influxDB = &log.InfluxDB{
|
||||
Origin: *influxDBOrigin,
|
||||
Org: *influxDBOrg,
|
||||
Bucket: *influxDBBucket,
|
||||
@@ -185,21 +189,21 @@ func main() {
|
||||
}
|
||||
defer logOut.Close()
|
||||
}
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
|
||||
log := lib.NewLogger(logOut, &lib.LoggerOptions{Level: level, Syslog: *syslog, XmonAddr: *xmon, AppType: lib.XMON_DAYTIME, AppInstance: *appInstance})
|
||||
l := log.NewLogger(logOut, &log.LoggerOptions{Level: level, Syslog: *syslog, XmonAddr: *xmon, AppType: log.XMON_DAYTIME, AppInstance: *appInstance})
|
||||
|
||||
if *mtu != 0 {
|
||||
client.SetMTU(*mtu)
|
||||
}
|
||||
|
||||
log.Info("Will run GC in all shards")
|
||||
l.Info("Will run GC in all shards")
|
||||
|
||||
if err := os.Mkdir(*dataDir, 0777); err != nil && !os.IsExist(err) {
|
||||
panic(err)
|
||||
@@ -231,7 +235,7 @@ func main() {
|
||||
blockTimeouts.Overall = 10 * time.Minute
|
||||
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
c, err := client.NewClient(log, &shuckleTimeouts, *shuckleAddress, localAddresses)
|
||||
c, err := client.NewClient(l, &shuckleTimeouts, *shuckleAddress, localAddresses)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -268,7 +272,7 @@ func main() {
|
||||
|
||||
// store the state
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
@@ -289,7 +293,7 @@ func main() {
|
||||
if err := tx.Commit(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Info("stored state, waiting one minute")
|
||||
l.Info("stored state, waiting one minute")
|
||||
time.Sleep(time.Minute)
|
||||
}
|
||||
}()
|
||||
@@ -301,10 +305,10 @@ func main() {
|
||||
}
|
||||
defer httpListener.Close()
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
terminateChan <- http.Serve(httpListener, nil)
|
||||
}()
|
||||
log.Info("http pprof listener started on port %v", httpListener.Addr().(*net.TCPAddr).Port)
|
||||
l.Info("http pprof listener started on port %v", httpListener.Addr().(*net.TCPAddr).Port)
|
||||
}
|
||||
|
||||
if *collectDirectories {
|
||||
@@ -315,9 +319,9 @@ func main() {
|
||||
for i := 0; i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
if err := cleanup.CollectDirectories(log, c, dirInfoCache, nil, opts, collectDirectoriesState, shid, *collectDirectoriesMinEdgeAge); err != nil {
|
||||
if err := cleanup.CollectDirectories(l, c, dirInfoCache, nil, opts, collectDirectoriesState, shid, *collectDirectoriesMinEdgeAge); err != nil {
|
||||
panic(fmt.Errorf("could not collect directories in shard %v: %v", shid, err))
|
||||
}
|
||||
}
|
||||
@@ -332,22 +336,22 @@ func main() {
|
||||
for i := 0; i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
alert := log.NewNCAlert(10 * time.Second)
|
||||
defer log.ClearNC(alert)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
alert := l.NewNCAlert(10 * time.Second)
|
||||
defer l.ClearNC(alert)
|
||||
timesFailed := 0
|
||||
for {
|
||||
if err := cleanup.DestructFiles(log, c, opts, destructFilesState, shid); err != nil {
|
||||
if err := cleanup.DestructFiles(l, c, opts, destructFilesState, shid); err != nil {
|
||||
timesFailed++
|
||||
if timesFailed == 5 {
|
||||
log.RaiseNC(alert, "could not destruct files after 5 attempts. last error: %v", err )
|
||||
l.RaiseNC(alert, "could not destruct files after 5 attempts. last error: %v", err )
|
||||
}
|
||||
log.Info("destructing files in shard %v failed, sleeping for 10 minutes", shid)
|
||||
l.Info("destructing files in shard %v failed, sleeping for 10 minutes", shid)
|
||||
time.Sleep(10 * time.Minute)
|
||||
} else {
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
timesFailed = 0
|
||||
log.Info("finished destructing in shard %v, sleeping for one hour", shid)
|
||||
l.Info("finished destructing in shard %v, sleeping for one hour", shid)
|
||||
time.Sleep(time.Hour)
|
||||
}
|
||||
}
|
||||
@@ -356,21 +360,21 @@ func main() {
|
||||
}
|
||||
if *zeroBlockServices {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
// just do that once an hour, we don't need this often.
|
||||
waitFor := time.Second * time.Duration(rand.Uint64()%(60*60))
|
||||
log.Info("waiting %v before collecting zero block service files", waitFor)
|
||||
l.Info("waiting %v before collecting zero block service files", waitFor)
|
||||
time.Sleep(waitFor)
|
||||
if err := cleanup.CollectZeroBlockServiceFiles(log, c, zeroBlockServiceFilesStats); err != nil {
|
||||
log.RaiseAlert("could not collecting zero block service files: %v", err)
|
||||
if err := cleanup.CollectZeroBlockServiceFiles(l, c, zeroBlockServiceFilesStats); err != nil {
|
||||
l.RaiseAlert("could not collecting zero block service files: %v", err)
|
||||
}
|
||||
log.Info("finished zero block services cycle, will restart")
|
||||
l.Info("finished zero block services cycle, will restart")
|
||||
}
|
||||
}()
|
||||
}
|
||||
if *scrub {
|
||||
rateLimit := lib.NewRateLimit(&lib.RateLimitOpts{
|
||||
rateLimit := timing.NewRateLimit(&timing.RateLimitOpts{
|
||||
RefillInterval: time.Second,
|
||||
Refill: 100000, // 100k blocks per second scrubs in ~1 month right now (100 billion blocks)
|
||||
BucketSize: 100000 * 100,
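The scrubber throttles itself with a limiter configured by RefillInterval, Refill and BucketSize, which reads like a token bucket: 100k tokens added per second, bursts of up to 10M. That reading is an assumption about xtx/ternfs/timing, not its actual implementation; a self-contained token-bucket sketch using the same option names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type RateLimitOpts struct {
	RefillInterval time.Duration
	Refill         uint64 // tokens added per interval
	BucketSize     uint64 // maximum burst
}

type RateLimit struct {
	mu         sync.Mutex
	opts       RateLimitOpts
	tokens     uint64
	lastRefill time.Time
}

func NewRateLimit(opts *RateLimitOpts) *RateLimit {
	return &RateLimit{opts: *opts, tokens: opts.BucketSize, lastRefill: time.Now()}
}

// TryAcquire takes n tokens if available, refilling based on elapsed time.
func (r *RateLimit) TryAcquire(n uint64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	intervals := uint64(time.Since(r.lastRefill) / r.opts.RefillInterval)
	if intervals > 0 {
		r.tokens += intervals * r.opts.Refill
		if r.tokens > r.opts.BucketSize {
			r.tokens = r.opts.BucketSize
		}
		r.lastRefill = r.lastRefill.Add(time.Duration(intervals) * r.opts.RefillInterval)
	}
	if r.tokens < n {
		return false
	}
	r.tokens -= n
	return true
}

func main() {
	rl := NewRateLimit(&RateLimitOpts{RefillInterval: time.Second, Refill: 100000, BucketSize: 100000 * 100})
	fmt.Println(rl.TryAcquire(1)) // true: the bucket starts full
}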
@@ -384,10 +388,10 @@ func main() {
|
||||
for i := 0; i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
if err := cleanup.ScrubFiles(log, c, opts, rateLimit, scrubState, shid); err != nil {
|
||||
log.RaiseAlert("could not scrub files: %v", err)
|
||||
if err := cleanup.ScrubFiles(l, c, opts, rateLimit, scrubState, shid); err != nil {
|
||||
l.RaiseAlert("could not scrub files: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -395,8 +399,8 @@ func main() {
|
||||
}
|
||||
if *migrate {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
migrator := cleanup.Migrator(*shuckleAddress, log, c, uint64(*numMigrators), uint64(*migratorIdx), *numMigrationsPerShard, *migratorLogOnly, *migrateFailureDomain)
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
migrator := cleanup.Migrator(*shuckleAddress, l, c, uint64(*numMigrators), uint64(*migratorIdx), *numMigrationsPerShard, *migratorLogOnly, *migrateFailureDomain)
|
||||
migrator.Run()
|
||||
}()
|
||||
} else {
|
||||
@@ -410,20 +414,20 @@ func main() {
|
||||
storageClass = msgs.StorageClassFromString(*defragStorageClass)
|
||||
}
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(l, terminateChan, recover()) }()
|
||||
for {
|
||||
log.Info("starting to defrag")
|
||||
l.Info("starting to defrag")
|
||||
defragStats = &cleanup.DefragStats{}
|
||||
bufPool := lib.NewBufPool()
|
||||
progressReportAlert := log.NewNCAlert(0)
|
||||
progressReportAlert.SetAppType(lib.XMON_NEVER)
|
||||
bufPool := bufpool.NewBufPool()
|
||||
progressReportAlert := l.NewNCAlert(0)
|
||||
progressReportAlert.SetAppType(log.XMON_NEVER)
|
||||
options := cleanup.DefragOptions{
|
||||
WorkersPerShard: *defragWorkersPerShard,
|
||||
MinSpanSize: uint32(*defragMinSpanSize),
|
||||
StorageClass: storageClass,
|
||||
}
|
||||
cleanup.DefragFiles(log, c, bufPool, dirInfoCache, defragStats, progressReportAlert, &options, "/")
|
||||
log.RaiseAlertAppType(lib.XMON_DAYTIME, "finished one cycle of defragging, will start again")
|
||||
cleanup.DefragFiles(l, c, bufPool, dirInfoCache, defragStats, progressReportAlert, &options, "/")
|
||||
l.RaiseAlertAppType(log.XMON_DAYTIME, "finished one cycle of defragging, will start again")
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -431,10 +435,10 @@ func main() {
|
||||
if influxDB != nil && (*destructFiles || *collectDirectories || *zeroBlockServices || *scrub || *defrag) {
|
||||
// one thing just pushing the stats every minute
|
||||
go func() {
|
||||
metrics := lib.MetricsBuilder{}
|
||||
alert := log.NewNCAlert(10 * time.Second)
|
||||
metrics := log.MetricsBuilder{}
|
||||
alert := l.NewNCAlert(10 * time.Second)
|
||||
for {
|
||||
log.Info("sending stats metrics")
|
||||
l.Info("sending stats metrics")
|
||||
now := time.Now()
|
||||
metrics.Reset()
|
||||
// generic GC metrics
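The GC loop batches its counters with a MetricsBuilder and ships the payload to InfluxDB via SendMetrics. A tiny sketch of a Measurement/Tag/field builder producing InfluxDB line protocol; the Reset/Payload names mirror the diff, everything else is an assumption about the real xtx/ternfs/log type:

package main

import (
	"fmt"
	"strings"
	"time"
)

// MetricsBuilder accumulates one InfluxDB line-protocol line per measurement.
// This sketch supports a single integer field per line.
type MetricsBuilder struct {
	buf strings.Builder
}

func (m *MetricsBuilder) Reset() { m.buf.Reset() }

// Measurement starts a new line: "name,tag=value field=value timestamp".
func (m *MetricsBuilder) Measurement(name string) {
	if m.buf.Len() > 0 {
		m.buf.WriteByte('\n')
	}
	m.buf.WriteString(name)
}

func (m *MetricsBuilder) Tag(key, value string) {
	fmt.Fprintf(&m.buf, ",%s=%s", key, value)
}

func (m *MetricsBuilder) FieldU64(key string, value uint64, ts time.Time) {
	fmt.Fprintf(&m.buf, " %s=%di %d", key, value, ts.UnixNano())
}

func (m *MetricsBuilder) Payload() []byte { return []byte(m.buf.String()) }

func main() {
	m := MetricsBuilder{}
	m.Measurement("eggsfs_transient_files")
	m.Tag("shard", "0")
	m.FieldU64("count", 42, time.Now())
	fmt.Println(string(m.Payload()))
}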
@@ -537,12 +541,12 @@ func main() {
|
||||
}
|
||||
err := influxDB.SendMetrics(metrics.Payload())
|
||||
if err == nil {
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
sleepFor := time.Second * 30
|
||||
log.Info("gc metrics sent, sleeping for %v", sleepFor)
|
||||
l.Info("gc metrics sent, sleeping for %v", sleepFor)
|
||||
time.Sleep(sleepFor)
|
||||
} else {
|
||||
log.RaiseNC(alert, "failed to send gc metrics, will try again in a second: %v", err)
|
||||
l.RaiseNC(alert, "failed to send gc metrics, will try again in a second: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
@@ -552,7 +556,7 @@ func main() {
|
||||
// counting transient files/files/directories
|
||||
go func() {
|
||||
for {
|
||||
log.Info("starting to count files")
|
||||
l.Info("starting to count files")
|
||||
for i := int(countState.Files.Shard); i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
countState.Files.Shard = shid
|
||||
@@ -561,8 +565,8 @@ func main() {
|
||||
count := uint64(0)
|
||||
var err error
|
||||
for {
|
||||
if err = c.ShardRequest(log, shid, &req, &resp); err != nil {
|
||||
log.RaiseAlert("could not get files for shard %v: %v", shid, err)
|
||||
if err = c.ShardRequest(l, shid, &req, &resp); err != nil {
|
||||
l.RaiseAlert("could not get files for shard %v: %v", shid, err)
|
||||
break
|
||||
}
|
||||
count += uint64(len(resp.Ids))
|
||||
@@ -580,7 +584,7 @@ func main() {
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
log.Info("starting to count directories")
|
||||
l.Info("starting to count directories")
|
||||
for i := int(countState.Directories.Shard); i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
countState.Directories.Shard = shid
|
||||
@@ -589,8 +593,8 @@ func main() {
|
||||
count := uint64(0)
|
||||
var err error
|
||||
for {
|
||||
if err = c.ShardRequest(log, shid, &req, &resp); err != nil {
|
||||
log.RaiseAlert("could not get directories for shard %v: %v", shid, err)
|
||||
if err = c.ShardRequest(l, shid, &req, &resp); err != nil {
|
||||
l.RaiseAlert("could not get directories for shard %v: %v", shid, err)
|
||||
break
|
||||
}
|
||||
count += uint64(len(resp.Ids))
|
||||
@@ -606,7 +610,7 @@ func main() {
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
log.Info("starting to count transient files")
|
||||
l.Info("starting to count transient files")
|
||||
for i := int(countState.TransientFiles.Shard); i < 256; i++ {
|
||||
shid := msgs.ShardId(i)
|
||||
countState.TransientFiles.Shard = shid
|
||||
@@ -615,8 +619,8 @@ func main() {
|
||||
count := uint64(0)
|
||||
var err error
|
||||
for {
|
||||
if err = c.ShardRequest(log, shid, &req, &resp); err != nil {
|
||||
log.RaiseAlert("could not get transient files for shard %v: %v", shid, err)
|
||||
if err = c.ShardRequest(l, shid, &req, &resp); err != nil {
|
||||
l.RaiseAlert("could not get transient files for shard %v: %v", shid, err)
|
||||
break
|
||||
}
|
||||
count += uint64(len(resp.Files))
|
||||
@@ -631,14 +635,14 @@ func main() {
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
metrics := lib.MetricsBuilder{}
|
||||
alert := log.NewNCAlert(10 * time.Second)
|
||||
metrics := log.MetricsBuilder{}
|
||||
alert := l.NewNCAlert(10 * time.Second)
|
||||
rand := wyhash.New(rand.Uint64())
|
||||
for {
|
||||
log.Info("sending files/transient files/dirs metrics")
|
||||
l.Info("sending files/transient files/dirs metrics")
|
||||
now := time.Now()
|
||||
metrics.Reset()
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
for i := 0; i < 256; i++ {
|
||||
metrics.Measurement("eggsfs_transient_files")
|
||||
metrics.Tag("shard", fmt.Sprintf("%v", msgs.ShardId(i)))
|
||||
@@ -655,12 +659,12 @@ func main() {
|
||||
}
|
||||
err = influxDB.SendMetrics(metrics.Payload())
|
||||
if err == nil {
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
sleepFor := time.Minute + time.Duration(rand.Uint64() & ^(uint64(1)<<63))%time.Minute
|
||||
log.Info("count metrics sent, sleeping for %v", sleepFor)
|
||||
l.Info("count metrics sent, sleeping for %v", sleepFor)
|
||||
time.Sleep(sleepFor)
|
||||
} else {
|
||||
log.RaiseNC(alert, "failed to send count metrics, will try again in a second: %v", err)
|
||||
l.RaiseNC(alert, "failed to send count metrics, will try again in a second: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
@@ -668,6 +672,6 @@ func main() {
|
||||
}
|
||||
|
||||
mbErr := <-terminateChan
|
||||
log.Info("got error, winding down: %v", mbErr)
|
||||
l.Info("got error, winding down: %v", mbErr)
|
||||
panic(mbErr)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"runtime"
|
||||
"time"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/managedprocess"
|
||||
"xtx/ternfs/msgs"
|
||||
)
|
||||
@@ -90,14 +90,14 @@ func main() {
|
||||
}
|
||||
defer logOut.Close()
|
||||
}
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
log := lib.NewLogger(logOut, &lib.LoggerOptions{Level: level, Syslog: false, PrintQuietAlerts: true})
|
||||
l := log.NewLogger(logOut, &log.LoggerOptions{Level: level, Syslog: false, PrintQuietAlerts: true})
|
||||
|
||||
var cppExes *managedprocess.CppExes
|
||||
var goExes *managedprocess.GoExes
|
||||
@@ -114,8 +114,8 @@ func main() {
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("building shard/cdc/blockservice/shuckle\n")
|
||||
cppExes = managedprocess.BuildCppExes(log, *repoDir, *buildType)
|
||||
goExes = managedprocess.BuildGoExes(log, *repoDir, false)
|
||||
cppExes = managedprocess.BuildCppExes(l, *repoDir, *buildType)
|
||||
goExes = managedprocess.BuildGoExes(l, *repoDir, false)
|
||||
}
|
||||
|
||||
terminateChan := make(chan any, 1)
|
||||
@@ -127,7 +127,7 @@ func main() {
|
||||
|
||||
// Start shuckle
|
||||
shuckleAddress := fmt.Sprintf("127.0.0.1:%v", *shuckleBincodePort)
|
||||
procs.StartShuckle(log, &managedprocess.ShuckleOpts{
|
||||
procs.StartShuckle(l, &managedprocess.ShuckleOpts{
|
||||
Exe: goExes.ShuckleExe,
|
||||
HttpPort: uint16(*shuckleHttpPort),
|
||||
LogLevel: level,
|
||||
@@ -143,20 +143,20 @@ func main() {
|
||||
numLocations := 1
|
||||
if *multiLocation {
|
||||
// Waiting for shuckle
|
||||
err := client.WaitForShuckle(log, shuckleAddress, 10*time.Second)
|
||||
err := client.WaitForShuckle(l, shuckleAddress, 10*time.Second)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to connect to shuckle %v", err))
|
||||
}
|
||||
_, err = client.ShuckleRequest(log, nil, shuckleAddress, &msgs.CreateLocationReq{1, "location1"})
|
||||
_, err = client.ShuckleRequest(l, nil, shuckleAddress, &msgs.CreateLocationReq{1, "location1"})
|
||||
if err != nil {
|
||||
// it's possible location already exits, try renaming it
|
||||
_, err = client.ShuckleRequest(log, nil, shuckleAddress, &msgs.RenameLocationReq{1, "location1"})
|
||||
_, err = client.ShuckleRequest(l, nil, shuckleAddress, &msgs.RenameLocationReq{1, "location1"})
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to create location %v", err))
|
||||
}
|
||||
}
|
||||
procs.StartShuckleProxy(
|
||||
log, &managedprocess.ShuckleProxyOpts{
|
||||
l, &managedprocess.ShuckleProxyOpts{
|
||||
Exe: goExes.ShuckleProxyExe,
|
||||
LogLevel: level,
|
||||
Dir: path.Join(*dataDir, "shuckleproxy"),
|
||||
@@ -211,7 +211,7 @@ func main() {
|
||||
opts.Addr1 = "127.0.0.1:0"
|
||||
opts.Addr2 = "127.0.0.1:0"
|
||||
}
|
||||
procs.StartBlockService(log, &opts)
|
||||
procs.StartBlockService(l, &opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -220,7 +220,7 @@ func main() {
|
||||
waitShuckleFor = 60 * time.Second
|
||||
}
|
||||
fmt.Printf("waiting for block services for %v...\n", waitShuckleFor)
|
||||
client.WaitForBlockServices(log, shuckleAddress, int(*failureDomains**hddBlockServices**flashBlockServices*uint(numLocations)), true, waitShuckleFor)
|
||||
client.WaitForBlockServices(l, shuckleAddress, int(*failureDomains**hddBlockServices**flashBlockServices*uint(numLocations)), true, waitShuckleFor)
|
||||
|
||||
// Start CDC
|
||||
{
|
||||
@@ -251,7 +251,7 @@ func main() {
|
||||
} else {
|
||||
opts.Addr1 = "127.0.0.1:0"
|
||||
}
|
||||
procs.StartCDC(log, *repoDir, &opts)
|
||||
procs.StartCDC(l, *repoDir, &opts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -292,13 +292,13 @@ func main() {
|
||||
} else {
|
||||
opts.Addr1 = "127.0.0.1:0"
|
||||
}
|
||||
procs.StartShard(log, *repoDir, &opts)
|
||||
procs.StartShard(l, *repoDir, &opts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("waiting for shards/cdc for %v...\n", waitShuckleFor)
|
||||
client.WaitForClient(log, shuckleAddress, waitShuckleFor)
|
||||
client.WaitForClient(l, shuckleAddress, waitShuckleFor)
|
||||
|
||||
if !*noFuse {
|
||||
for loc :=0; loc < numLocations; loc++ {
|
||||
@@ -308,7 +308,7 @@ func main() {
|
||||
shuckleAddressToUse = shuckleProxyAddress
|
||||
fuseDir = "fuse1"
|
||||
}
|
||||
fuseMountPoint := procs.StartFuse(log, &managedprocess.FuseOpts{
|
||||
fuseMountPoint := procs.StartFuse(l, &managedprocess.FuseOpts{
|
||||
Exe: goExes.FuseExe,
|
||||
Path: path.Join(*dataDir, fuseDir),
|
||||
LogLevel: level,
|
||||
|
||||
@@ -6,8 +6,9 @@ import (
"net/http"
"os"
"strings"
"xtx/ternfs/bufpool"
"xtx/ternfs/client"
"xtx/ternfs/lib"
"xtx/ternfs/log"
"xtx/ternfs/msgs"
"xtx/ternfs/s3"
)
@@ -34,15 +35,15 @@ func main() {
trace := flag.Bool("trace", false, "")
flag.Parse()

level := lib.INFO
level := log.INFO
if *verbose {
level = lib.DEBUG
level = log.DEBUG
}
if *trace {
level = lib.TRACE
level = log.TRACE
}

log := lib.NewLogger(os.Stdout, &lib.LoggerOptions{
l := log.NewLogger(os.Stdout, &log.LoggerOptions{
Level: level,
AppInstance: "eggss3",
AppType: "restech_eggsfs.daytime",
@@ -55,7 +56,7 @@ func main() {
}

c, err := client.NewClient(
log,
l,
nil,
*ternfsAddr,
msgs.AddrsInfo{},
@@ -73,14 +74,14 @@ func main() {
os.Exit(2)
}
bucketName, rootPath := parts[0], parts[1]
log.Info("Mapping bucket %q to path %q", bucketName, rootPath)
l.Info("Mapping bucket %q to path %q", bucketName, rootPath)
bucketPaths[bucketName] = rootPath
}

s3Server := s3.NewS3Server(
log,
l,
c,
lib.NewBufPool(),
bufpool.NewBufPool(),
client.NewDirInfoCache(),
bucketPaths,
*virtualHost,
@@ -90,9 +91,9 @@ func main() {
Addr: *addr,
Handler: s3Server,
}
log.Info("Starting S3 gateway on %q", *addr)
l.Info("Starting S3 gateway on %q", *addr)
if *virtualHost != "" {
log.Info("Virtual host routing enabled for domain: %q", *virtualHost)
l.Info("Virtual host routing enabled for domain: %q", *virtualHost)
}
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
panic(err)
File diff suppressed because it is too large
@@ -6,7 +6,6 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
@@ -16,13 +15,16 @@ import (
|
||||
"time"
|
||||
"xtx/ternfs/bincode"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/flags"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/timing"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
|
||||
type state struct {
|
||||
counters map[msgs.ShuckleMessageKind]*lib.Timings
|
||||
counters map[msgs.ShuckleMessageKind]*timing.Timings
|
||||
config *shuckleProxyConfig
|
||||
shuckleConn *client.ShuckleConn
|
||||
}
|
||||
@@ -35,24 +37,24 @@ type shuckleProxyConfig struct {
|
||||
}
|
||||
|
||||
func newState(
|
||||
log *lib.Logger,
|
||||
l *log.Logger,
|
||||
conf *shuckleProxyConfig,
|
||||
idb *lib.InfluxDB,
|
||||
idb *log.InfluxDB,
|
||||
) *state {
|
||||
st := &state{
|
||||
config: conf,
|
||||
shuckleConn: client.MakeShuckleConn(log, nil, conf.shuckleAddress, conf.numHandlers),
|
||||
shuckleConn: client.MakeShuckleConn(l, nil, conf.shuckleAddress, conf.numHandlers),
|
||||
}
|
||||
|
||||
st.counters = make(map[msgs.ShuckleMessageKind]*lib.Timings)
|
||||
st.counters = make(map[msgs.ShuckleMessageKind]*timing.Timings)
|
||||
for _, k := range msgs.AllShuckleMessageKind {
|
||||
st.counters[k] = lib.NewTimings(40, 10*time.Microsecond, 1.5)
|
||||
st.counters[k] = timing.NewTimings(40, 10*time.Microsecond, 1.5)
|
||||
}
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func handleLocalChangedBlockServices(ll *lib.Logger, s *state, req *msgs.LocalChangedBlockServicesReq) (*msgs.LocalChangedBlockServicesResp, error) {
|
||||
func handleLocalChangedBlockServices(ll *log.Logger, s *state, req *msgs.LocalChangedBlockServicesReq) (*msgs.LocalChangedBlockServicesResp, error) {
|
||||
reqAtLocation := &msgs.ChangedBlockServicesAtLocationReq{s.config.location, req.ChangedSince}
|
||||
resp, err := handleProxyRequest(ll, s, reqAtLocation)
|
||||
if err != nil {
|
||||
@@ -62,7 +64,7 @@ func handleLocalChangedBlockServices(ll *lib.Logger, s *state, req *msgs.LocalCh
|
||||
return &msgs.LocalChangedBlockServicesResp{respAtLocation.LastChange, respAtLocation.BlockServices}, nil
|
||||
}
|
||||
|
||||
func handleLocalShards(ll *lib.Logger, s *state, _ *msgs.LocalShardsReq) (*msgs.LocalShardsResp, error) {
|
||||
func handleLocalShards(ll *log.Logger, s *state, _ *msgs.LocalShardsReq) (*msgs.LocalShardsResp, error) {
|
||||
reqAtLocation := &msgs.ShardsAtLocationReq{s.config.location}
|
||||
resp, err := handleProxyRequest(ll, s, reqAtLocation)
|
||||
if err != nil {
|
||||
@@ -73,7 +75,7 @@ func handleLocalShards(ll *lib.Logger, s *state, _ *msgs.LocalShardsReq) (*msgs.
|
||||
return &msgs.LocalShardsResp{respAtLocation.Shards}, nil
|
||||
}
|
||||
|
||||
func handleLocalCdc(log *lib.Logger, s *state, req *msgs.LocalCdcReq) (msgs.ShuckleResponse, error) {
|
||||
func handleLocalCdc(log *log.Logger, s *state, req *msgs.LocalCdcReq) (msgs.ShuckleResponse, error) {
|
||||
reqAtLocation := &msgs.CdcAtLocationReq{LocationId: s.config.location}
|
||||
resp, err := handleProxyRequest(log, s, reqAtLocation)
|
||||
if err != nil {
|
||||
@@ -84,15 +86,15 @@ func handleLocalCdc(log *lib.Logger, s *state, req *msgs.LocalCdcReq) (msgs.Shuc
|
||||
return &msgs.LocalCdcResp{respAtLocation.Addrs, respAtLocation.LastSeen}, nil
|
||||
}
|
||||
|
||||
func handleProxyRequest(log *lib.Logger, s *state, req msgs.ShuckleRequest) (msgs.ShuckleResponse, error) {
|
||||
func handleProxyRequest(log *log.Logger, s *state, req msgs.ShuckleRequest) (msgs.ShuckleResponse, error) {
|
||||
return s.shuckleConn.Request(req)
|
||||
}
|
||||
|
||||
func handleShuckle(log *lib.Logger, s *state) (msgs.ShuckleResponse, error) {
|
||||
func handleShuckle(log *log.Logger, s *state) (msgs.ShuckleResponse, error) {
|
||||
return &msgs.ShuckleResp{s.config.addrs}, nil
|
||||
}
|
||||
|
||||
func handleRequestParsed(log *lib.Logger, s *state, req msgs.ShuckleRequest) (msgs.ShuckleResponse, error) {
|
||||
func handleRequestParsed(log *log.Logger, s *state, req msgs.ShuckleRequest) (msgs.ShuckleResponse, error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
s.counters[req.ShuckleRequestKind()].Add(time.Since(t0))
|
||||
@@ -183,7 +185,7 @@ func isBenignConnTermination(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func writeShuckleResponse(log *lib.Logger, w io.Writer, resp msgs.ShuckleResponse) error {
|
||||
func writeShuckleResponse(log *log.Logger, w io.Writer, resp msgs.ShuckleResponse) error {
|
||||
// serialize
|
||||
bytes := bincode.Pack(resp)
|
||||
// write out
|
||||
@@ -202,7 +204,7 @@ func writeShuckleResponse(log *lib.Logger, w io.Writer, resp msgs.ShuckleRespons
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeShuckleResponseError(log *lib.Logger, w io.Writer, err msgs.TernError) error {
|
||||
func writeShuckleResponseError(log *log.Logger, w io.Writer, err msgs.TernError) error {
|
||||
log.Debug("writing shuckle error %v", err)
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
if err := binary.Write(buf, binary.LittleEndian, msgs.SHUCKLE_RESP_PROTOCOL_VERSION); err != nil {
|
||||
@@ -223,7 +225,7 @@ func writeShuckleResponseError(log *lib.Logger, w io.Writer, err msgs.TernError)
|
||||
|
||||
// returns whether the connection should be terminated
|
||||
func handleError(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
conn *net.TCPConn,
|
||||
err error,
|
||||
) bool {
|
||||
@@ -248,7 +250,7 @@ func handleError(
|
||||
}
|
||||
|
||||
func readShuckleRequest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
r io.Reader,
|
||||
) (msgs.ShuckleRequest, error) {
|
||||
var protocol uint32
|
||||
@@ -332,7 +334,7 @@ func readShuckleRequest(
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func handleRequest(log *lib.Logger, s *state, conn *net.TCPConn) {
|
||||
func handleRequest(log *log.Logger, s *state, conn *net.TCPConn) {
|
||||
defer conn.Close()
|
||||
|
||||
for {
|
||||
@@ -376,12 +378,12 @@ func noRunawayArgs() {
|
||||
}
|
||||
|
||||
// Writes stats to influx db.
|
||||
func sendMetrics(log *lib.Logger, st *state, influxDB *lib.InfluxDB) error {
|
||||
metrics := lib.MetricsBuilder{}
|
||||
func sendMetrics(l *log.Logger, st *state, influxDB *log.InfluxDB) error {
|
||||
metrics := log.MetricsBuilder{}
|
||||
rand := wyhash.New(rand.Uint64())
|
||||
alert := log.NewNCAlert(10 * time.Second)
|
||||
alert := l.NewNCAlert(10 * time.Second)
|
||||
for {
|
||||
log.Info("sending metrics")
|
||||
l.Info("sending metrics")
|
||||
metrics.Reset()
|
||||
now := time.Now()
|
||||
for _, req := range msgs.AllShuckleMessageKind {
|
||||
@@ -393,12 +395,12 @@ func sendMetrics(log *lib.Logger, st *state, influxDB *lib.InfluxDB) error {
|
||||
}
|
||||
err := influxDB.SendMetrics(metrics.Payload())
|
||||
if err == nil {
|
||||
log.ClearNC(alert)
|
||||
l.ClearNC(alert)
|
||||
sleepFor := time.Minute + time.Duration(rand.Uint64() & ^(uint64(1)<<63))%time.Minute
|
||||
log.Info("metrics sent, sleeping for %v", sleepFor)
|
||||
l.Info("metrics sent, sleeping for %v", sleepFor)
|
||||
time.Sleep(sleepFor)
|
||||
} else {
|
||||
log.RaiseNC(alert, "failed to send metrics, will try again in a second: %v", err)
|
||||
l.RaiseNC(alert, "failed to send metrics, will try again in a second: %v", err)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
@@ -406,7 +408,7 @@ func sendMetrics(log *lib.Logger, st *state, influxDB *lib.InfluxDB) error {
|
||||
|
||||
func main() {
|
||||
|
||||
var addresses lib.StringArrayFlags
|
||||
var addresses flags.StringArrayFlags
|
||||
flag.Var(&addresses, "addr", "Addresses (up to two) to bind bincode server on.")
|
||||
logFile := flag.String("log-file", "", "File in which to write logs (or stdout)")
|
||||
verbose := flag.Bool("verbose", false, "")
|
||||
@@ -440,7 +442,7 @@ func main() {
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
var influxDB *lib.InfluxDB
|
||||
var influxDB *log.InfluxDB
|
||||
if *influxDBOrigin == "" {
|
||||
if *influxDBOrg != "" || *influxDBBucket != "" {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
@@ -451,14 +453,14 @@ func main() {
|
||||
fmt.Fprintf(os.Stderr, "Either all or none of the -influx-db flags must be passed\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
influxDB = &lib.InfluxDB{
|
||||
influxDB = &log.InfluxDB{
|
||||
Origin: *influxDBOrigin,
|
||||
Org: *influxDBOrg,
|
||||
Bucket: *influxDBBucket,
|
||||
}
|
||||
}
|
||||
|
||||
ownIp1, ownPort1, err := lib.ParseIPV4Addr(addresses[0])
|
||||
ownIp1, ownPort1, err := flags.ParseIPV4Addr(addresses[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -466,7 +468,7 @@ func main() {
|
||||
var ownIp2 [4]byte
|
||||
var ownPort2 uint16
|
||||
if len(addresses) == 2 {
|
||||
ownIp2, ownPort2, err = lib.ParseIPV4Addr(addresses[1])
|
||||
ownIp2, ownPort2, err = flags.ParseIPV4Addr(addresses[1])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -477,18 +479,19 @@ func main() {
|
||||
var err error
|
||||
logOut, err = os.OpenFile(*logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("could not open log file %v: %v", *logFile, err)
|
||||
fmt.Printf("could not open log file %v: %v\n", *logFile, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
log := lib.NewLogger(logOut, &lib.LoggerOptions{Level: level, Syslog: *syslog, XmonAddr: *xmon, AppInstance: "eggsshuckleproxy", AppType: "restech_eggsfs.critical"})
|
||||
log := log.NewLogger(logOut, &log.LoggerOptions{Level: level, Syslog: *syslog, XmonAddr: *xmon, AppInstance: "eggsshuckleproxy", AppType: "restech_eggsfs.critical"})
|
||||
|
||||
log.Info("Running shuckle proxy with options:")
|
||||
log.Info(" addr = %v", addresses)
|
||||
@@ -546,7 +549,7 @@ func main() {
|
||||
var activeConnections int64
|
||||
startBincodeHandler := func(listener net.Listener) {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
@@ -561,7 +564,7 @@ func main() {
|
||||
go func() {
|
||||
defer func() {
|
||||
atomic.AddInt64(&activeConnections, -1)
|
||||
lib.HandleRecoverPanic(log, recover())
|
||||
lrecover.HandleRecoverChan(log, terminateChan, recover())
|
||||
}()
|
||||
handleRequest(log, state, conn.(*net.TCPConn))
|
||||
}()
|
||||
@@ -576,7 +579,7 @@ func main() {
|
||||
|
||||
if influxDB != nil {
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverPanic(log, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
sendMetrics(log, state, influxDB)
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"time"
|
||||
"xtx/ternfs/cleanup"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
)
|
||||
|
||||
func deleteDir(log *lib.Logger, client *client.Client, ownerId msgs.InodeId, name string, creationTime msgs.TernTime, dirId msgs.InodeId) {
|
||||
func deleteDir(log *log.Logger, client *client.Client, ownerId msgs.InodeId, name string, creationTime msgs.TernTime, dirId msgs.InodeId) {
|
||||
readDirReq := msgs.ReadDirReq{
|
||||
DirId: dirId,
|
||||
}
|
||||
@@ -47,7 +47,7 @@ func deleteDir(log *lib.Logger, client *client.Client, ownerId msgs.InodeId, nam
|
||||
}
|
||||
|
||||
func cleanupAfterTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
shuckleAddress string,
|
||||
counters *client.ClientCounters,
|
||||
pauseBlockServiceKiller *sync.Mutex,
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"sort"
|
||||
"unsafe"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
@@ -159,7 +159,7 @@ func dirSeek(fd C.int, off C.long, whence C.int) (C.long, error) {
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func dirSeekTest(log *lib.Logger, shuckleAddress string, mountPoint string) {
|
||||
func dirSeekTest(log *log.Logger, shuckleAddress string, mountPoint string) {
|
||||
c, err := client.NewClient(log, nil, shuckleAddress, msgs.AddrsInfo{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -6,7 +6,8 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
@@ -153,7 +154,7 @@ func checkCheckpoint(prefix string, files *fileHistoryFiles, allEdges []edge) {
|
||||
}
|
||||
}
|
||||
|
||||
func runCheckpoint(log *lib.Logger, client *client.Client, prefix string, files *fileHistoryFiles) fileHistoryCheckpoint {
|
||||
func runCheckpoint(log *log.Logger, client *client.Client, prefix string, files *fileHistoryFiles) fileHistoryCheckpoint {
|
||||
edges := readDir(log, client, msgs.ROOT_DIR_INODE_ID)
|
||||
checkCheckpoint(prefix, files, edges)
|
||||
resp := msgs.StatDirectoryResp{}
|
||||
@@ -163,7 +164,7 @@ func runCheckpoint(log *lib.Logger, client *client.Client, prefix string, files
|
||||
}
|
||||
}
|
||||
|
||||
func runStep(log *lib.Logger, client *client.Client, dirInfoCache *client.DirInfoCache, files *fileHistoryFiles, stepAny any) any {
|
||||
func runStep(log *log.Logger, client *client.Client, dirInfoCache *client.DirInfoCache, files *fileHistoryFiles, stepAny any) any {
|
||||
switch step := stepAny.(type) {
|
||||
case fileHistoryCreateFile:
|
||||
id, creationTime := createFile(log, client, dirInfoCache, msgs.ROOT_DIR_INODE_ID, 0, step.name, 0, 0, nil)
|
||||
@@ -253,7 +254,7 @@ func replayStep(prefix string, files *fileHistoryFiles, fullEdges []fullEdge, st
|
||||
}
|
||||
}
|
||||
|
||||
func fileHistoryStepSingle(log *lib.Logger, client *client.Client, dirInfoCache *client.DirInfoCache, opts *fileHistoryTestOpts, seed uint64, filePrefix string) {
|
||||
func fileHistoryStepSingle(log *log.Logger, client *client.Client, dirInfoCache *client.DirInfoCache, opts *fileHistoryTestOpts, seed uint64, filePrefix string) {
|
||||
// loop for n steps. at every step:
|
||||
// * if we have never reached the target files, then just create a file.
|
||||
// * if we have, create/delete/rename/rename with override at random.
|
||||
@@ -318,7 +319,7 @@ type fileHistoryTestOpts struct {
|
||||
}
|
||||
|
||||
func fileHistoryTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
shuckleAddress string,
|
||||
opts *fileHistoryTestOpts,
|
||||
counters *client.ClientCounters,
|
||||
@@ -327,7 +328,7 @@ func fileHistoryTest(
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
numTests := opts.threads
|
||||
if numTests > 15 {
|
||||
panic(fmt.Errorf("numTests %d too big for one-digit prefix", numTests))
|
||||
@@ -338,7 +339,7 @@ func fileHistoryTest(
|
||||
prefix := fmt.Sprintf("%x", i)
|
||||
seed := uint64(i)
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
c, err := client.NewClient(log, nil, shuckleAddress, msgs.AddrsInfo{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -20,9 +20,10 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/cleanup"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
terns3 "xtx/ternfs/s3"
|
||||
"xtx/ternfs/wyhash"
|
||||
@@ -51,23 +52,23 @@ type fsTestOpts struct {
|
||||
}
|
||||
|
||||
type fsTestHarness[Id comparable] interface {
|
||||
createDirectory(log *lib.Logger, owner Id, name string) (Id, msgs.TernTime)
|
||||
rename(log *lib.Logger, isDirectory bool, targetId Id, oldOwner Id, oldCreationTime msgs.TernTime, oldName string, newOwner Id, newName string) (Id, msgs.TernTime)
|
||||
createFile(log *lib.Logger, owner Id, spanSize uint32, name string, size uint64, dataSeed uint64) (Id, msgs.TernTime)
|
||||
checkFileData(log *lib.Logger, id Id, size uint64, dataSeed uint64)
|
||||
createDirectory(log *log.Logger, owner Id, name string) (Id, msgs.TernTime)
|
||||
rename(log *log.Logger, isDirectory bool, targetId Id, oldOwner Id, oldCreationTime msgs.TernTime, oldName string, newOwner Id, newName string) (Id, msgs.TernTime)
|
||||
createFile(log *log.Logger, owner Id, spanSize uint32, name string, size uint64, dataSeed uint64) (Id, msgs.TernTime)
|
||||
checkFileData(log *log.Logger, id Id, size uint64, dataSeed uint64)
|
||||
// files, directories
|
||||
readDirectory(log *lib.Logger, dir Id) ([]string, []string)
|
||||
removeFile(log *lib.Logger, dir Id, name string)
|
||||
removeDirectory(log *lib.Logger, dir Id, name string)
|
||||
readDirectory(log *log.Logger, dir Id) ([]string, []string)
|
||||
removeFile(log *log.Logger, dir Id, name string)
|
||||
removeDirectory(log *log.Logger, dir Id, name string)
|
||||
}
|
||||
|
||||
type apiFsTestHarness struct {
|
||||
client *client.Client
|
||||
dirInfoCache *client.DirInfoCache
|
||||
readBufPool *lib.BufPool
|
||||
readBufPool *bufpool.BufPool
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) createDirectory(log *lib.Logger, owner msgs.InodeId, name string) (id msgs.InodeId, creationTime msgs.TernTime) {
|
||||
func (c *apiFsTestHarness) createDirectory(log *log.Logger, owner msgs.InodeId, name string) (id msgs.InodeId, creationTime msgs.TernTime) {
|
||||
// TODO random parity
|
||||
req := msgs.MakeDirectoryReq{
|
||||
OwnerId: owner,
|
||||
@@ -79,7 +80,7 @@ func (c *apiFsTestHarness) createDirectory(log *lib.Logger, owner msgs.InodeId,
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) rename(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
isDirectory bool,
|
||||
targetId msgs.InodeId,
|
||||
oldOwner msgs.InodeId,
|
||||
@@ -130,12 +131,12 @@ func (c *apiFsTestHarness) rename(
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) createFile(
|
||||
log *lib.Logger, owner msgs.InodeId, spanSize uint32, name string, size uint64, dataSeed uint64,
|
||||
log *log.Logger, owner msgs.InodeId, spanSize uint32, name string, size uint64, dataSeed uint64,
|
||||
) (msgs.InodeId, msgs.TernTime) {
|
||||
return createFile(log, c.client, c.dirInfoCache, owner, spanSize, name, size, dataSeed, c.readBufPool)
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) readDirectory(log *lib.Logger, dir msgs.InodeId) (files []string, dirs []string) {
|
||||
func (c *apiFsTestHarness) readDirectory(log *log.Logger, dir msgs.InodeId) (files []string, dirs []string) {
|
||||
edges := readDir(log, c.client, dir)
|
||||
for _, edge := range edges {
|
||||
if edge.targetId.Type() == msgs.DIRECTORY {
|
||||
@@ -181,7 +182,7 @@ func ensureLen(buf []byte, l int) []byte {
|
||||
return buf
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) checkFileData(log *lib.Logger, id msgs.InodeId, size uint64, dataSeed uint64) {
|
||||
func (c *apiFsTestHarness) checkFileData(log *log.Logger, id msgs.InodeId, size uint64, dataSeed uint64) {
|
||||
actualData := readFile(log, c.readBufPool, c.client, id, size)
|
||||
defer c.readBufPool.Put(actualData)
|
||||
expectedData := c.readBufPool.Get(int(size))
|
||||
@@ -190,7 +191,7 @@ func (c *apiFsTestHarness) checkFileData(log *lib.Logger, id msgs.InodeId, size
|
||||
checkFileData(id, 0, int(size), actualData.Bytes(), expectedData.Bytes())
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) removeFile(log *lib.Logger, ownerId msgs.InodeId, name string) {
|
||||
func (c *apiFsTestHarness) removeFile(log *log.Logger, ownerId msgs.InodeId, name string) {
|
||||
lookupResp := msgs.LookupResp{}
|
||||
if err := c.client.ShardRequest(log, ownerId.Shard(), &msgs.LookupReq{DirId: ownerId, Name: name}, &lookupResp); err != nil {
|
||||
panic(err)
|
||||
@@ -200,7 +201,7 @@ func (c *apiFsTestHarness) removeFile(log *lib.Logger, ownerId msgs.InodeId, nam
|
||||
}
|
||||
}
|
||||
|
||||
func (c *apiFsTestHarness) removeDirectory(log *lib.Logger, ownerId msgs.InodeId, name string) {
|
||||
func (c *apiFsTestHarness) removeDirectory(log *log.Logger, ownerId msgs.InodeId, name string) {
|
||||
lookupResp := msgs.LookupResp{}
|
||||
if err := c.client.ShardRequest(log, ownerId.Shard(), &msgs.LookupReq{DirId: ownerId, Name: name}, &lookupResp); err != nil {
|
||||
panic(err)
|
||||
@@ -215,10 +216,10 @@ var _ = (fsTestHarness[msgs.InodeId])((*apiFsTestHarness)(nil))
|
||||
type s3TestHarness struct {
|
||||
client *s3.Client
|
||||
bucket string
|
||||
bufPool *lib.BufPool
|
||||
bufPool *bufpool.BufPool
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) createDirectory(log *lib.Logger, owner string, name string) (id string, creationTime msgs.TernTime) {
|
||||
func (c *s3TestHarness) createDirectory(log *log.Logger, owner string, name string) (id string, creationTime msgs.TernTime) {
|
||||
fullPath := path.Join(owner, name) + "/"
|
||||
_, err := c.client.PutObject(context.TODO(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(c.bucket),
|
||||
@@ -231,7 +232,7 @@ func (c *s3TestHarness) createDirectory(log *lib.Logger, owner string, name stri
|
||||
return path.Join(owner, name), 0
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) rename(log *lib.Logger, isDirectory bool, targetFullPath string, oldDir string, oldCreationTime msgs.TernTime, oldName string, newDir string, newName string) (string, msgs.TernTime) {
|
||||
func (c *s3TestHarness) rename(log *log.Logger, isDirectory bool, targetFullPath string, oldDir string, oldCreationTime msgs.TernTime, oldName string, newDir string, newName string) (string, msgs.TernTime) {
|
||||
if targetFullPath != path.Join(oldDir, oldName) {
|
||||
panic(fmt.Errorf("mismatching %v and %v", targetFullPath, path.Join(oldDir, oldName)))
|
||||
}
|
||||
@@ -275,7 +276,7 @@ func (c *s3TestHarness) rename(log *lib.Logger, isDirectory bool, targetFullPath
|
||||
return path.Join(newDir, newName), 0
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) createFile(log *lib.Logger, owner string, spanSize uint32, name string, size uint64, dataSeed uint64) (string, msgs.TernTime) {
|
||||
func (c *s3TestHarness) createFile(log *log.Logger, owner string, spanSize uint32, name string, size uint64, dataSeed uint64) (string, msgs.TernTime) {
|
||||
fullPath := path.Join(owner, name)
|
||||
rand := wyhash.New(dataSeed)
|
||||
bodyBuf := c.bufPool.Get(int(size))
|
||||
@@ -293,7 +294,7 @@ func (c *s3TestHarness) createFile(log *lib.Logger, owner string, spanSize uint3
|
||||
return fullPath, 0
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) checkFileData(log *lib.Logger, filePath string, size uint64, dataSeed uint64) {
|
||||
func (c *s3TestHarness) checkFileData(log *log.Logger, filePath string, size uint64, dataSeed uint64) {
|
||||
fullSize := int(size)
|
||||
expectedData := c.bufPool.Get(fullSize)
|
||||
defer c.bufPool.Put(expectedData)
|
||||
@@ -341,7 +342,7 @@ func (c *s3TestHarness) checkFileData(log *lib.Logger, filePath string, size uin
|
||||
checkFileData(filePath, 0, fullSize, actualData.Bytes(), expectedData.Bytes())
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) readDirectory(log *lib.Logger, dir string) (files []string, directories []string) {
|
||||
func (c *s3TestHarness) readDirectory(log *log.Logger, dir string) (files []string, directories []string) {
|
||||
files = []string{}
|
||||
directories = []string{}
|
||||
|
||||
@@ -366,7 +367,7 @@ func (c *s3TestHarness) readDirectory(log *lib.Logger, dir string) (files []stri
|
||||
return files, directories
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) removeFile(log *lib.Logger, dir string, name string) {
|
||||
func (c *s3TestHarness) removeFile(log *log.Logger, dir string, name string) {
|
||||
_, err := c.client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(c.bucket),
|
||||
Key: aws.String(path.Join(dir, name)),
|
||||
@@ -376,7 +377,7 @@ func (c *s3TestHarness) removeFile(log *lib.Logger, dir string, name string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *s3TestHarness) removeDirectory(log *lib.Logger, dir string, name string) {
|
||||
func (c *s3TestHarness) removeDirectory(log *log.Logger, dir string, name string) {
|
||||
_, err := c.client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(c.bucket),
|
||||
Key: aws.String(path.Join(dir, name) + "/"),
|
||||
@@ -389,13 +390,13 @@ func (c *s3TestHarness) removeDirectory(log *lib.Logger, dir string, name string
|
||||
var _ = (fsTestHarness[string])((*s3TestHarness)(nil))
|
||||
|
||||
type posixFsTestHarness struct {
|
||||
bufPool *lib.BufPool
|
||||
bufPool *bufpool.BufPool
|
||||
readWithMmap bool
|
||||
}
|
||||
|
||||
func (*posixFsTestHarness) createDirectory(log *lib.Logger, owner string, name string) (fullPath string, creationTime msgs.TernTime) {
|
||||
func (*posixFsTestHarness) createDirectory(l *log.Logger, owner string, name string) (fullPath string, creationTime msgs.TernTime) {
|
||||
fullPath = path.Join(owner, name)
|
||||
log.LogStack(1, lib.DEBUG, "posix mkdir %v", fullPath)
|
||||
l.LogStack(1, log.DEBUG, "posix mkdir %v", fullPath)
|
||||
if err := os.Mkdir(fullPath, 0777); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -403,7 +404,7 @@ func (*posixFsTestHarness) createDirectory(log *lib.Logger, owner string, name s
|
||||
}
|
||||
|
||||
func (*posixFsTestHarness) rename(
|
||||
log *lib.Logger,
|
||||
l *log.Logger,
|
||||
isDirectory bool,
|
||||
targetFullPath string,
|
||||
oldDir string,
|
||||
@@ -416,14 +417,14 @@ func (*posixFsTestHarness) rename(
|
||||
panic(fmt.Errorf("mismatching %v and %v", targetFullPath, path.Join(oldDir, oldName)))
|
||||
}
|
||||
newFullPath := path.Join(newDir, newName)
|
||||
log.LogStack(1, lib.DEBUG, "posix rename %v -> %v", targetFullPath, newFullPath)
|
||||
l.LogStack(1, log.DEBUG, "posix rename %v -> %v", targetFullPath, newFullPath)
|
||||
if err := os.Rename(targetFullPath, path.Join(newDir, newName)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newFullPath, 0
|
||||
}
|
||||
|
||||
func getInodeId(log *lib.Logger, path string) msgs.InodeId {
|
||||
func getInodeId(log *log.Logger, path string) msgs.InodeId {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -440,7 +441,7 @@ func getInodeId(log *lib.Logger, path string) msgs.InodeId {
|
||||
}
|
||||
|
||||
func (c *posixFsTestHarness) createFile(
|
||||
log *lib.Logger, dirFullPath string, spanSize uint32, name string, size uint64, dataSeed uint64,
|
||||
l *log.Logger, dirFullPath string, spanSize uint32, name string, size uint64, dataSeed uint64,
|
||||
) (fileFullPath string, t msgs.TernTime) {
|
||||
fileFullPath = path.Join(dirFullPath, name)
|
||||
|
||||
@@ -453,7 +454,7 @@ func (c *posixFsTestHarness) createFile(
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.LogStack(1, lib.DEBUG, "posix create file %v (%v size)", fileFullPath, size)
|
||||
l.LogStack(1, log.DEBUG, "posix create file %v (%v size)", fileFullPath, size)
|
||||
if size > 0 {
|
||||
// write in randomly sized chunks
|
||||
chunks := int(rand.Uint32()%10) + 1
|
||||
@@ -465,7 +466,7 @@ func (c *posixFsTestHarness) createFile(
|
||||
offsets[chunks] = int(size)
|
||||
sort.Ints(offsets)
|
||||
for i := 0; i < chunks; i++ {
|
||||
log.Debug("writing from %v to %v (pid %v)", offsets[i], offsets[i+1], os.Getpid())
|
||||
l.Debug("writing from %v to %v (pid %v)", offsets[i], offsets[i+1], os.Getpid())
|
||||
if _, err := f.Write(actualDataBuf.Bytes()[offsets[i]:offsets[i+1]]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -477,10 +478,10 @@ func (c *posixFsTestHarness) createFile(
|
||||
return fileFullPath, 0
|
||||
}
|
||||
|
||||
func (c *posixFsTestHarness) readDirectory(log *lib.Logger, dirFullPath string) (files []string, dirs []string) {
|
||||
log.LogStack(1, lib.DEBUG, "posix readdir for %v", dirFullPath)
|
||||
func (c *posixFsTestHarness) readDirectory(l *log.Logger, dirFullPath string) (files []string, dirs []string) {
|
||||
l.LogStack(1, log.DEBUG, "posix readdir for %v", dirFullPath)
|
||||
fileInfo, err := os.ReadDir(dirFullPath)
|
||||
log.LogStack(1, lib.DEBUG, "posix readdir for %v finished", dirFullPath)
|
||||
l.LogStack(1, log.DEBUG, "posix readdir for %v finished", dirFullPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -494,7 +495,7 @@ func (c *posixFsTestHarness) readDirectory(log *lib.Logger, dirFullPath string)
|
||||
return files, dirs
|
||||
}
|
||||
|
||||
func (c *posixFsTestHarness) checkFileData(log *lib.Logger, fullFilePath string, size uint64, dataSeed uint64) {
|
||||
func (c *posixFsTestHarness) checkFileData(log *log.Logger, fullFilePath string, size uint64, dataSeed uint64) {
|
||||
log.Debug("checking data for file %v tid(%d)", fullFilePath, syscall.Gettid())
|
||||
fullSize := int(size)
|
||||
expectedData := c.bufPool.Get(fullSize)
|
||||
@@ -566,11 +567,11 @@ func (c *posixFsTestHarness) checkFileData(log *lib.Logger, fullFilePath string,
|
||||
checkFileData(fullFilePath, 0, fullSize, actualData.Bytes(), expectedData.Bytes())
|
||||
}
|
||||
|
||||
func (c *posixFsTestHarness) removeFile(log *lib.Logger, ownerId string, name string) {
|
||||
func (c *posixFsTestHarness) removeFile(log *log.Logger, ownerId string, name string) {
|
||||
os.Remove(path.Join(ownerId, name))
|
||||
}
|
||||
|
||||
func (c *posixFsTestHarness) removeDirectory(log *lib.Logger, ownerId string, name string) {
|
||||
func (c *posixFsTestHarness) removeDirectory(log *log.Logger, ownerId string, name string) {
|
||||
os.Remove(path.Join(ownerId, name))
|
||||
}
|
||||
|
||||
@@ -630,7 +631,7 @@ func (s *fsTestState[Id]) dir(path []int) *fsTestDir[Id] {
|
||||
return s.rootDir.dir(path)
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) incrementDirs(log *lib.Logger, opts *fsTestOpts) {
|
||||
func (state *fsTestState[Id]) incrementDirs(log *log.Logger, opts *fsTestOpts) {
|
||||
if state.totalDirs >= opts.numDirs {
|
||||
panic("ran out of dirs!")
|
||||
}
|
||||
@@ -640,7 +641,7 @@ func (state *fsTestState[Id]) incrementDirs(log *lib.Logger, opts *fsTestOpts) {
|
||||
}
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) makeDir(log *lib.Logger, harness fsTestHarness[Id], opts *fsTestOpts, parent []int, name int) []int {
|
||||
func (state *fsTestState[Id]) makeDir(log *log.Logger, harness fsTestHarness[Id], opts *fsTestOpts, parent []int, name int) []int {
|
||||
state.incrementDirs(log, opts)
|
||||
dir := state.dir(parent)
|
||||
_, dirExists := dir.children.directories[name]
|
||||
@@ -661,7 +662,7 @@ func (state *fsTestState[Id]) makeDir(log *lib.Logger, harness fsTestHarness[Id]
|
||||
return path
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) makeDirFromTemp(log *lib.Logger, harness fsTestHarness[Id], opts *fsTestOpts, parent []int, name int, tmpParent []int) []int {
|
||||
func (state *fsTestState[Id]) makeDirFromTemp(log *log.Logger, harness fsTestHarness[Id], opts *fsTestOpts, parent []int, name int, tmpParent []int) []int {
|
||||
dir := state.dir(parent)
|
||||
_, dirExists := dir.children.directories[name]
|
||||
if dirExists {
|
||||
@@ -688,7 +689,7 @@ func (state *fsTestState[Id]) makeDirFromTemp(log *lib.Logger, harness fsTestHar
|
||||
return path
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) incrementFiles(log *lib.Logger, opts *fsTestOpts) {
|
||||
func (state *fsTestState[Id]) incrementFiles(log *log.Logger, opts *fsTestOpts) {
|
||||
if state.totalFiles >= opts.numFiles {
|
||||
panic("ran out of files!")
|
||||
}
|
||||
@@ -698,7 +699,7 @@ func (state *fsTestState[Id]) incrementFiles(log *lib.Logger, opts *fsTestOpts)
|
||||
}
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) calcFileSize(log *lib.Logger, opts *fsTestOpts, rand *wyhash.Rand) (size uint64) {
|
||||
func (state *fsTestState[Id]) calcFileSize(log *log.Logger, opts *fsTestOpts, rand *wyhash.Rand) (size uint64) {
|
||||
p := rand.Float64()
|
||||
if p < opts.emptyFileProb || opts.maxFileSize == 0 {
|
||||
size = 0
|
||||
@@ -712,7 +713,7 @@ func (state *fsTestState[Id]) calcFileSize(log *lib.Logger, opts *fsTestOpts, ra
|
||||
return size
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) makeFile(log *lib.Logger, harness fsTestHarness[Id], opts *fsTestOpts, rand *wyhash.Rand, dirPath []int, name int) {
|
||||
func (state *fsTestState[Id]) makeFile(log *log.Logger, harness fsTestHarness[Id], opts *fsTestOpts, rand *wyhash.Rand, dirPath []int, name int) {
|
||||
state.incrementFiles(log, opts)
|
||||
dir := state.dir(dirPath)
|
||||
_, dirExists := dir.children.directories[name]
|
||||
@@ -738,7 +739,7 @@ func (state *fsTestState[Id]) makeFile(log *lib.Logger, harness fsTestHarness[Id
|
||||
}
|
||||
}
|
||||
|
||||
func (state *fsTestState[Id]) makeFileFromTemp(log *lib.Logger, harness fsTestHarness[Id], opts *fsTestOpts, rand *wyhash.Rand, dirPath []int, name int, tmpDirPath []int) {
|
||||
func (state *fsTestState[Id]) makeFileFromTemp(log *log.Logger, harness fsTestHarness[Id], opts *fsTestOpts, rand *wyhash.Rand, dirPath []int, name int, tmpDirPath []int) {
|
||||
state.incrementFiles(log, opts)
|
||||
dir := state.dir(dirPath)
|
||||
_, dirExists := dir.children.directories[name]
|
||||
@@ -766,7 +767,7 @@ func (state *fsTestState[Id]) makeFileFromTemp(log *lib.Logger, harness fsTestHa
|
||||
}
|
||||
}
|
||||
|
||||
func (d *fsTestDir[Id]) check(log *lib.Logger, harness fsTestHarness[Id]) {
|
||||
func (d *fsTestDir[Id]) check(log *log.Logger, harness fsTestHarness[Id]) {
|
||||
files, dirs := harness.readDirectory(log, d.id)
|
||||
if len(files)+len(dirs) != len(d.children.files)+len(d.children.directories) {
|
||||
panic(fmt.Errorf("bad number of edges -- got %v + %v, expected %v + %v", len(files), len(dirs), len(d.children.files), len(d.children.files)))
|
||||
@@ -802,7 +803,7 @@ func (d *fsTestDir[Id]) check(log *lib.Logger, harness fsTestHarness[Id]) {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *fsTestDir[Id]) clean(log *lib.Logger, harness fsTestHarness[Id]) {
|
||||
func (d *fsTestDir[Id]) clean(log *log.Logger, harness fsTestHarness[Id]) {
|
||||
files, dirs := harness.readDirectory(log, d.id)
|
||||
for _, fileName := range files {
|
||||
log.Debug("removing file %v", fileName)
|
||||
@@ -824,7 +825,7 @@ func (d *fsTestDir[Id]) clean(log *lib.Logger, harness fsTestHarness[Id]) {
|
||||
}
|
||||
|
||||
// Just the first block service id we can find
|
||||
func findBlockServiceToPurge(log *lib.Logger, client *client.Client) msgs.BlockServiceId {
|
||||
func findBlockServiceToPurge(log *log.Logger, client *client.Client) msgs.BlockServiceId {
|
||||
filesReq := msgs.VisitFilesReq{}
|
||||
filesResp := msgs.VisitFilesResp{}
|
||||
for {
|
||||
@@ -851,7 +852,7 @@ func findBlockServiceToPurge(log *lib.Logger, client *client.Client) msgs.BlockS
|
||||
|
||||
// returns how many blocks were corrupted
|
||||
func corruptFiles(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
shuckleAddress string,
|
||||
c *client.Client,
|
||||
opts *fsTestOpts,
|
||||
@@ -957,7 +958,7 @@ func corruptFiles(
|
||||
}
|
||||
|
||||
func fsTestInternal[Id comparable](
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
c *client.Client,
|
||||
state *fsTestState[Id],
|
||||
shuckleAddress string,
|
||||
@@ -1134,7 +1135,7 @@ func fsTestInternal[Id comparable](
|
||||
options := &cleanup.DefragOptions{
|
||||
WorkersPerShard: 5,
|
||||
}
|
||||
if err := cleanup.DefragFiles(log, c, lib.NewBufPool(), client.NewDirInfoCache(), stats, alert, options, "/"); err != nil {
|
||||
if err := cleanup.DefragFiles(log, c, bufpool.NewBufPool(), client.NewDirInfoCache(), stats, alert, options, "/"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if stats.DefraggedSpans == 0 {
|
||||
@@ -1259,7 +1260,7 @@ func (s3Harness) isHarness() {}
|
||||
func (apiHarness) isHarness() {}
|
||||
|
||||
func fsTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
shuckleAddress string,
|
||||
opts *fsTestOpts,
|
||||
counters *client.ClientCounters,
|
||||
@@ -1275,7 +1276,7 @@ func fsTest(
|
||||
switch h := harnessType.(type) {
|
||||
case posixHarness:
|
||||
harness := &posixFsTestHarness{
|
||||
bufPool: lib.NewBufPool(),
|
||||
bufPool: bufpool.NewBufPool(),
|
||||
readWithMmap: opts.readWithMmap,
|
||||
}
|
||||
state := fsTestState[string]{
|
||||
@@ -1287,7 +1288,7 @@ func fsTest(
|
||||
harness := &apiFsTestHarness{
|
||||
client: c,
|
||||
dirInfoCache: client.NewDirInfoCache(),
|
||||
readBufPool: lib.NewBufPool(),
|
||||
readBufPool: bufpool.NewBufPool(),
|
||||
}
|
||||
state := fsTestState[msgs.InodeId]{
|
||||
totalDirs: 1, // root dir
|
||||
@@ -1300,7 +1301,7 @@ func fsTest(
|
||||
panic(err)
|
||||
}
|
||||
port := listener.Addr().(*net.TCPAddr).Port
|
||||
bufPool := lib.NewBufPool()
|
||||
bufPool := bufpool.NewBufPool()
|
||||
server := terns3.NewS3Server(log, c, bufPool, client.NewDirInfoCache(), map[string]string{"bucket": "/"}, "")
|
||||
go http.Serve(listener, server)
|
||||
cfg, err := s3config.LoadDefaultConfig(context.TODO(),
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
|
||||
@@ -18,7 +18,7 @@ type largeFileTestOpts struct {
|
||||
}
|
||||
|
||||
func largeFileTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
opts *largeFileTestOpts,
|
||||
mountPoint string,
|
||||
) {
|
||||
|
||||
@@ -8,7 +8,8 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
@@ -19,7 +20,7 @@ type createInode struct {
|
||||
}
|
||||
|
||||
func parallelDirsTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
shuckleAddress string,
|
||||
counters *client.ClientCounters,
|
||||
) {
|
||||
@@ -59,7 +60,7 @@ func parallelDirsTest(
|
||||
tid := i
|
||||
inodes[tid] = make(map[string]createInode)
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
rand := wyhash.New(uint64(tid))
|
||||
for i := 0; i < actionsPerThread; i++ {
|
||||
which := rand.Float64()
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
)
|
||||
|
||||
type preadddirOpts struct {
|
||||
@@ -16,7 +16,7 @@ type preadddirOpts struct {
|
||||
}
|
||||
|
||||
func preaddirCheck(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
thread int,
|
||||
mountPoint string,
|
||||
opts *preadddirOpts,
|
||||
@@ -48,7 +48,7 @@ func preaddirCheck(
|
||||
}
|
||||
|
||||
func preaddirTest(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
mountPoint string,
|
||||
opts *preadddirOpts,
|
||||
) {
|
||||
|
||||
@@ -3,8 +3,9 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
@@ -21,14 +22,14 @@ type fullEdge struct {
|
||||
creationTime msgs.TernTime
|
||||
}
|
||||
|
||||
func shardReq(log *lib.Logger, client *client.Client, shid msgs.ShardId, reqBody msgs.ShardRequest, respBody msgs.ShardResponse) {
|
||||
func shardReq(log *log.Logger, client *client.Client, shid msgs.ShardId, reqBody msgs.ShardRequest, respBody msgs.ShardResponse) {
|
||||
err := client.ShardRequest(log, shid, reqBody, respBody)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func cdcReq(log *lib.Logger, client *client.Client, reqBody msgs.CDCRequest, respBody msgs.CDCResponse) {
|
||||
func cdcReq(log *log.Logger, client *client.Client, reqBody msgs.CDCRequest, respBody msgs.CDCResponse) {
|
||||
err := client.CDCRequest(log, reqBody, respBody)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -42,7 +43,7 @@ func (noopReader) Read(bs []byte) (int, error) {
|
||||
}
|
||||
|
||||
func createFile(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
client *client.Client,
|
||||
dirInfoCache *client.DirInfoCache,
|
||||
dirId msgs.InodeId,
|
||||
@@ -50,7 +51,7 @@ func createFile(
|
||||
name string,
|
||||
size uint64,
|
||||
dataSeed uint64,
|
||||
bufPool *lib.BufPool,
|
||||
bufPool *bufpool.BufPool,
|
||||
) (id msgs.InodeId, creationTime msgs.TernTime) {
|
||||
// construct
|
||||
constructReq := msgs.ConstructFileReq{
|
||||
@@ -100,7 +101,7 @@ func createFile(
|
||||
return constructResp.Id, linkResp.CreationTime
|
||||
}
|
||||
|
||||
func readFile(log *lib.Logger, bufPool *lib.BufPool, client *client.Client, id msgs.InodeId, size uint64) *lib.Buf {
|
||||
func readFile(log *log.Logger, bufPool *bufpool.BufPool, client *client.Client, id msgs.InodeId, size uint64) *bufpool.Buf {
|
||||
buf := bufPool.Get(int(size))
|
||||
r, err := client.ReadFile(log, bufPool, id)
|
||||
if err != nil {
|
||||
@@ -124,7 +125,7 @@ func readFile(log *lib.Logger, bufPool *lib.BufPool, client *client.Client, id m
|
||||
return buf
|
||||
}
|
||||
|
||||
func readDir(log *lib.Logger, client *client.Client, dir msgs.InodeId) []edge {
|
||||
func readDir(log *log.Logger, client *client.Client, dir msgs.InodeId) []edge {
|
||||
req := msgs.ReadDirReq{
|
||||
DirId: dir,
|
||||
StartHash: 0,
|
||||
@@ -147,7 +148,7 @@ func readDir(log *lib.Logger, client *client.Client, dir msgs.InodeId) []edge {
|
||||
return edges
|
||||
}
|
||||
|
||||
func fullReadDir(log *lib.Logger, client *client.Client, dirId msgs.InodeId) []fullEdge {
|
||||
func fullReadDir(log *log.Logger, client *client.Client, dirId msgs.InodeId) []fullEdge {
|
||||
req := msgs.FullReadDirReq{
|
||||
DirId: msgs.ROOT_DIR_INODE_ID,
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
"xtx/ternfs/wyhash"
|
||||
)
|
||||
|
||||
@@ -20,7 +20,7 @@ func dirName(mountPoint string, i int) string {
|
||||
}
|
||||
|
||||
func rsyncTest(
log *lib.Logger,
l *log.Logger,
opts *rsyncTestOpts,
mountPoint string,
) {
@@ -57,15 +57,15 @@ func rsyncTest(
}
// rsync into mountpoint
rsyncCmd := exec.Command("rsync", "-rv", tmpDir1+"/", mountPoint+"/")
rsyncCmd.Stdout = log.Sink(lib.INFO)
rsyncCmd.Stderr = log.Sink(lib.INFO)
rsyncCmd.Stdout = l.Sink(log.INFO)
rsyncCmd.Stderr = l.Sink(log.INFO)
if err := rsyncCmd.Run(); err != nil {
panic(err)
}
// diff directories
diffCmd := exec.Command("diff", "-rq", tmpDir1, mountPoint)
diffCmd.Stdout = log.Sink(lib.INFO)
diffCmd.Stderr = log.Sink(lib.INFO)
diffCmd.Stdout = l.Sink(log.INFO)
diffCmd.Stderr = l.Sink(log.INFO)
if err := diffCmd.Run(); err != nil {
panic(err)
}
|
||||
@@ -17,8 +17,10 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"xtx/ternfs/bufpool"
|
||||
"xtx/ternfs/client"
|
||||
"xtx/ternfs/lib"
|
||||
"xtx/ternfs/log"
|
||||
lrecover "xtx/ternfs/log/recover"
|
||||
"xtx/ternfs/managedprocess"
|
||||
"xtx/ternfs/msgs"
|
||||
"xtx/ternfs/wyhash"
|
||||
@@ -104,7 +106,7 @@ func (r *RunTests) print(format string, a ...any) error {
|
||||
}
|
||||
|
||||
func (r *RunTests) test(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
name string,
|
||||
extra string,
|
||||
run func(counters *client.ClientCounters),
|
||||
@@ -245,9 +247,9 @@ func (i *cfgOverrides) flag(k string) bool {
|
||||
|
||||
func (r *RunTests) run(
|
||||
terminateChan chan any,
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
) {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
c, err := client.NewClient(log, nil, r.shuckleAddress(), msgs.AddrsInfo{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -593,14 +595,14 @@ func (r *RunTests) run(
|
||||
"",
|
||||
func(counters *client.ClientCounters) {
|
||||
numThreads := 10000
|
||||
bufPool := lib.NewBufPool()
|
||||
bufPool := bufpool.NewBufPool()
|
||||
dirInfoCache := client.NewDirInfoCache()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numThreads)
|
||||
for i := 0; i < numThreads; i++ {
|
||||
ti := i
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
if _, err := c.CreateFile(log, bufPool, dirInfoCache, fmt.Sprintf("/%d", ti), bytes.NewReader([]byte{})); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -705,7 +707,7 @@ type blockServiceVictim struct {
|
||||
}
|
||||
|
||||
func (bsv *blockServiceVictim) start(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
blocksExe string,
|
||||
shucklePort uint16,
|
||||
port1 uint16,
|
||||
@@ -729,7 +731,7 @@ func (bsv *blockServiceVictim) start(
|
||||
}
|
||||
|
||||
func killBlockServices(
|
||||
log *lib.Logger,
|
||||
log *log.Logger,
|
||||
terminateChan chan any,
|
||||
stopChan chan struct{},
|
||||
pause *sync.Mutex,
|
||||
@@ -752,7 +754,7 @@ func killBlockServices(
|
||||
log.Info("will kill block service for %v", killDuration)
|
||||
rand := wyhash.New(uint64(time.Now().UnixNano()))
|
||||
go func() {
|
||||
defer func() { lib.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
defer func() { lrecover.HandleRecoverChan(log, terminateChan, recover()) }()
|
||||
for {
|
||||
// pick and kill the victim
|
||||
pause.Lock()
|
||||
@@ -937,17 +939,17 @@ func main() {
|
||||
}
|
||||
defer logOut.Close()
|
||||
}
|
||||
level := lib.INFO
|
||||
level := log.INFO
|
||||
if *verbose {
|
||||
level = lib.DEBUG
|
||||
level = log.DEBUG
|
||||
}
|
||||
if *trace {
|
||||
level = lib.TRACE
|
||||
level = log.TRACE
|
||||
}
|
||||
log := lib.NewLogger(logOut, &lib.LoggerOptions{Level: level, Syslog: false, PrintQuietAlerts: true})
|
||||
l := log.NewLogger(logOut, &log.LoggerOptions{Level: level, Syslog: false, PrintQuietAlerts: true})
|
||||
|
||||
if *mtu > 0 {
|
||||
log.Info("Setting MTU to %v", *mtu)
|
||||
l.Info("Setting MTU to %v", *mtu)
|
||||
client.SetMTU(*mtu)
|
||||
}
|
||||
|
||||
@@ -966,8 +968,8 @@ func main() {
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("building shard/cdc/blockservice/shuckle\n")
|
||||
cppExes = managedprocess.BuildCppExes(log, *repoDir, *buildType)
|
||||
goExes = managedprocess.BuildGoExes(log, *repoDir, *race)
|
||||
cppExes = managedprocess.BuildCppExes(l, *repoDir, *buildType)
|
||||
goExes = managedprocess.BuildGoExes(l, *repoDir, *race)
|
||||
}
|
||||
|
||||
terminateChan := make(chan any, 1)
|
||||
@@ -1001,7 +1003,7 @@ func main() {
|
||||
}
|
||||
isQEMU := strings.TrimSpace(string(dmiOut)) == "QEMU"
|
||||
if isQEMU {
|
||||
log.Info("increasing metadata timeout since we're in QEMU")
|
||||
l.Info("increasing metadata timeout since we're in QEMU")
|
||||
sysctl("fs.eggsfs.overall_shard_timeout_ms", "60000")
|
||||
sysctl("fs.eggsfs.overall_cdc_timeout_ms", "60000")
|
||||
}
|
||||
@@ -1045,7 +1047,7 @@ func main() {
|
||||
if *blockServiceKiller {
|
||||
shuckleOpts.Stale = time.Hour * 1000 // never, so that we stimulate the clients ability to fallback
|
||||
}
|
||||
procs.StartShuckle(log, shuckleOpts)
|
||||
procs.StartShuckle(l, shuckleOpts)
|
||||
|
||||
failureDomains := 14 + 4 // so that any 4 can fail and we can still do everything.
|
||||
hddBlockServices := 10
|
||||
@@ -1066,7 +1068,7 @@ func main() {
|
||||
storageClasses: storageClasses,
|
||||
}
|
||||
procId := bsv.start(
|
||||
log,
|
||||
l,
|
||||
goExes.BlocksExe,
|
||||
shucklePort,
|
||||
0, 0,
|
||||
@@ -1084,7 +1086,7 @@ func main() {
|
||||
|
||||
// wait for block services first, so we know that shards will immediately have all of them
|
||||
fmt.Printf("waiting for block services for %v...\n", waitShuckleFor)
|
||||
blockServices := client.WaitForBlockServices(log, fmt.Sprintf("127.0.0.1:%v", shucklePort), failureDomains*(hddBlockServices+flashBlockServices), true, waitShuckleFor)
|
||||
blockServices := client.WaitForBlockServices(l, fmt.Sprintf("127.0.0.1:%v", shucklePort), failureDomains*(hddBlockServices+flashBlockServices), true, waitShuckleFor)
|
||||
blockServicesPorts := make(map[msgs.FailureDomain]struct {
|
||||
_1 uint16
|
||||
_2 uint16
|
||||
@@ -1127,7 +1129,7 @@ func main() {
|
||||
// apparently 100ms is too little when running with valgrind
|
||||
cdcOpts.ShardTimeout = time.Millisecond * 500
|
||||
}
|
||||
procs.StartCDC(log, *repoDir, cdcOpts)
|
||||
procs.StartCDC(l, *repoDir, cdcOpts)
|
||||
}
|
||||
|
||||
// Start shards
|
||||
@@ -1159,20 +1161,20 @@ func main() {
|
||||
shopts.LogsDBFlags = []string{"-logsdb-leader"}
|
||||
}
|
||||
}
|
||||
procs.StartShard(log, *repoDir, &shopts)
|
||||
procs.StartShard(l, *repoDir, &shopts)
|
||||
}
|
||||
}
|
||||
|
||||
// now wait for shards/cdc
|
||||
fmt.Printf("waiting for shards/cdc for %v...\n", waitShuckleFor)
|
||||
client.WaitForClient(log, fmt.Sprintf("127.0.0.1:%v", shucklePort), waitShuckleFor)
|
||||
client.WaitForClient(l, fmt.Sprintf("127.0.0.1:%v", shucklePort), waitShuckleFor)
|
||||
|
||||
var stopBlockServiceKiller chan struct{}
|
||||
var pauseBlockServiceKiller sync.Mutex
|
||||
if *blockServiceKiller {
|
||||
fmt.Printf("will kill block services\n")
|
||||
stopBlockServiceKiller = make(chan struct{}, 1)
|
||||
killBlockServices(log, terminateChan, stopBlockServiceKiller, &pauseBlockServiceKiller, goExes.BlocksExe, shucklePort, *profile, procs, blockServicesProcs, blockServicesPorts)
|
||||
killBlockServices(l, terminateChan, stopBlockServiceKiller, &pauseBlockServiceKiller, goExes.BlocksExe, shucklePort, *profile, procs, blockServicesProcs, blockServicesPorts)
|
||||
// stop before trying to clean up data dir etc.
|
||||
defer func() {
|
||||
stopBlockServiceKiller <- struct{}{}
|
||||
@@ -1180,7 +1182,7 @@ func main() {
|
||||
}()
|
||||
}
|
||||
|
||||
fuseMountPoint := procs.StartFuse(log, &managedprocess.FuseOpts{
|
||||
fuseMountPoint := procs.StartFuse(l, &managedprocess.FuseOpts{
|
||||
Exe: goExes.FuseExe,
|
||||
Path: path.Join(*dataDir, "fuse"),
|
||||
LogLevel: level,
|
||||
@@ -1200,9 +1202,9 @@ func main() {
|
||||
}
|
||||
mountKmod(shuckleAddress, mountPoint)
|
||||
defer func() {
|
||||
log.Info("about to unmount kmod mount")
|
||||
l.Info("about to unmount kmod mount")
|
||||
out, err := exec.Command("sudo", "umount", mountPoint).CombinedOutput()
|
||||
log.Info("done unmounting")
|
||||
l.Info("done unmounting")
|
||||
if err != nil {
|
||||
fmt.Printf("could not umount fs (%v): %s", err, out)
|
||||
}
|
||||
@@ -1273,7 +1275,7 @@ func main() {
|
||||
pauseBlockServiceKiller: &pauseBlockServiceKiller,
|
||||
kmsgFd: kfd,
|
||||
}
|
||||
r.run(terminateChan, log)
|
||||
r.run(terminateChan, l)
|
||||
}()
|
||||
|
||||
// wait for things to finish
|
||||
@@ -1283,7 +1285,7 @@ func main() {
|
||||
}
|
||||
|
||||
// fsck everything
|
||||
log.Info("stopping cluster and fscking it")
|
||||
l.Info("stopping cluster and fscking it")
|
||||
procsClosed = true
|
||||
procs.Close()
|
||||
{
|
||||
@@ -1295,7 +1297,7 @@ func main() {
|
||||
if !strings.Contains(subDir.Name(), "shard") {
|
||||
continue
|
||||
}
|
||||
log.Info("fscking %q", path.Join(*dataDir, subDir.Name(), "db"))
|
||||
l.Info("fscking %q", path.Join(*dataDir, subDir.Name(), "db"))
|
||||
cmd := exec.Command(cppExes.DBToolsExe, "fsck", path.Join(*dataDir, subDir.Name(), "db"))
|
||||
if err := cmd.Run(); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -1,4 +1,4 @@
package lib
package timing

import (
"fmt"
@@ -1,4 +1,4 @@
package lib
package timing

import (
"fmt"
@@ -1,4 +1,4 @@
package lib
package timing

import (
"fmt"
@@ -1,4 +1,4 @@
package lib
package timing

import (
"fmt"
@@ -1,4 +1,4 @@
package lib
package timing

import (
"math"