From ac99f10f94ff308ae3a1504479728ecebbb569f7 Mon Sep 17 00:00:00 2001 From: Francesco Mazzoli Date: Wed, 11 Jan 2023 15:22:45 +0000 Subject: [PATCH] Add artificial packet drop to integration tests... ...and fixup many places in the code to allow for such drops to happen somewhat smoothly. --- c/eggs_msgs.h | 57 +- cpp/CDC.cpp | 58 +- cpp/CDC.hpp | 3 + cpp/CDCDB.cpp | 1399 +++++++++++++++---------- cpp/CDCDB.hpp | 24 +- cpp/CDCDBData.hpp | 127 +-- cpp/Crypto.cpp | 29 +- cpp/MsgsGen.cpp | 799 ++++++++------ cpp/MsgsGen.hpp | 282 +++-- cpp/Shard.cpp | 34 +- cpp/Shard.hpp | 4 + cpp/ShardDB.cpp | 251 +++-- cpp/eggs-cdc.app.cpp | 12 +- cpp/eggs-shard.app.cpp | 23 +- cpp/tests.app.cpp | 11 +- go/bincodegen/bincodegen.go | 79 +- go/cli/cli.go | 22 +- go/eggs/cdcreq.go | 265 +++-- go/eggs/client.go | 163 +-- go/eggs/gc.go | 39 +- go/eggs/managedprocess.go | 76 +- go/eggs/migrate.go | 12 +- go/eggs/shard_test.go | 6 +- go/eggs/shardreq.go | 285 +++-- go/gcdaemon/gcdaemon.go | 10 +- go/integrationtest/cleanup.go | 47 +- go/integrationtest/filehistory.go | 121 ++- go/integrationtest/fstest.go | 189 +++- go/integrationtest/harness.go | 45 +- go/integrationtest/integrationtest.go | 202 ++-- go/msgs/msgs.go | 196 +++- go/msgs/msgs_bincode.go | 665 +++++++++--- go/runeggs/runeggs.go | 8 + go/shuckle/shuckle.go | 8 + python/error.py | 2 - python/msgs.py | 378 ++++--- tests.sh | 3 + 37 files changed, 3883 insertions(+), 2051 deletions(-) diff --git a/c/eggs_msgs.h b/c/eggs_msgs.h index c5e63a16..18ab81a8 100644 --- a/c/eggs_msgs.h +++ b/c/eggs_msgs.h @@ -10,31 +10,31 @@ #define EGGSFS_ERR_FILE_NOT_FOUND 17 #define EGGSFS_ERR_DIRECTORY_NOT_FOUND 18 #define EGGSFS_ERR_NAME_NOT_FOUND 19 -#define EGGSFS_ERR_TYPE_IS_DIRECTORY 20 -#define EGGSFS_ERR_TYPE_IS_NOT_DIRECTORY 21 -#define EGGSFS_ERR_BAD_COOKIE 22 -#define EGGSFS_ERR_INCONSISTENT_STORAGE_CLASS_PARITY 23 -#define EGGSFS_ERR_LAST_SPAN_STATE_NOT_CLEAN 24 -#define EGGSFS_ERR_COULD_NOT_PICK_BLOCK_SERVICES 25 -#define EGGSFS_ERR_BAD_SPAN_BODY 26 -#define EGGSFS_ERR_SPAN_NOT_FOUND 27 -#define EGGSFS_ERR_BLOCK_SERVICE_NOT_FOUND 28 -#define EGGSFS_ERR_CANNOT_CERTIFY_BLOCKLESS_SPAN 29 -#define EGGSFS_ERR_BAD_NUMBER_OF_BLOCKS_PROOFS 30 -#define EGGSFS_ERR_BAD_BLOCK_PROOF 31 -#define EGGSFS_ERR_CANNOT_OVERRIDE_NAME 32 -#define EGGSFS_ERR_NAME_IS_LOCKED 33 -#define EGGSFS_ERR_OLD_NAME_IS_LOCKED 34 -#define EGGSFS_ERR_NEW_NAME_IS_LOCKED 35 +#define EGGSFS_ERR_EDGE_NOT_FOUND 20 +#define EGGSFS_ERR_EDGE_IS_LOCKED 21 +#define EGGSFS_ERR_TYPE_IS_DIRECTORY 22 +#define EGGSFS_ERR_TYPE_IS_NOT_DIRECTORY 23 +#define EGGSFS_ERR_BAD_COOKIE 24 +#define EGGSFS_ERR_INCONSISTENT_STORAGE_CLASS_PARITY 25 +#define EGGSFS_ERR_LAST_SPAN_STATE_NOT_CLEAN 26 +#define EGGSFS_ERR_COULD_NOT_PICK_BLOCK_SERVICES 27 +#define EGGSFS_ERR_BAD_SPAN_BODY 28 +#define EGGSFS_ERR_SPAN_NOT_FOUND 29 +#define EGGSFS_ERR_BLOCK_SERVICE_NOT_FOUND 30 +#define EGGSFS_ERR_CANNOT_CERTIFY_BLOCKLESS_SPAN 31 +#define EGGSFS_ERR_BAD_NUMBER_OF_BLOCKS_PROOFS 32 +#define EGGSFS_ERR_BAD_BLOCK_PROOF 33 +#define EGGSFS_ERR_CANNOT_OVERRIDE_NAME 34 +#define EGGSFS_ERR_NAME_IS_LOCKED 35 #define EGGSFS_ERR_MTIME_IS_TOO_RECENT 36 #define EGGSFS_ERR_MISMATCHING_TARGET 37 #define EGGSFS_ERR_MISMATCHING_OWNER 38 -#define EGGSFS_ERR_DIRECTORY_NOT_EMPTY 39 -#define EGGSFS_ERR_FILE_IS_TRANSIENT 40 -#define EGGSFS_ERR_OLD_DIRECTORY_NOT_FOUND 41 -#define EGGSFS_ERR_NEW_DIRECTORY_NOT_FOUND 42 -#define EGGSFS_ERR_LOOP_IN_DIRECTORY_RENAME 43 -#define EGGSFS_ERR_EDGE_NOT_FOUND 44 +#define EGGSFS_ERR_MISMATCHING_CREATION_TIME 39 +#define EGGSFS_ERR_DIRECTORY_NOT_EMPTY 40 +#define EGGSFS_ERR_FILE_IS_TRANSIENT 41 +#define EGGSFS_ERR_OLD_DIRECTORY_NOT_FOUND 42 +#define EGGSFS_ERR_NEW_DIRECTORY_NOT_FOUND 43 +#define EGGSFS_ERR_LOOP_IN_DIRECTORY_RENAME 44 #define EGGSFS_ERR_DIRECTORY_HAS_OWNER 45 #define EGGSFS_ERR_FILE_IS_NOT_TRANSIENT 46 #define EGGSFS_ERR_FILE_NOT_EMPTY 47 @@ -46,10 +46,10 @@ #define EGGSFS_ERR_MORE_RECENT_SNAPSHOT_EDGE 53 #define EGGSFS_ERR_MORE_RECENT_CURRENT_EDGE 54 #define EGGSFS_ERR_BAD_DIRECTORY_INFO 55 -#define EGGSFS_ERR_CREATION_TIME_TOO_RECENT 56 -#define EGGSFS_ERR_DEADLINE_NOT_PASSED 57 -#define EGGSFS_ERR_SAME_SOURCE_AND_DESTINATION 58 -#define EGGSFS_ERR_SAME_DIRECTORIES 59 +#define EGGSFS_ERR_DEADLINE_NOT_PASSED 56 +#define EGGSFS_ERR_SAME_SOURCE_AND_DESTINATION 57 +#define EGGSFS_ERR_SAME_DIRECTORIES 58 +#define EGGSFS_ERR_SAME_SHARD 59 #define EGGSFS_META_LOOKUP 0x1 #define EGGSFS_META_STAT_FILE 0x2 @@ -64,12 +64,13 @@ #define EGGSFS_META_FILE_SPANS 0xD #define EGGSFS_META_SAME_DIRECTORY_RENAME 0xE #define EGGSFS_META_SET_DIRECTORY_INFO 0xF +#define EGGSFS_META_SNAPSHOT_LOOKUP 0x9 #define EGGSFS_META_VISIT_DIRECTORIES 0x15 #define EGGSFS_META_VISIT_FILES 0x20 #define EGGSFS_META_VISIT_TRANSIENT_FILES 0x16 #define EGGSFS_META_FULL_READ_DIR 0x21 #define EGGSFS_META_REMOVE_NON_OWNED_EDGE 0x17 -#define EGGSFS_META_INTRA_SHARD_HARD_FILE_UNLINK 0x18 +#define EGGSFS_META_SAME_SHARD_HARD_FILE_UNLINK 0x18 #define EGGSFS_META_REMOVE_SPAN_INITIATE 0x19 #define EGGSFS_META_REMOVE_SPAN_CERTIFY 0x1A #define EGGSFS_META_SWAP_BLOCKS 0x22 @@ -89,5 +90,5 @@ #define EGGSFS_CDC_SOFT_UNLINK_DIRECTORY 0x3 #define EGGSFS_CDC_RENAME_DIRECTORY 0x4 #define EGGSFS_CDC_HARD_UNLINK_DIRECTORY 0x5 -#define EGGSFS_CDC_HARD_UNLINK_FILE 0x6 +#define EGGSFS_CDC_CROSS_SHARD_HARD_UNLINK_FILE 0x6 diff --git a/cpp/CDC.cpp b/cpp/CDC.cpp index b0aed993..cbd48c62 100644 --- a/cpp/CDC.cpp +++ b/cpp/CDC.cpp @@ -13,6 +13,7 @@ #include "Bincode.hpp" #include "CDC.hpp" #include "CDCDB.hpp" +#include "Env.hpp" #include "Exception.hpp" #include "Msgs.hpp" #include "MsgsGen.hpp" @@ -21,6 +22,7 @@ #include "CDCDB.hpp" #include "Crypto.hpp" #include "CDCKey.hpp" +#include "splitmix64.hpp" static uint16_t CDC_PORT = 36137; @@ -48,6 +50,9 @@ private: CDCReqContainer _cdcReqContainer; ShardRespContainer _shardRespContainer; CDCStep _step; + uint64_t _shardRequestIdCounter; + uint64_t _packetDropRand; + uint64_t _packetDropProbability; // probability * 10,000 std::array _socks; AES128Key _expandedCDCKey; // The requests we've enqueued, but haven't completed yet, with @@ -57,13 +62,21 @@ private: std::optional _inFlightShardReq; public: - CDCServer(Logger& logger, CDCDB& db) : + CDCServer(Logger& logger, const CDCOptions& options, CDCDB& db) : _env(logger, "req_server"), _stop(false), _db(db), _recvBuf(UDP_MTU), - _sendBuf(UDP_MTU) + _sendBuf(UDP_MTU), + _shardRequestIdCounter(0), + _packetDropRand(CDC_PORT), + _packetDropProbability(0) { + if (options.simulatePacketDrop != 0.0) { + LOG_INFO(_env, "will drop %s%% of packets", options.simulatePacketDrop*100.0); + _packetDropProbability = options.simulatePacketDrop * 10'000.0; + ALWAYS_ASSERT(_packetDropProbability > 0 && _packetDropProbability < 10'000); + } _currentLogIndex = _db.lastAppliedLogEntry(); memset(&_socks[0], 0, sizeof(_socks)); expandKey(CDCKey, _expandedCDCKey); @@ -149,14 +162,14 @@ public: break; } - // timeout after 5 secs - if (_inFlightShardReq && (eggsNow() - _inFlightShardReq->sentAt) > 5'000'000'000ull) { - _handleShardError(_inFlightShardReq->shid, EggsError::TIMEOUT); + // timeout after 100ms + if (_inFlightShardReq && (eggsNow() - _inFlightShardReq->sentAt) > 100'000'000ull) { _inFlightShardReq.reset(); + _handleShardError(_inFlightShardReq->shid, EggsError::TIMEOUT); } - // 10ms timeout for prompt termination and for timeouts - int nfds = epoll_wait(epoll, events, _socks.size(), 10 /* milliseconds */); + // 1ms timeout for prompt termination and for shard resps timeouts + int nfds = epoll_wait(epoll, events, _socks.size(), 1 /*milliseconds*/); if (nfds < 0) { throw SYSCALL_EXCEPTION("epoll_wait"); } @@ -191,7 +204,12 @@ private: if (read < 0) { throw SYSCALL_EXCEPTION("recvfrom"); } - LOG_DEBUG(_env, "received message from %s", clientAddr); + LOG_DEBUG(_env, "received CDC request from %s", clientAddr); + + if (splitmix64(_packetDropRand) % 10'000 < _packetDropProbability) { + LOG_DEBUG(_env, "artificially dropping packet"); + continue; + } BincodeBuf reqBbuf(&_recvBuf[0], read); @@ -205,7 +223,7 @@ private: continue; } - LOG_DEBUG(_env, "received request id %s, kind %s, from %s", reqHeader.requestId, reqHeader.kind, clientAddr); + LOG_DEBUG(_env, "received request id %s, kind %s", reqHeader.requestId, reqHeader.kind); // If this will be filled in with an actual code, it means that we couldn't process // the request. @@ -229,7 +247,7 @@ private: if (err == NO_ERROR) { // If things went well, process the request - LOG_DEBUG(_env, "cdc request %s successfully parsed, will now process", _cdcReqContainer.kind()); + LOG_DEBUG(_env, "CDC request %s successfully parsed, will now process", _cdcReqContainer.kind()); uint64_t txnId = _db.processCDCReq(true, eggsNow(), _advanceLogIndex(), _cdcReqContainer, _step); auto& inFlight = _inFlightTxns[txnId]; inFlight.cdcRequestId = reqHeader.requestId; @@ -257,6 +275,11 @@ private: } LOG_DEBUG(_env, "received response from shard %s", shid); + + if (splitmix64(_packetDropRand) % 10'000 < _packetDropProbability) { + LOG_DEBUG(_env, "artificially dropping packet"); + continue; + } BincodeBuf reqBbuf(&_recvBuf[0], read); @@ -265,10 +288,12 @@ private: respHeader.unpack(reqBbuf); } catch (BincodeException err) { LOG_ERROR(_env, "%s\nstacktrace:\n%s", err.what(), err.getStackTrace()); - RAISE_ALERT(_env, "could not parse response header from shard %s, dropping it.", shid); + RAISE_ALERT(_env, "could not parse response header, dropping response"); continue; } + LOG_DEBUG(_env, "received response id %s, kind %s", respHeader.requestId, respHeader.kind); + // Note that below we just let the BincodeExceptions propagate upwards since we // control all the code in this codebase, and the header is good, and we're a // bit lazy. @@ -325,7 +350,7 @@ private: RAISE_ALERT(_env, "txn %s, req id %s, finished with error %s", step.txnFinished, inFlight->second.cdcRequestId, step.err); _sendError(inFlight->second.cdcRequestId, step.err, inFlight->second.clientAddr); } else { - LOG_DEBUG(_env, "sending response back to %s", inFlight->second.clientAddr); + LOG_DEBUG(_env, "sending response with req id %s, kind %s, back to %s", inFlight->second.cdcRequestId, inFlight->second.kind, inFlight->second.clientAddr); BincodeBuf bbuf(&_sendBuf[0], _sendBuf.size()); CDCResponseHeader respHeader(inFlight->second.cdcRequestId, inFlight->second.kind); respHeader.pack(bbuf); @@ -338,10 +363,10 @@ private: if (step.txnNeedsShard != 0) { LOG_DEBUG(_env, "txn %s needs shard %s, req %s", step.txnNeedsShard, step.shardReq.shid, step.shardReq.req); BincodeBuf bbuf(&_sendBuf[0], _sendBuf.size()); - auto t = eggsNow(); // Header ShardRequestHeader shardReqHeader; - shardReqHeader.requestId = t.ns; + shardReqHeader.requestId = _shardRequestIdCounter; + _shardRequestIdCounter++; shardReqHeader.kind = step.shardReq.req.kind(); shardReqHeader.pack(bbuf); // Body @@ -356,12 +381,13 @@ private: shardAddr.sin_family = AF_INET; shardAddr.sin_port = htons(step.shardReq.shid.port()); inet_aton("127.0.0.1", &shardAddr.sin_addr); + LOG_DEBUG(_env, "sending request with req id %s", shardReqHeader.requestId); _send(_socks[(int)step.shardReq.shid.u8 + 1], shardAddr, (const char*)bbuf.data, bbuf.len()); // Record the in-flight req ALWAYS_ASSERT(!_inFlightShardReq); auto& inFlight = _inFlightShardReq.emplace(); inFlight.shardRequestId = shardReqHeader.requestId; - inFlight.sentAt = t; + inFlight.sentAt = eggsNow(); inFlight.txnId = step.txnNeedsShard; inFlight.shid = step.shardReq.shid; } @@ -417,7 +443,7 @@ void runCDC(const std::string& dbDir, const CDCOptions& options) { CDCDB db(logger, dbDir); { - auto server = std::make_unique(logger, db); + auto server = std::make_unique(logger, options, db); pthread_t tid; if (pthread_create(&tid, nullptr, &runCDCServer, &*server) != 0) { throw SYSCALL_EXCEPTION("pthread_create"); diff --git a/cpp/CDC.hpp b/cpp/CDC.hpp index 01e7d30e..04056757 100644 --- a/cpp/CDC.hpp +++ b/cpp/CDC.hpp @@ -5,6 +5,9 @@ struct CDCOptions { LogLevel level = LogLevel::LOG_INFO; std::string logFile = ""; // if empty, stdout + // If non-zero, packets will be dropped with this probability. Useful to test + // resilience of the system. + double simulatePacketDrop = 0.0; }; void runCDC(const std::string& dbDir, const CDCOptions& options); \ No newline at end of file diff --git a/cpp/CDCDB.cpp b/cpp/CDCDB.cpp index f9fb7b93..84c65f31 100644 --- a/cpp/CDCDB.cpp +++ b/cpp/CDCDB.cpp @@ -102,7 +102,7 @@ std::ostream& operator<<(std::ostream& out, const CDCStep& x) { out << "txnNeedsShard=" << x.txnNeedsShard << ", shardReq=" << x.shardReq; } if (x.nextTxn != 0) { - if (x.txnFinished != 0 && x.txnNeedsShard != 0) { + if (x.txnFinished != 0 || x.txnNeedsShard != 0) { out << ", "; } out << "nextTxn=" << x.nextTxn; @@ -111,47 +111,851 @@ std::ostream& operator<<(std::ostream& out, const CDCStep& x) { return out; } -enum class MakeDirectoryStep : uint8_t { - START = 0, - AFTER_CREATE_DIR = 1, - AFTER_CREATE_LOCKED_EDGE = 2, - AFTER_UNLOCK_EDGE = 3, - AFTER_ROLLBACK = 4, +inline bool createCurrentLockedEdgeRetry(EggsError err) { + return + err == EggsError::TIMEOUT || err == EggsError::MTIME_IS_TOO_RECENT || + err == EggsError::MORE_RECENT_SNAPSHOT_EDGE || err == EggsError::MORE_RECENT_CURRENT_EDGE; +} + +inline bool createCurrentLockedEdgeFatal(EggsError err) { + return + err == EggsError::DIRECTORY_NOT_FOUND || err == EggsError::CANNOT_OVERRIDE_NAME; +} + +struct StateMachineEnv { + rocksdb::OptimisticTransactionDB* db; + rocksdb::ColumnFamilyHandle* defaultCf; + rocksdb::ColumnFamilyHandle* parentCf; + rocksdb::Transaction& dbTxn; + EggsTime time; + uint64_t txnId; + uint8_t txnStep; + CDCStep& cdcStep; + + StateMachineEnv( + rocksdb::OptimisticTransactionDB* db_, rocksdb::ColumnFamilyHandle* defaultCf_, rocksdb::ColumnFamilyHandle* parentCf_, rocksdb::Transaction& dbTxn_, EggsTime time_, uint64_t txnId_, uint8_t step_, CDCStep& cdcStep_ + ): + db(db_), defaultCf(defaultCf_), parentCf(parentCf_), dbTxn(dbTxn_), time(time_), txnId(txnId_), txnStep(step_), cdcStep(cdcStep_) + {} + + InodeId nextDirectoryId() { + std::string v; + ROCKS_DB_CHECKED(db->Get({}, defaultCf, cdcMetadataKey(&NEXT_DIRECTORY_ID_KEY), &v)); + ExternalValue nextId(v); + InodeId id = nextId().id(); + nextId().setId(InodeId::FromU64(id.u64 + 1)); + ROCKS_DB_CHECKED(dbTxn.Put(defaultCf, cdcMetadataKey(&NEXT_DIRECTORY_ID_KEY), nextId.toSlice())); + return id; + } + + ShardReqContainer& needsShard(uint8_t step, ShardId shid) { + txnStep = step; + cdcStep.txnFinished = 0; + cdcStep.txnNeedsShard = txnId; + cdcStep.shardReq.shid = shid; + return cdcStep.shardReq.req; + } + + CDCRespContainer& finish() { + cdcStep.txnFinished = txnId; + cdcStep.err = NO_ERROR; + return cdcStep.resp; + } + + void finishWithError(EggsError err) { + ALWAYS_ASSERT(err != NO_ERROR); + cdcStep.txnFinished = txnId; + cdcStep.err = err; + cdcStep.txnNeedsShard = 0; + } }; -enum class HardUnlinkDirectoryStep : uint8_t { - START = 0, - AFTER_REMOVE_INODE = 1, +constexpr uint8_t TXN_START = 0; + +enum MakeDirectoryStep : uint8_t { + MAKE_DIRECTORY_LOOKUP = 1, + MAKE_DIRECTORY_CREATE_DIR = 2, + MAKE_DIRECTORY_CREATE_LOCKED_EDGE = 3, + MAKE_DIRECTORY_UNLOCK_EDGE = 4, + MAKE_DIRECTORY_ROLLBACK = 5, }; -enum class RenameFileStep : uint8_t { - START = 0, - AFTER_LOCK_OLD_EDGE = 1, - AFTER_CREATE_NEW_LOCKED_EDGE = 2, - AFTER_UNLOCK_NEW_EDGE = 3, - AFTER_UNLOCK_OLD_EDGE = 4, - AFTER_ROLLBACK = 5, +// Steps: +// +// 1. Lookup if an existing directory exists. If it does, immediately succeed. +// 2. Allocate inode id here in the CDC +// 3. Create directory in shard we get from the inode +// 4. Create locked edge from owner to newly created directory +// 5. Unlock the edge created in 3 +// +// If 4 fails, 3 must be rolled back. 5 does not fail. +// +// 1 is necessary rather than failing on attempted override because otherwise failures +// due to repeated calls are indistinguishable from genuine failures. +struct MakeDirectoryStateMachine { + StateMachineEnv& env; + const MakeDirectoryReq& req; + MakeDirectoryState state; + + MakeDirectoryStateMachine(StateMachineEnv& env_, const MakeDirectoryReq& req_, MakeDirectoryState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: start(); break; + case MAKE_DIRECTORY_LOOKUP: afterLookup(err, resp); break; + case MAKE_DIRECTORY_CREATE_DIR: afterCreateDirectoryInode(err, resp); break; + case MAKE_DIRECTORY_CREATE_LOCKED_EDGE: afterCreateLockedEdge(err, resp); break; + case MAKE_DIRECTORY_UNLOCK_EDGE: afterUnlockEdge(err, resp); break; + case MAKE_DIRECTORY_ROLLBACK: afterRollback(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + void start() { + state.setDirId(env.nextDirectoryId()); + lookup(); + } + + void lookup() { + auto& shardReq = env.needsShard(MAKE_DIRECTORY_LOOKUP, req.ownerId.shard()).setLookup(); + shardReq.dirId = req.ownerId; + shardReq.name = req.name; + } + + void afterLookup(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + lookup(); // retry + } else if (err == EggsError::DIRECTORY_NOT_FOUND) { + env.finishWithError(err); + } else if (err == EggsError::NAME_NOT_FOUND) { + // normal case, let's proceed + createDirectoryInode(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + const auto& lookupResp = resp->getLookup(); + if (lookupResp.targetId.type() == InodeType::DIRECTORY) { + // we're good already + auto& cdcResp = env.finish().setMakeDirectory(); + cdcResp.creationTime = lookupResp.creationTime; + cdcResp.id = lookupResp.targetId; + } else { + env.finishWithError(EggsError::CANNOT_OVERRIDE_NAME); + } + } + } + + void createDirectoryInode() { + auto& shardReq = env.needsShard(MAKE_DIRECTORY_CREATE_DIR, state.dirId().shard()).setCreateDirectoryInode(); + shardReq.id = state.dirId(); + shardReq.info.inherited = req.info.inherited; + shardReq.info.body = req.info.body; + shardReq.ownerId = req.ownerId; + } + + void afterCreateDirectoryInode(EggsError shardRespError, const ShardRespContainer* shardResp) { + if (shardRespError == EggsError::TIMEOUT) { + // Try again -- note that the call to create directory inode is idempotent. + createDirectoryInode(); + } else { + ALWAYS_ASSERT(shardRespError == NO_ERROR); + createLockedEdge(); + } + } + + void createLockedEdge() { + auto& shardReq = env.needsShard(MAKE_DIRECTORY_CREATE_LOCKED_EDGE, req.ownerId.shard()).setCreateLockedCurrentEdge(); + shardReq.dirId = req.ownerId; + shardReq.targetId = state.dirId(); + shardReq.name = req.name; + } + + void afterCreateLockedEdge(EggsError err, const ShardRespContainer* resp) { + if (createCurrentLockedEdgeRetry(err)) { + createLockedEdge(); // try again + } else if (createCurrentLockedEdgeFatal(err)) { + // can't go forward + state.setExitError(err); + rollback(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + state.setCreationTime(resp->getCreateLockedCurrentEdge().creationTime); + unlockEdge(); + } + } + + void unlockEdge() { + auto& shardReq = env.needsShard(MAKE_DIRECTORY_UNLOCK_EDGE, req.ownerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.ownerId; + shardReq.name = req.name; + shardReq.targetId = state.dirId(); + shardReq.wasMoved = false; + shardReq.creationTime = state.creationTime(); + } + + void afterUnlockEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + // retry + unlockEdge(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + // We're done, record the parent relationship and finish + { + auto k = InodeIdKey::Static(state.dirId()); + auto v = InodeIdKey::Static(req.ownerId); + ROCKS_DB_CHECKED(env.dbTxn.Put(env.parentCf, k.toSlice(), v.toSlice())); + } + auto& resp = env.finish().setMakeDirectory(); + resp.id = state.dirId(); + resp.creationTime = state.creationTime(); + } + } + + void rollback() { + auto& shardReq = env.needsShard(MAKE_DIRECTORY_ROLLBACK, state.dirId().shard()).setRemoveDirectoryOwner(); + shardReq.dirId = state.dirId(); + // we've just created this directory, it is empty, therefore the policy + // is irrelevant. + shardReq.info = defaultDirectoryInfo(); + } + + void afterRollback(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + rollback(); // retry + } else { + ALWAYS_ASSERT(err == NO_ERROR); + env.finishWithError(state.exitError()); + } + } }; -enum class SoftUnlinkDirectoryStep : uint8_t { - START = 0, - AFTER_LOCK_EDGE = 1, - AFTER_STAT = 2, - AFTER_REMOVE_OWNER = 3, - AFTER_UNLOCK_EDGE = 4, - AFTER_ROLLBACK = 5, +enum HardUnlinkDirectoryStep : uint8_t { + HARD_UNLINK_DIRECTORY_REMOVE_INODE = 1, }; -enum class RenameDirectoryStep : uint8_t { - START = 0, - AFTER_LOCK_OLD_EDGE = 1, - AFTER_CREATE_LOCKED_EDGE = 2, - AFTER_UNLOCK_NEW_EDGE = 3, - AFTER_UNLOCK_OLD_EDGE = 4, - AFTER_SET_OWNER = 5, - AFTER_ROLLBACK = 6, +// The only reason we have this here is for possible conflicts with RemoveDirectoryOwner, +// which might temporarily set the owner of a directory to NULL. Since in the current +// implementation we only ever have one transaction in flight in the CDC, we can just +// execute this. +struct HardUnlinkDirectoryStateMachine { + StateMachineEnv& env; + const HardUnlinkDirectoryReq& req; + HardUnlinkDirectoryState state; + + HardUnlinkDirectoryStateMachine(StateMachineEnv& env_, const HardUnlinkDirectoryReq& req_, HardUnlinkDirectoryState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: removeInode(); break; + case HARD_UNLINK_DIRECTORY_REMOVE_INODE: afterRemoveInode(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + void removeInode() { + auto& shardReq = env.needsShard(HARD_UNLINK_DIRECTORY_REMOVE_INODE, req.dirId.shard()).setRemoveInode(); + shardReq.id = req.dirId; + } + + void afterRemoveInode(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + removeInode(); // try again + } else if ( + err == EggsError::DIRECTORY_NOT_FOUND || err == EggsError::DIRECTORY_HAS_OWNER || err == EggsError::DIRECTORY_NOT_EMPTY + ) { + env.finishWithError(err); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + env.finish().setHardUnlinkDirectory(); + } + } }; +enum RenameFileStep : uint8_t { + RENAME_FILE_LOCK_OLD_EDGE = 1, + RENAME_FILE_CREATE_NEW_LOCKED_EDGE = 2, + RENAME_FILE_UNLOCK_NEW_EDGE = 3, + RENAME_FILE_UNLOCK_OLD_EDGE = 4, + RENAME_FILE_ROLLBACK = 5, +}; + +// Steps: +// +// 1. lock source current edge +// 2. create destination locked current target edge +// 3. unlock edge in step 2 +// 4. unlock source target current edge, and soft unlink it +// +// If we fail at step 2, we need to roll back step 1. Steps 3 and 4 should never fail. +struct RenameFileStateMachine { + StateMachineEnv& env; + const RenameFileReq& req; + RenameFileState state; + + RenameFileStateMachine(StateMachineEnv& env_, const RenameFileReq& req_, RenameFileState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: start(); break; + case RENAME_FILE_LOCK_OLD_EDGE: afterLockOldEdge(err, resp); break; + case RENAME_FILE_CREATE_NEW_LOCKED_EDGE: afterCreateNewLockedEdge(err, resp); break; + case RENAME_FILE_UNLOCK_NEW_EDGE: afterUnlockNewEdge(err, resp); break; + case RENAME_FILE_UNLOCK_OLD_EDGE: afterUnlockOldEdge(err, resp); break; + case RENAME_FILE_ROLLBACK: afterRollback(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + void start() { + // We need this explicit check here because moving directories is more complicated, + // and therefore we do it in another transaction type entirely. + if (req.targetId.type() == InodeType::DIRECTORY) { + env.finishWithError(EggsError::TYPE_IS_NOT_DIRECTORY); + } else if (req.oldOwnerId == req.newOwnerId) { + env.finishWithError(EggsError::SAME_DIRECTORIES); + } else { + lockOldEdge(); + } + } + + void lockOldEdge() { + auto& shardReq = env.needsShard(RENAME_FILE_LOCK_OLD_EDGE, req.oldOwnerId.shard()).setLockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.name = req.oldName; + shardReq.targetId = req.targetId; + shardReq.creationTime = req.oldCreationTime; + } + + void afterLockOldEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + lockOldEdge(); // retry + } else if ( + err == EggsError::EDGE_NOT_FOUND || err == EggsError::MISMATCHING_CREATION_TIME || err == EggsError::DIRECTORY_NOT_FOUND + ) { + // We failed hard and we have nothing to roll back + if (err == EggsError::DIRECTORY_NOT_FOUND) { + err = EggsError::OLD_DIRECTORY_NOT_FOUND; + } + env.finishWithError(err); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + createNewLockedEdge(); + } + } + + void createNewLockedEdge() { + auto& shardReq = env.needsShard(RENAME_FILE_CREATE_NEW_LOCKED_EDGE, req.newOwnerId.shard()).setCreateLockedCurrentEdge(); + shardReq.dirId = req.newOwnerId; + shardReq.name = req.newName; + shardReq.targetId = req.targetId; + } + + void afterCreateNewLockedEdge(EggsError err, const ShardRespContainer* resp) { + if (createCurrentLockedEdgeRetry(err)) { + createNewLockedEdge(); // retry + } else if (createCurrentLockedEdgeFatal(err)) { + if (err == EggsError::DIRECTORY_NOT_FOUND) { + err = EggsError::NEW_DIRECTORY_NOT_FOUND; + } + // we failed hard and we need to rollback + state.setExitError(err); + rollback(); + } else { + state.setNewCreationTime(resp->getCreateLockedCurrentEdge().creationTime); + unlockNewEdge(); + } + } + + void unlockNewEdge() { + auto& shardReq = env.needsShard(RENAME_FILE_UNLOCK_NEW_EDGE, req.newOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.newOwnerId; + shardReq.targetId = req.targetId; + shardReq.name = req.newName; + shardReq.wasMoved = false; + shardReq.creationTime = state.newCreationTime(); + } + + void afterUnlockNewEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + unlockNewEdge(); // retry + } else { + ALWAYS_ASSERT(err == NO_ERROR); + unlockOldEdge(); + } + } + + void unlockOldEdge() { + // We're done creating the destination edge, now unlock the source, marking it as moved + auto& shardReq = env.needsShard(RENAME_FILE_UNLOCK_OLD_EDGE, req.oldOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.targetId = req.targetId; + shardReq.name = req.oldName; + shardReq.wasMoved = true; + shardReq.creationTime = req.oldCreationTime; + } + + void afterUnlockOldEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + unlockOldEdge(); // retry + } else { + // This can only be because of repeated calls from here: we have the edge locked, + // and only the CDC does changes. + // TODO it would be cleaner to verify this with a lookup + ALWAYS_ASSERT(err == NO_ERROR || err == EggsError::EDGE_NOT_FOUND); + // we're finally done + auto& resp = env.finish().setRenameFile(); + resp.creationTime = state.newCreationTime(); + } + } + + void rollback() { + auto& shardReq = env.needsShard(RENAME_FILE_ROLLBACK, req.oldOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.name = req.oldName; + shardReq.targetId = req.targetId; + shardReq.wasMoved = false; + shardReq.creationTime = state.newCreationTime(); + } + + void afterRollback(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + rollback(); // retry + } else { + ALWAYS_ASSERT(err == NO_ERROR); + env.finishWithError(state.exitError()); + } + } +}; + +enum SoftUnlinkDirectoryStep : uint8_t { + SOFT_UNLINK_DIRECTORY_LOCK_EDGE = 1, + SOFT_UNLINK_DIRECTORY_STAT = 2, + SOFT_UNLINK_DIRECTORY_REMOVE_OWNER = 3, + SOFT_UNLINK_DIRECTORY_UNLOCK_EDGE = 4, + SOFT_UNLINK_DIRECTORY_ROLLBACK = 5, +}; + +// Steps: +// +// 1. Lock edge going into the directory to remove. This prevents things making +// making it snapshot or similar in the meantime. +// 2. Resolve the directory info, since we'll need to store it when we remove the directory owner. +// 3. Remove directory owner from directory that we want to remove. This will fail if there still +// are current edges there. +// 4. Unlock edge going into the directory, making it snapshot. +// +// If 2 or 3 fail, we need to roll back the locking, without making the edge snapshot. +struct SoftUnlinkDirectoryStateMachine { + StateMachineEnv& env; + const SoftUnlinkDirectoryReq& req; + SoftUnlinkDirectoryState state; + + SoftUnlinkDirectoryStateMachine(StateMachineEnv& env_, const SoftUnlinkDirectoryReq& req_, SoftUnlinkDirectoryState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: start(); break; + case SOFT_UNLINK_DIRECTORY_LOCK_EDGE: afterLockEdge(err, resp); break; + case SOFT_UNLINK_DIRECTORY_STAT: afterStat(err, resp); break; + case SOFT_UNLINK_DIRECTORY_REMOVE_OWNER: afterRemoveOwner(err, resp); break; + case SOFT_UNLINK_DIRECTORY_UNLOCK_EDGE: afterUnlockEdge(err, resp); break; + case SOFT_UNLINK_DIRECTORY_ROLLBACK: afterRollback(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + void start() { + if (req.targetId.type() != InodeType::DIRECTORY) { + env.finishWithError(EggsError::TYPE_IS_NOT_DIRECTORY); + } else { + lockEdge(); + } + } + + void lockEdge() { + auto& shardReq = env.needsShard(SOFT_UNLINK_DIRECTORY_LOCK_EDGE, req.ownerId.shard()).setLockCurrentEdge(); + shardReq.dirId = req.ownerId; + shardReq.name = req.name; + shardReq.targetId = req.targetId; + shardReq.creationTime = req.creationTime; + } + + void afterLockEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + lockEdge(); + } else if (err == EggsError::MISMATCHING_CREATION_TIME || err == EggsError::EDGE_NOT_FOUND) { + env.finishWithError(err); // no rollback to be done + } else { + ALWAYS_ASSERT(err == NO_ERROR); + state.setStatDirId(req.targetId); + stat(); + } + } + + void stat() { + auto& shardReq = env.needsShard(SOFT_UNLINK_DIRECTORY_STAT, state.statDirId().shard()).setStatDirectory(); + shardReq.id = state.statDirId(); + } + + void afterStat(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + stat(); // retry + } else { + ALWAYS_ASSERT(err == NO_ERROR); + const auto& statResp = resp->getStatDirectory(); + if (statResp.info.size() > 0) { + auto& shardReq = env.needsShard(SOFT_UNLINK_DIRECTORY_REMOVE_OWNER, req.targetId.shard()).setRemoveDirectoryOwner(); + shardReq.dirId = req.targetId; + shardReq.info = statResp.info; + } else { + ALWAYS_ASSERT(statResp.owner != NULL_INODE_ID); + // keep walking upwards + state.setStatDirId(statResp.owner); + stat(); + } + } + } + + void afterRemoveOwner(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + // we don't want to keep the dir info around start again from the last stat + stat(); + } else if (err == EggsError::DIRECTORY_NOT_EMPTY) { + state.setExitError(err); + rollback(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + unlockEdge(); + } + } + + void unlockEdge() { + auto& shardReq = env.needsShard(SOFT_UNLINK_DIRECTORY_UNLOCK_EDGE, req.ownerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.ownerId; + shardReq.name = req.name; + shardReq.targetId = req.targetId; + // Note that here we used wasMoved even if the subsequent + // snapshot edge will be non-owned, since we're dealing with + // a directory, rather than a file. + shardReq.wasMoved = true; + shardReq.creationTime = req.creationTime; + } + + void afterUnlockEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + unlockEdge(); + } else { + // This can only be because of repeated calls from here: we have the edge locked, + // and only the CDC does changes. + // TODO it would be cleaner to verify this with a lookup + ALWAYS_ASSERT(err == NO_ERROR || err == EggsError::EDGE_NOT_FOUND); + auto& cdcResp = env.finish().setSoftUnlinkDirectory(); + // Update parent map + { + auto k = InodeIdKey::Static(req.targetId); + ROCKS_DB_CHECKED(env.dbTxn.Delete(env.parentCf, k.toSlice())); + } + } + } + + void rollback() { + auto& shardReq = env.needsShard(SOFT_UNLINK_DIRECTORY_ROLLBACK, req.ownerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.ownerId; + shardReq.name = req.name; + shardReq.targetId = req.targetId; + shardReq.wasMoved = false; + } + + void afterRollback(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + rollback(); + } else { + // This can only be because of repeated calls from here: we have the edge locked, + // and only the CDC does changes. + // TODO it would be cleaner to verify this with a lookup + ALWAYS_ASSERT(err == NO_ERROR || err == EggsError::EDGE_NOT_FOUND); + env.finishWithError(state.exitError()); + } + } +}; + +enum RenameDirectoryStep : uint8_t { + RENAME_DIRECTORY_LOCK_OLD_EDGE = 1, + RENAME_DIRECTORY_CREATE_LOCKED_NEW_EDGE = 2, + RENAME_DIRECTORY_UNLOCK_NEW_EDGE = 3, + RENAME_DIRECTORY_UNLOCK_OLD_EDGE = 4, + RENAME_DIRECTORY_SET_OWNER = 5, + RENAME_DIRECTORY_ROLLBACK = 6, +}; + +// Steps: +// +// 1. Make sure there's no loop by traversing the parents +// 2. Lock old edge +// 3. Create and lock the new edge +// 4. Unlock the new edge +// 5. Unlock and unlink the old edge +// 6. Update the owner of the moved directory to the new directory +// +// If we fail at step 3, we need to unlock the edge we locked at step 2. Step 4 and 5 +// should never fail. +struct RenameDirectoryStateMachine { + StateMachineEnv& env; + const RenameDirectoryReq& req; + RenameDirectoryState state; + + RenameDirectoryStateMachine(StateMachineEnv& env_, const RenameDirectoryReq& req_, RenameDirectoryState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: start(); break; + case RENAME_DIRECTORY_LOCK_OLD_EDGE: afterLockOldEdge(err, resp); break; + case RENAME_DIRECTORY_CREATE_LOCKED_NEW_EDGE: afterCreateLockedEdge(err, resp); break; + case RENAME_DIRECTORY_UNLOCK_NEW_EDGE: afterUnlockNewEdge(err, resp); break; + case RENAME_DIRECTORY_UNLOCK_OLD_EDGE: afterUnlockOldEdge(err, resp); break; + case RENAME_DIRECTORY_SET_OWNER: afterSetOwner(err, resp); break; + case RENAME_DIRECTORY_ROLLBACK: afterRollback(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + bool loopCheck() { + return true; // TODO implement + } + + void start() { + if (req.targetId.type() != InodeType::DIRECTORY) { + env.finishWithError(EggsError::TYPE_IS_NOT_DIRECTORY); + } else if (req.oldOwnerId == req.newOwnerId) { + env.finishWithError(EggsError::SAME_DIRECTORIES); + } else if (!loopCheck()) { + // First, check if we'd create a loop + env.finishWithError(EggsError::LOOP_IN_DIRECTORY_RENAME); + } else { + // Now, actually start by locking the old edge + lockOldEdge(); + } + } + + void lockOldEdge() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_LOCK_OLD_EDGE, req.oldOwnerId.shard()).setLockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.name = req.oldName; + shardReq.targetId = req.targetId; + shardReq.creationTime = req.oldCreationTime; + } + + void afterLockOldEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + lockOldEdge(); // retry + } else if ( + err == EggsError::DIRECTORY_NOT_FOUND || err == EggsError::EDGE_NOT_FOUND || err == EggsError::MISMATCHING_CREATION_TIME + ) { + if (err == EggsError::DIRECTORY_NOT_FOUND) { + err = EggsError::OLD_DIRECTORY_NOT_FOUND; + } + env.finishWithError(err); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + createLockedNewEdge(); + } + } + + void createLockedNewEdge() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_CREATE_LOCKED_NEW_EDGE, req.newOwnerId.shard()).setCreateLockedCurrentEdge(); + shardReq.dirId = req.newOwnerId; + shardReq.name = req.newName; + shardReq.targetId = req.targetId; + } + + void afterCreateLockedEdge(EggsError err, const ShardRespContainer* resp) { + if (createCurrentLockedEdgeRetry(err)) { + createLockedNewEdge(); + } else if (createCurrentLockedEdgeFatal(err)) { + state.setExitError(err); + rollback(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + state.setNewCreationTime(resp->getCreateLockedCurrentEdge().creationTime); + unlockNewEdge(); + } + } + + void unlockNewEdge() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_UNLOCK_NEW_EDGE, req.newOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.newOwnerId; + shardReq.name = req.newName; + shardReq.targetId = req.targetId; + shardReq.wasMoved = false; + shardReq.creationTime = state.newCreationTime(); + } + + void afterUnlockNewEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + unlockNewEdge(); + } else if (err == EggsError::EDGE_NOT_FOUND) { + // This can only be because of repeated calls from here: we have the edge locked, + // and only the CDC does changes. + // TODO it would be cleaner to verify this with a lookup + unlockOldEdge(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + unlockOldEdge(); + } + } + + void unlockOldEdge() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_UNLOCK_OLD_EDGE, req.oldOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.name = req.oldName; + shardReq.targetId = req.targetId; + shardReq.wasMoved = true; + shardReq.creationTime = req.oldCreationTime; + } + + void afterUnlockOldEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + unlockOldEdge(); + } else if (err == EggsError::EDGE_NOT_FOUND) { + // This can only be because of repeated calls from here: we have the edge locked, + // and only the CDC does changes. + // TODO it would be cleaner to verify this with a lookup + setOwner(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + setOwner(); + } + } + + void setOwner() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_SET_OWNER, req.targetId.shard()).setSetDirectoryOwner(); + shardReq.ownerId = req.newOwnerId; + shardReq.dirId = req.targetId; + } + + void afterSetOwner(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + setOwner(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + auto& resp = env.finish().setRenameDirectory(); + resp.creationTime = state.newCreationTime(); + // update cache + { + auto k = InodeIdKey::Static(req.targetId); + auto v = InodeIdKey::Static(req.newOwnerId); + ROCKS_DB_CHECKED(env.dbTxn.Put(env.parentCf, k.toSlice(), v.toSlice())); + } + } + } + + void rollback() { + auto& shardReq = env.needsShard(RENAME_DIRECTORY_ROLLBACK, req.oldOwnerId.shard()).setUnlockCurrentEdge(); + shardReq.dirId = req.oldOwnerId; + shardReq.name = req.oldName; + shardReq.targetId = req.targetId; + shardReq.wasMoved = false; + shardReq.creationTime = state.newCreationTime(); + } + + void afterRollback(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + rollback(); + } else { + env.finishWithError(state.exitError()); + } + } +}; + +enum CrossShardHardUnlinkFileStep : uint8_t { + CROSS_SHARD_HARD_UNLINK_FILE_REMOVE_EDGE = 1, + CROSS_SHARD_HARD_UNLINK_FILE_MAKE_TRANSIENT = 2, +}; + +struct CrossShardHardUnlinkFileStateMachine { + StateMachineEnv& env; + const CrossShardHardUnlinkFileReq& req; + CrossShardHardUnlinkFileState state; + + CrossShardHardUnlinkFileStateMachine(StateMachineEnv& env_, const CrossShardHardUnlinkFileReq& req_, CrossShardHardUnlinkFileState state_): + env(env_), req(req_), state(state_) + {} + + void resume(EggsError err, const ShardRespContainer* resp) { + ALWAYS_ASSERT((err == NO_ERROR && resp == nullptr) == (env.txnStep == TXN_START)); + switch (env.txnStep) { + case TXN_START: start(); break; + case CROSS_SHARD_HARD_UNLINK_FILE_REMOVE_EDGE: afterRemoveEdge(err, resp); break; + case CROSS_SHARD_HARD_UNLINK_FILE_MAKE_TRANSIENT: afterMakeTransient(err, resp); break; + default: throw EGGS_EXCEPTION("bad step %s", env.txnStep); + } + } + + void start() { + if (req.ownerId.shard() == req.targetId.shard()) { + env.finishWithError(EggsError::SAME_SHARD); + } else { + removeEdge(); + } + } + + void removeEdge() { + auto& shardReq = env.needsShard(CROSS_SHARD_HARD_UNLINK_FILE_REMOVE_EDGE, req.ownerId.shard()).setRemoveOwnedSnapshotFileEdge(); + shardReq.ownerId = req.ownerId; + shardReq.targetId = req.targetId; + shardReq.name = req.name; + shardReq.creationTime = req.creationTime; + } + + void afterRemoveEdge(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT || err == EggsError::MTIME_IS_TOO_RECENT) { + removeEdge(); + } else if (err == EggsError::DIRECTORY_NOT_FOUND) { + env.finishWithError(err); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + makeTransient(); + } + } + + void makeTransient() { + auto& shardReq = env.needsShard(CROSS_SHARD_HARD_UNLINK_FILE_MAKE_TRANSIENT, req.targetId.shard()).setMakeFileTransient(); + shardReq.id = req.targetId; + shardReq.note = req.name; + } + + void afterMakeTransient(EggsError err, const ShardRespContainer* resp) { + if (err == EggsError::TIMEOUT) { + makeTransient(); + } else { + ALWAYS_ASSERT(err == NO_ERROR); + env.finish().setCrossShardHardUnlinkFile(); + } + } +}; + +// Steps: +// +// 1. Remove owning edge in one shard +// 2. Make file transient in other shard +// +// Step 2 cannot fail. + + // Wrapper types to pack/unpack with kind struct PackCDCReq { const CDCReqContainer& req; @@ -352,6 +1156,7 @@ struct CDCDBImpl { uint64_t _reqQueuePop(rocksdb::Transaction& dbTxn) { uint64_t first = _firstTxnInQueue(dbTxn); if (first == 0) { + LOG_DEBUG(_env, "txn queue empty, returning 0"); return 0; } // Update metadata @@ -362,513 +1167,10 @@ struct CDCDBImpl { } else { _setFirstTxnInQueue(dbTxn, first+1); } + LOG_DEBUG(_env, "popped txn %s from queue", first); return first; } - InodeId _nextDirectoryId(rocksdb::Transaction& dbTxn) { - std::string v; - ROCKS_DB_CHECKED(_db->Get({}, _defaultCf, cdcMetadataKey(&NEXT_DIRECTORY_ID_KEY), &v)); - ExternalValue nextId(v); - InodeId id = nextId().id(); - nextId().setId(InodeId::FromU64(id.u64 + 1)); - ROCKS_DB_CHECKED(dbTxn.Put(_defaultCf, cdcMetadataKey(&NEXT_DIRECTORY_ID_KEY), nextId.toSlice())); - return id; - } - - ShardReqContainer& _needsShard(CDCStep& step, uint64_t txnId, ShardId shid) { - step.txnFinished = 0; - step.txnNeedsShard = txnId; - step.shardReq.shid = shid; - return step.shardReq.req; - } - - void _finishWithError(CDCStep& step, uint64_t txnId, EggsError err) { - ALWAYS_ASSERT(err != NO_ERROR); - step.txnFinished = txnId; - step.err = err; - step.txnNeedsShard = 0; - } - - CDCRespContainer& _finish(CDCStep& step, uint64_t txnId) { - step.txnFinished = txnId; - step.err = NO_ERROR; - return step.resp; - } - - // Steps: - // - // 1. Allocate inode id here in the CDC - // 2. Create directory in shard we get from the inode - // 3. Create locked edge from owner to newly created directory - // 4. Unlock the edge created in 3 - // - // If 3 fails, 2 must be rolled back. 4 does not fail. - MakeDirectoryStep _advanceMakeDirectory( - EggsTime time, - rocksdb::Transaction& dbTxn, - uint64_t txnId, - const CDCReqContainer& reqContainer, - EggsError shardRespError, - const ShardRespContainer* shardResp, - MakeDirectoryStep mkDirStep, - MakeDirectoryState state, - CDCStep& step - ) { - ALWAYS_ASSERT((shardRespError == NO_ERROR && shardResp == nullptr) == (mkDirStep == MakeDirectoryStep::START)); - const auto& req = reqContainer.getMakeDirectory(); - - switch (mkDirStep) { - case MakeDirectoryStep::START: { - mkDirStep = MakeDirectoryStep::AFTER_CREATE_DIR; - auto dirId = _nextDirectoryId(dbTxn); - state.setDirId(dirId); - auto& shardReq = _needsShard(step, txnId, state.dirId().shard()).setCreateDirectoryInode(); - shardReq.id = dirId; - shardReq.info.inherited = req.info.inherited; - shardReq.info.body = req.info.body; - shardReq.ownerId = req.ownerId; - } break; - case MakeDirectoryStep::AFTER_CREATE_DIR: { - if (shardRespError != NO_ERROR) { - // We never expect to fail here. That said, there's nothing to roll back, - // so we just finish. - RAISE_ALERT(_env, "Unexpected error %s when creating diretory inode with id %s", shardRespError, state.dirId()); - _finishWithError(step, txnId, EggsError::INTERNAL_ERROR); - } else { - mkDirStep = MakeDirectoryStep::AFTER_CREATE_LOCKED_EDGE; - auto& shardReq = _needsShard(step, txnId, req.ownerId.shard()).setCreateLockedCurrentEdge(); - shardReq.creationTime = time; - shardReq.dirId = req.ownerId; - shardReq.targetId = state.dirId(); - shardReq.name = req.name; - } - } break; - case MakeDirectoryStep::AFTER_CREATE_LOCKED_EDGE: { - if (shardRespError != NO_ERROR) { - // We've failed to link: we need to rollback the directory creation - LOG_INFO(_env, "Failed to create locked edge, rolling back"); - mkDirStep = MakeDirectoryStep::AFTER_ROLLBACK; - state.setExitError(shardRespError); - auto& shardReq = _needsShard(step, txnId, state.dirId().shard()).setRemoveDirectoryOwner(); - shardReq.dirId = state.dirId(); - // we've just created this directory, it is empty, therefore the policy - // is irrelevant. - shardReq.info = defaultDirectoryInfo(); - } else { - mkDirStep = MakeDirectoryStep::AFTER_UNLOCK_EDGE; - auto& shardReq = _needsShard(step, txnId, req.ownerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.ownerId; - shardReq.name = req.name; - shardReq.targetId = state.dirId(); - shardReq.wasMoved = false; - } - } break; - case MakeDirectoryStep::AFTER_UNLOCK_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts and similar, and possibly other stuff - // We're done, record the parent relationship and finish - { - auto k = InodeIdKey::Static(state.dirId()); - auto v = InodeIdKey::Static(req.ownerId); - ROCKS_DB_CHECKED(dbTxn.Put(_parentCf, k.toSlice(), v.toSlice())); - } - auto& resp = _finish(step, txnId).setMakeDirectory(); - resp.id = state.dirId(); - } break; - case MakeDirectoryStep::AFTER_ROLLBACK: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO retry, handle timeouts, etc. - _finishWithError(step, txnId, state.exitError()); - } break; - default: - throw EGGS_EXCEPTION("bad directory step %s", (int)mkDirStep); - } - - return mkDirStep; - } - - // The only reason we have this here is for possible conflicts with RemoveDirectoryOwner, - // which might temporarily set the owner of a directory to NULL. Since in the current - // implementation we only ever have one transaction in flight in the CDC, we can just - // execute this. - HardUnlinkDirectoryStep _advanceHardUnlinkDirectory( - uint64_t txnId, - const CDCReqContainer& reqContainer, - EggsError shardRespError, - const ShardRespContainer* shardResp, - HardUnlinkDirectoryStep reqStep, - HardUnlinkDirectoryState state, - CDCStep& step - ) { - ALWAYS_ASSERT((shardRespError == NO_ERROR && shardResp == nullptr) == (reqStep == HardUnlinkDirectoryStep::START)); - const auto& req = reqContainer.getHardUnlinkDirectory(); - - switch (reqStep) { - case HardUnlinkDirectoryStep::START: { - reqStep = HardUnlinkDirectoryStep::AFTER_REMOVE_INODE; - auto& shardReq = _needsShard(step, txnId, req.dirId.shard()).setRemoveInode(); - shardReq.id = req.dirId; - } break; - case HardUnlinkDirectoryStep::AFTER_REMOVE_INODE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle - _finish(step, txnId); - } break; - default: - throw EGGS_EXCEPTION("bad hard unlink directory step %s", (int)reqStep); - } - - return reqStep; - } - - // Steps: - // - // 1. lock source current edge - // 2. create destination locked current target edge - // 3. unlock edge in step 2 - // 4. unlock source target current edge, and soft unlink it - // - // If we fail at step 2, we need to roll back step 1. Steps 3 and 4 should never fail. - RenameFileStep _advanceRenameFile( - EggsTime time, - rocksdb::Transaction& dbTxn, - uint64_t txnId, - const CDCReqContainer& reqContainer, - EggsError shardRespError, - const ShardRespContainer* shardResp, - RenameFileStep reqStep, - RenameFileState state, - CDCStep& step - ) { - ALWAYS_ASSERT((shardRespError == NO_ERROR && shardResp == nullptr) == (reqStep == RenameFileStep::START)); - const auto& req = reqContainer.getRenameFile(); - - switch (reqStep) { - case RenameFileStep::START: { - // We need this explicit check here because moving directories is more complicated, - // and therefore we do it in another transaction type entirely. - if (req.targetId.type() == InodeType::DIRECTORY) { - _finishWithError(step, txnId, EggsError::TYPE_IS_NOT_DIRECTORY); - } else if (req.oldOwnerId == req.newOwnerId) { - _finishWithError(step, txnId, EggsError::SAME_DIRECTORIES); - } else { - reqStep = RenameFileStep::AFTER_LOCK_OLD_EDGE; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setLockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.name = req.oldName; - shardReq.targetId = req.targetId; - } - } break; - case RenameFileStep::AFTER_LOCK_OLD_EDGE: { - if (shardRespError != NO_ERROR) { - // We couldn't acquire the lock at the source -- we can terminate immediately, there's nothing to roll back - if (shardRespError == EggsError::DIRECTORY_NOT_FOUND) { - shardRespError = EggsError::OLD_DIRECTORY_NOT_FOUND; - } - if (shardRespError == EggsError::NAME_IS_LOCKED) { - shardRespError = EggsError::OLD_NAME_IS_LOCKED; - } - _finishWithError(step, txnId, shardRespError); - } else { - reqStep = RenameFileStep::AFTER_CREATE_NEW_LOCKED_EDGE; - auto& shardReq = _needsShard(step, txnId, req.newOwnerId.shard()).setCreateLockedCurrentEdge(); - shardReq.creationTime = time, - shardReq.dirId = req.newOwnerId; - shardReq.name = req.newName; - shardReq.targetId = req.targetId; - } - } break; - case RenameFileStep::AFTER_CREATE_NEW_LOCKED_EDGE: { - if (shardRespError != NO_ERROR) { - // We couldn't create the new edge, we need to unlock the old edge. - if (shardRespError == EggsError::DIRECTORY_NOT_FOUND) { - shardRespError = EggsError::NEW_DIRECTORY_NOT_FOUND; - } - if (shardRespError == EggsError::NAME_IS_LOCKED) { - shardRespError = EggsError::NEW_NAME_IS_LOCKED; - } - state.setExitError(shardRespError); - reqStep = RenameFileStep::AFTER_ROLLBACK; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.name = req.oldName; - shardReq.targetId = req.targetId; - shardReq.wasMoved = false; - } else { - reqStep = RenameFileStep::AFTER_UNLOCK_NEW_EDGE; - auto& shardReq = _needsShard(step, txnId, req.newOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.newOwnerId; - shardReq.targetId = req.targetId; - shardReq.name = req.newName; - shardReq.wasMoved = false; - } - } break; - case RenameFileStep::AFTER_UNLOCK_NEW_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO Handle timeouts etc. - // We're done creating the destination edge, now unlock the source, marking it as moved - reqStep = RenameFileStep::AFTER_UNLOCK_OLD_EDGE; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.targetId = req.targetId; - shardReq.name = req.oldName; - shardReq.wasMoved = true; - } break; - case RenameFileStep::AFTER_UNLOCK_OLD_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - // We're finally done - auto& resp = _finish(step, txnId); - resp.setRenameFile(); - } break; - case RenameFileStep::AFTER_ROLLBACK: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - _finishWithError(step, txnId, state.exitError()); - } break; - default: - throw EGGS_EXCEPTION("bad rename file step %s", (int)reqStep); - } - - return reqStep; - } - - // Steps: - // - // 1. Lock edge going into the directory to remove. This prevents things making - // making it snapshot or similar in the meantime. - // 2. Resolve the directory info, since we'll need to store it when we remove the directory owner. - // 3. Remove directory owner from directory that we want to remove. This will fail if there still - // are current edges there. - // 4. Unlock edge going into the directory, making it snapshot. - // - // If 2 or 3 fail, we need to roll back the locking, without making the edge snapshot. - SoftUnlinkDirectoryStep _advanceSoftUnlinkDirectory( - EggsTime time, - rocksdb::Transaction& dbTxn, - uint64_t txnId, - const CDCReqContainer& reqContainer, - EggsError shardRespError, - const ShardRespContainer* shardResp, - SoftUnlinkDirectoryStep reqStep, - SoftUnlinkDirectoryState state, - CDCStep& step - ) { - ALWAYS_ASSERT((shardRespError == NO_ERROR && shardResp == nullptr) == (reqStep == SoftUnlinkDirectoryStep::START)); - const auto& req = reqContainer.getSoftUnlinkDirectory(); - - auto initStat = [&](InodeId dir) { - reqStep = SoftUnlinkDirectoryStep::AFTER_STAT; - auto& shardReq = _needsShard(step, txnId, dir.shard()).setStatDirectory(); - shardReq.id = dir; - }; - - auto initRollback = [&](EggsError err) { - state.setExitError(err); - reqStep = SoftUnlinkDirectoryStep::AFTER_ROLLBACK; - auto& shardReq = _needsShard(step, txnId, req.ownerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.ownerId; - shardReq.name = req.name; - shardReq.targetId = req.targetId; - shardReq.wasMoved = false; - }; - - switch (reqStep) { - case SoftUnlinkDirectoryStep::START: { - if (req.targetId.type() != InodeType::DIRECTORY) { - _finishWithError(step, txnId, EggsError::TYPE_IS_NOT_DIRECTORY); - } else { - reqStep = SoftUnlinkDirectoryStep::AFTER_LOCK_EDGE; - auto& shardReq = _needsShard(step, txnId, req.ownerId.shard()).setLockCurrentEdge(); - shardReq.dirId = req.ownerId; - shardReq.name = req.name; - shardReq.targetId = req.targetId; - } - } break; - case SoftUnlinkDirectoryStep::AFTER_LOCK_EDGE: { - if (shardRespError != NO_ERROR) { // TODO handle timeouts etc. - // Nothing to roll back - _finishWithError(step, txnId, shardRespError); - } else { - // Start resolving dir info - initStat(req.targetId); - } - } break; - case SoftUnlinkDirectoryStep::AFTER_STAT: { - if (shardRespError != NO_ERROR) { // TODO handle timeouts etc. - initRollback(shardRespError); - } else { - const auto& statResp = shardResp->getStatDirectory(); - if (statResp.info.size() > 0) { - // We're done for the info, we can remove the owner - reqStep = SoftUnlinkDirectoryStep::AFTER_REMOVE_OWNER; - auto& shardReq = _needsShard(step, txnId, req.targetId.shard()).setRemoveDirectoryOwner(); - shardReq.dirId = req.targetId; - shardReq.info = statResp.info; - } else { - ALWAYS_ASSERT(statResp.owner != NULL_INODE_ID); // can't be root with no info - // keep walking upwards - initStat(statResp.owner); - } - } - } break; - case SoftUnlinkDirectoryStep::AFTER_REMOVE_OWNER: { - if (shardRespError != NO_ERROR) { // TODO handle timeouts etc. - initRollback(shardRespError); - } else { - reqStep = SoftUnlinkDirectoryStep::AFTER_UNLOCK_EDGE; - auto& shardReq = _needsShard(step, txnId, req.ownerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.ownerId; - shardReq.name = req.name; - shardReq.targetId = req.targetId; - shardReq.wasMoved = true; // make it snapshot - } - } break; - case SoftUnlinkDirectoryStep::AFTER_UNLOCK_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc - auto& resp = _finish(step, txnId).setSoftUnlinkDirectory(); - // Update parent map - { - auto k = InodeIdKey::Static(req.targetId); - ROCKS_DB_CHECKED(dbTxn.Delete(_parentCf, k.toSlice())); - } - } break; - case SoftUnlinkDirectoryStep::AFTER_ROLLBACK: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - _finishWithError(step, txnId, state.exitError()); - } break; - default: - throw EGGS_EXCEPTION("bad step %s", (int)reqStep); - } - - return reqStep; - } - - bool _loopCheck(rocksdb::Transaction& dbTxn, const RenameDirectoryReq& req) { - return true; - // throw EGGS_EXCEPTION("UNIMPLEMENTED"); - } - - // Steps: - // - // 1. Make sure there's no loop by traversing the parents - // 2. Lock old edge - // 3. Create and lock the new edge - // 4. Unlock the new edge - // 5. Unlock and unlink the old edge - // 6. Update the owner of the moved directory to the new directory - // - // If we fail at step 2 or 3, we need to unlock the edge we locked at step 1. Step 4 and 5 - // should never fail. - RenameDirectoryStep _advanceRenameDirectory( - EggsTime time, - rocksdb::Transaction& dbTxn, - uint64_t txnId, - const CDCReqContainer& reqContainer, - EggsError shardRespError, - const ShardRespContainer* shardResp, - RenameDirectoryStep reqStep, - RenameDirectoryState state, - CDCStep& step - ) { - ALWAYS_ASSERT((shardRespError == NO_ERROR && shardResp == nullptr) == (reqStep == RenameDirectoryStep::START)); - const auto& req = reqContainer.getRenameDirectory(); - - auto initRollback = [&](EggsError err) { - state.setExitError(err); - reqStep = RenameDirectoryStep::AFTER_ROLLBACK; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.name = req.oldName; - shardReq.targetId = req.targetId; - shardReq.wasMoved = false; - }; - - switch (reqStep) { - case RenameDirectoryStep::START: { - if (req.targetId.type() != InodeType::DIRECTORY) { - _finishWithError(step, txnId, EggsError::TYPE_IS_NOT_DIRECTORY); - } else if (req.oldOwnerId == req.newOwnerId) { - _finishWithError(step, txnId, EggsError::SAME_DIRECTORIES); - } else if (!_loopCheck(dbTxn, req)) { - // First, check if we'd create a loop - _finishWithError(step, txnId, EggsError::LOOP_IN_DIRECTORY_RENAME); - } else { - // Now, actually start by locking the old edge - reqStep = RenameDirectoryStep::AFTER_LOCK_OLD_EDGE; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setLockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.name = req.oldName; - shardReq.targetId = req.targetId; - } - } break; - case RenameDirectoryStep::AFTER_LOCK_OLD_EDGE: { - if (shardRespError != NO_ERROR) { // TODO handle timeouts etc - if (shardRespError == EggsError::DIRECTORY_NOT_FOUND) { - shardRespError = EggsError::OLD_DIRECTORY_NOT_FOUND; - } - if (shardRespError == EggsError::NAME_IS_LOCKED) { - shardRespError = EggsError::OLD_NAME_IS_LOCKED; - } - _finishWithError(step, txnId, shardRespError); - } else { - reqStep = RenameDirectoryStep::AFTER_CREATE_LOCKED_EDGE; - auto& shardReq = _needsShard(step, txnId, req.newOwnerId.shard()).setCreateLockedCurrentEdge(); - shardReq.dirId = req.newOwnerId; - shardReq.name = req.newName; - shardReq.creationTime = time; - shardReq.targetId = req.targetId; - } - } break; - case RenameDirectoryStep::AFTER_CREATE_LOCKED_EDGE: { - if (shardRespError != NO_ERROR) { // TODO handle timeouts etc - if (shardRespError == EggsError::DIRECTORY_NOT_FOUND) { - shardRespError = EggsError::NEW_DIRECTORY_NOT_FOUND; - } - if (shardRespError == EggsError::NAME_IS_LOCKED) { - shardRespError = EggsError::NEW_NAME_IS_LOCKED; - } - initRollback(shardRespError); - } else { - reqStep = RenameDirectoryStep::AFTER_UNLOCK_NEW_EDGE; - auto& shardReq = _needsShard(step, txnId, req.newOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.newOwnerId; - shardReq.name = req.newName; - shardReq.targetId = req.targetId; - shardReq.wasMoved = false; - } - } break; - case RenameDirectoryStep::AFTER_UNLOCK_NEW_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - reqStep = RenameDirectoryStep::AFTER_UNLOCK_OLD_EDGE; - auto& shardReq = _needsShard(step, txnId, req.oldOwnerId.shard()).setUnlockCurrentEdge(); - shardReq.dirId = req.oldOwnerId; - shardReq.name = req.oldName; - shardReq.targetId = req.targetId; - shardReq.wasMoved = true; - } break; - case RenameDirectoryStep::AFTER_UNLOCK_OLD_EDGE: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - reqStep = RenameDirectoryStep::AFTER_SET_OWNER; - auto& shardReq = _needsShard(step, txnId, req.targetId.shard()).setSetDirectoryOwner(); - shardReq.ownerId = req.newOwnerId; - shardReq.dirId = req.targetId; - } break; - case RenameDirectoryStep::AFTER_SET_OWNER: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - _finish(step, txnId).setRenameDirectory(); - // update cache - { - auto k = InodeIdKey::Static(req.targetId); - auto v = InodeIdKey::Static(req.newOwnerId); - ROCKS_DB_CHECKED(dbTxn.Put(_parentCf, k.toSlice(), v.toSlice())); - } - } - case RenameDirectoryStep::AFTER_ROLLBACK: { - ALWAYS_ASSERT(shardRespError == NO_ERROR); // TODO handle timeouts etc. - _finishWithError(step, txnId, state.exitError()); - } break; - default: - throw EGGS_EXCEPTION("bad step %s", (int)reqStep); - } - - return reqStep; - } - // Moves the state forward, filling in `step` appropriatedly, and writing // out the updated state. // @@ -887,27 +1189,31 @@ struct CDCDBImpl { V state, CDCStep& step ) { - uint8_t stateStep = state().step(); + LOG_DEBUG(_env, "advancing txn %s of kind %s", txnId, req.kind()); + StateMachineEnv env(_db, _defaultCf, _parentCf, dbTxn, time, txnId, state().step(), step); switch (req.kind()) { case CDCMessageKind::MAKE_DIRECTORY: - stateStep = (uint8_t)_advanceMakeDirectory(time, dbTxn, txnId, req, shardRespError, shardResp, (MakeDirectoryStep)stateStep, state().getMakeDirectory(), step); + MakeDirectoryStateMachine(env, req.getMakeDirectory(), state().getMakeDirectory()).resume(shardRespError, shardResp); break; case CDCMessageKind::HARD_UNLINK_DIRECTORY: - stateStep = (uint8_t)_advanceHardUnlinkDirectory(txnId, req, shardRespError, shardResp, (HardUnlinkDirectoryStep)stateStep, state().getHardUnlinkDirectory(), step); + HardUnlinkDirectoryStateMachine(env, req.getHardUnlinkDirectory(), state().getHardUnlinkDirectory()).resume(shardRespError, shardResp); break; case CDCMessageKind::RENAME_FILE: - stateStep = (uint8_t)_advanceRenameFile(time, dbTxn, txnId, req, shardRespError, shardResp, (RenameFileStep)stateStep, state().getRenameFile(), step); + RenameFileStateMachine(env, req.getRenameFile(), state().getRenameFile()).resume(shardRespError, shardResp); break; case CDCMessageKind::SOFT_UNLINK_DIRECTORY: - stateStep = (uint8_t)_advanceSoftUnlinkDirectory(time, dbTxn, txnId, req, shardRespError, shardResp, (SoftUnlinkDirectoryStep)stateStep, state().getSoftUnlinkDirectory(), step); + SoftUnlinkDirectoryStateMachine(env, req.getSoftUnlinkDirectory(), state().getSoftUnlinkDirectory()).resume(shardRespError, shardResp); break; case CDCMessageKind::RENAME_DIRECTORY: - stateStep = (uint8_t)_advanceRenameDirectory(time, dbTxn, txnId, req, shardRespError, shardResp, (RenameDirectoryStep)stateStep, state().getRenameDirectory(), step); + RenameDirectoryStateMachine(env, req.getRenameDirectory(), state().getRenameDirectory()).resume(shardRespError, shardResp); + break; + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: + CrossShardHardUnlinkFileStateMachine(env, req.getCrossShardHardUnlinkFile(), state().getCrossShardHardUnlinkFile()).resume(shardRespError, shardResp); break; default: throw EGGS_EXCEPTION("bad cdc message kind %s", req.kind()); } - state().setStep(stateStep); + state().setStep(env.txnStep); ALWAYS_ASSERT(step.txnFinished == 0 || step.txnFinished == txnId); ALWAYS_ASSERT(step.txnNeedsShard == 0 || step.txnNeedsShard == txnId); @@ -942,8 +1248,8 @@ struct CDCDBImpl { return; } - LOG_DEBUG(_env, "starting to execute txn %s", txnToExecute); _setExecutingTxn(dbTxn, txnToExecute); + LOG_DEBUG(_env, "starting to execute txn %s with req %s", txnToExecute, _cdcReq); StaticValue txnState; txnState().start(_cdcReq.kind()); _advance(time, dbTxn, txnToExecute, _cdcReq, NO_ERROR, nullptr, txnState, step); @@ -969,19 +1275,18 @@ struct CDCDBImpl { // Enqueue req uint64_t txnId; { - LOG_DEBUG(_env, "enqueueing cdc req %s", req.kind()); - // Generate new txn id txnId = _lastTxn(*dbTxn) + 1; _setLastTxn(*dbTxn, txnId); - // Push to queue _reqQueuePush(*dbTxn, txnId, req); + LOG_DEBUG(_env, "enqueued CDC req %s with txn id %s", req.kind(), txnId); } // Start executing, if we can _startExecuting(time, *dbTxn, step); + LOG_DEBUG(_env, "committing transaction"); dbTxn->Commit(); return txnId; diff --git a/cpp/CDCDB.hpp b/cpp/CDCDB.hpp index 6f8cf2e3..dc70b155 100644 --- a/cpp/CDCDB.hpp +++ b/cpp/CDCDB.hpp @@ -1,5 +1,6 @@ #pragma once +#include #include #include "Bincode.hpp" @@ -25,39 +26,26 @@ struct CDCStep { uint64_t txnFinished; EggsError err; // if NO_ERROR, resp is contains the response. CDCRespContainer resp; + // If non-zero, a transaction is running, but we need something // from a shard to have it proceed. // - // We have !((finishedTxn != 0) && (runningTxn != 0)) as an invariant + // We have !((finishedTxn != 0) && (txnNeedsShard != 0)) as an invariant // -- we can't have finished and be running a thing in the same step. uint64_t txnNeedsShard; CDCShardReq shardReq; + // If non-zero, there is a transaction after the current one waiting - // to be executed. Only filled in if `runningTxn == 0`. + // to be executed. Only filled in if `txnNeedsShard == 0`. // Useful to decide when to call `startNextTransaction` (although // calling it is safe in any case). - uint64_t nextTxn = 0; + uint64_t nextTxn; void clear() { txnFinished = 0; txnNeedsShard = 0; nextTxn = 0; } - - /* - void setFinishedTxnErr(uint64_t txnId, EggsError err) { - ALWAYS_ASSERT(err != NO_ERROR); - finishedTxn = txnId; - this->err = err; - runningTxn = 0; - } - - CDCRespContainer& setFinishedTxn(uint64_t txnId) { - finishedTxn = txnId; - runningTxn = 0; - return resp; - } - */ }; std::ostream& operator<<(std::ostream& out, const CDCStep& x); diff --git a/cpp/CDCDBData.hpp b/cpp/CDCDBData.hpp index b20fe11b..dc5f83e0 100644 --- a/cpp/CDCDBData.hpp +++ b/cpp/CDCDBData.hpp @@ -15,11 +15,11 @@ enum class CDCMetadataKey : uint8_t { LAST_APPLIED_LOG_ENTRY = 0, LAST_TXN = 1, - FIRST_TXN_IN_QUEUE = 1, - LAST_TXN_IN_QUEUE = 2, - EXECUTING_TXN = 3, - EXECUTING_TXN_STATE = 4, - LAST_DIRECTORY_ID = 5, + FIRST_TXN_IN_QUEUE = 2, + LAST_TXN_IN_QUEUE = 3, + EXECUTING_TXN = 4, + EXECUTING_TXN_STATE = 6, + LAST_DIRECTORY_ID = 7, }; constexpr CDCMetadataKey LAST_APPLIED_LOG_ENTRY_KEY = CDCMetadataKey::LAST_APPLIED_LOG_ENTRY; constexpr CDCMetadataKey LAST_TXN_KEY = CDCMetadataKey::LAST_TXN; @@ -38,16 +38,19 @@ struct MakeDirectoryState { static constexpr size_t MAX_SIZE = sizeof(InodeId) + // dir id we've generated + sizeof(EggsTime) + // creation time for the edge sizeof(EggsError); // exit error if we're rolling back size_t size() const { return MAX_SIZE; } void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } - LE_VAL(InodeId, dirId, setDirId, 0) - LE_VAL(EggsError, exitError, setExitError, 8) + LE_VAL(InodeId, dirId, setDirId, 0) + LE_VAL(EggsTime, creationTime, setCreationTime, 8) + LE_VAL(EggsError, exitError, setExitError, 16) void start() { setDirId(NULL_INODE_ID); + setCreationTime({}); setExitError(NO_ERROR); } }; @@ -56,12 +59,14 @@ struct RenameFileState { char* _data; static constexpr size_t MAX_SIZE = + sizeof(EggsTime) + // the time at which we created the current edge sizeof(EggsError); // exit error if we're rolling back size_t size() const { return MAX_SIZE; } void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } - LE_VAL(EggsError, exitError, setExitError, 0) + LE_VAL(EggsTime, newCreationTime, setNewCreationTime, 0) + LE_VAL(EggsError, exitError, setExitError, 8) void start() { setExitError(NO_ERROR); @@ -72,12 +77,14 @@ struct SoftUnlinkDirectoryState { char* _data; static constexpr size_t MAX_SIZE = + sizeof(InodeId) + // what we're currently stat'ing sizeof(EggsError); // exit error if we're rolling back size_t size() const { return MAX_SIZE; } void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } - LE_VAL(EggsError, exitError, setExitError, 0) + LE_VAL(InodeId, statDirId, setStatDirId, 0) + LE_VAL(EggsError, exitError, setExitError, 8) void start() { setExitError(NO_ERROR); @@ -88,17 +95,17 @@ struct RenameDirectoryState { char* _data; static constexpr size_t MAX_SIZE = - sizeof(InodeId) + // current directory we're traversing upwards when looking for cycles + sizeof(EggsTime) + // time at which we created the old edge sizeof(EggsError); // exit error if we're rolling back size_t size() const { return MAX_SIZE; } void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } - LE_VAL(InodeId, currentDirectory, setCurrentDirectory, 0) + LE_VAL(EggsTime, newCreationTime, setNewCreationTime, 0) LE_VAL(EggsError, exitError, setExitError, 8) void start() { - setCurrentDirectory(NULL_INODE_ID); + setNewCreationTime({}); setExitError(NO_ERROR); } }; @@ -111,6 +118,14 @@ struct HardUnlinkDirectoryState { void start() {} }; +struct CrossShardHardUnlinkFileState { + char* _data; + static constexpr size_t MAX_SIZE = 0; + size_t size() const { return MAX_SIZE; } + void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } + void start() {} +}; + template constexpr size_t maxMaxSize() { if constexpr (sizeof...(Types) > 0) { @@ -128,7 +143,7 @@ struct TxnState { sizeof(uint8_t); // step static constexpr size_t MAX_SIZE = MIN_SIZE + - maxMaxSize(); + maxMaxSize(); U8_VAL(CDCMessageKind, reqKind, setReqKind, 0); U8_VAL(uint8_t, step, setStep, 1); @@ -146,6 +161,8 @@ struct TxnState { sz += RenameDirectoryState::MAX_SIZE; break; case CDCMessageKind::HARD_UNLINK_DIRECTORY: sz += RenameDirectoryState::MAX_SIZE; break; + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: + sz += CrossShardHardUnlinkFileState::MAX_SIZE; break; default: throw EGGS_EXCEPTION("bad cdc message kind %s", reqKind()); } @@ -157,8 +174,8 @@ struct TxnState { ALWAYS_ASSERT(sz == size()); } - #define TXN_STATE(kind, type, get, startName) \ - const type get() { \ + #define TXN_STATE(kind, type, getName, startName) \ + type getName() { \ ALWAYS_ASSERT(reqKind() == CDCMessageKind::kind); \ type v; \ v._data = _data + MIN_SIZE; \ @@ -173,11 +190,12 @@ struct TxnState { return v; \ } - TXN_STATE(MAKE_DIRECTORY, MakeDirectoryState, getMakeDirectory, startMakeDirectory) - TXN_STATE(RENAME_FILE, RenameFileState, getRenameFile, startRenameFile) - TXN_STATE(SOFT_UNLINK_DIRECTORY, SoftUnlinkDirectoryState, getSoftUnlinkDirectory, startSoftUnlinkDirectory) - TXN_STATE(RENAME_DIRECTORY, RenameDirectoryState, getRenameDirectory, startRenameDirectory) - TXN_STATE(HARD_UNLINK_DIRECTORY, HardUnlinkDirectoryState, getHardUnlinkDirectory, startHardUnlinkDirectory) + TXN_STATE(MAKE_DIRECTORY, MakeDirectoryState, getMakeDirectory, startMakeDirectory) + TXN_STATE(RENAME_FILE, RenameFileState, getRenameFile, startRenameFile) + TXN_STATE(SOFT_UNLINK_DIRECTORY, SoftUnlinkDirectoryState, getSoftUnlinkDirectory, startSoftUnlinkDirectory) + TXN_STATE(RENAME_DIRECTORY, RenameDirectoryState, getRenameDirectory, startRenameDirectory) + TXN_STATE(HARD_UNLINK_DIRECTORY, HardUnlinkDirectoryState, getHardUnlinkDirectory, startHardUnlinkDirectory) + TXN_STATE(CROSS_SHARD_HARD_UNLINK_FILE, CrossShardHardUnlinkFileState, getCrossShardHardUnlinkFile, startCrossShardHardUnlinkFile) #undef TXN_STATE @@ -189,76 +207,9 @@ struct TxnState { case CDCMessageKind::SOFT_UNLINK_DIRECTORY: startSoftUnlinkDirectory(); break; case CDCMessageKind::RENAME_DIRECTORY: startRenameDirectory(); break; case CDCMessageKind::HARD_UNLINK_DIRECTORY: startHardUnlinkDirectory(); break; + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: startCrossShardHardUnlinkFile(); break; default: throw EGGS_EXCEPTION("bad cdc message kind %s", reqKind()); } } }; - -/* -struct TxnIdKey { - char* _data; - - static constexpr size_t MAX_SIZE = sizeof(uint64_t); - size_t size() const { return MAX_SIZE; } - void checkSize(size_t size) { ALWAYS_ASSERT(size == MAX_SIZE); } - - BE64_VAL(uint64_t, txnId, setTxnId, 0) - - static StaticValue Static(uint64_t txnId) { - auto x = StaticValue(); - x->setTxnId(txnId); - return x; - } -}; - -std::string cdcReqToValue(uint64_t requestId, const CDCReqContainer& req); -vodi cdcReqFromValue() - -struct TxnIdKey { - // the txn id, in big endian - char* _data; - - TxnIdKey(const rocksdb::Slice& slice) { - ALWAYS_ASSERT(slice.size() == sizeof(_data)); - memcpy(&_data, slice.data(), sizeof(_data)); - } - - TxnIdKey(uint64_t txnId) { - txnId = byteswapU64(txnId); // LE -> BE - memcpy(&_data, &txnId, sizeof(txnId)); - } - - uint64_t txnId() const { - uint64_t txnId; - memcpy(&txnId, _data, sizeof(txnId)); - return byteswapU64(); // BE -> LE - } - - rocksdb::Slice toSlice() const { - return rocksdb::Slice((const char*)_data, sizeof(_data)); - } -}; - -struct CDCReqValue { - -}; - -struct NoState { - NoState(CDCMessageKind kind, char* data, size_t len) { - ALWAYS_ASSERT(len == 0); - } -}; - -struct MakeDirectoryState { - char* _data; - - // 0-1: step - // 1-9: dir id - MakeDirectoryState(CDCMessageKind kind, char* data, size_t len) { - ALWAYS_ASSERT(kind == CDCMessageKind::MAKE_DIRECTORY); - - } - -} -*/ \ No newline at end of file diff --git a/cpp/Crypto.cpp b/cpp/Crypto.cpp index ccbb1fb9..2b48a538 100644 --- a/cpp/Crypto.cpp +++ b/cpp/Crypto.cpp @@ -79,23 +79,15 @@ void expandKey(const std::array& userkey, AES128Key& key) { } std::array cbcmac(const AES128Key& key, const uint8_t* data, size_t len) { + // load key __m128i xmmKey[11]; for (int i = 0; i < 11; i++) { xmmKey[i] = _mm_load_si128((__m128i*)(&key) + i); } + // CBC MAC step __m128i block = _mm_setzero_si128(); __m128i dataBlock; - size_t i; - ALIGNED(16) uint8_t scratch[16]; - for (i = 0; i < len; i += 16) { - // load data + CBC xor - if (unlikely(len-i < 16)) { - memset(scratch, 0, 16); - memcpy(scratch, data+i, len-i); - dataBlock = _mm_load_si128((__m128i*)scratch); - } else { - dataBlock = _mm_loadu_si128((__m128i*)(data+i)); - } + auto step = [&xmmKey, &block, &dataBlock]() { // CBC xor block = _mm_xor_si128(block, dataBlock); // encrypt @@ -104,7 +96,22 @@ std::array cbcmac(const AES128Key& key, const uint8_t* data, size_t block = _mm_aesenc_si128(block, xmmKey[i]); // Round i } block = _mm_aesenclast_si128(block, xmmKey[10]); // Round 10 + }; + // unpadded load + size_t i = 0; + for (; len-i >= 16; i += 16) { + dataBlock = _mm_loadu_si128((__m128i*)(data+i)); + step(); } + // zero-padded load + ALIGNED(16) uint8_t scratch[16]; + if (len-i > 0) { + memset(scratch, 0, 16); + memcpy(scratch, data+i, len-i); + dataBlock = _mm_load_si128((__m128i*)scratch); + step(); + } + // extract MAC _mm_store_si128((__m128i*)scratch, block); std::array mac; memcpy(&mac[0], scratch, 8); diff --git a/cpp/MsgsGen.cpp b/cpp/MsgsGen.cpp index 0c5e0c5e..b1833b4c 100644 --- a/cpp/MsgsGen.cpp +++ b/cpp/MsgsGen.cpp @@ -34,6 +34,12 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::NAME_NOT_FOUND: out << "NAME_NOT_FOUND"; break; + case EggsError::EDGE_NOT_FOUND: + out << "EDGE_NOT_FOUND"; + break; + case EggsError::EDGE_IS_LOCKED: + out << "EDGE_IS_LOCKED"; + break; case EggsError::TYPE_IS_DIRECTORY: out << "TYPE_IS_DIRECTORY"; break; @@ -76,12 +82,6 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::NAME_IS_LOCKED: out << "NAME_IS_LOCKED"; break; - case EggsError::OLD_NAME_IS_LOCKED: - out << "OLD_NAME_IS_LOCKED"; - break; - case EggsError::NEW_NAME_IS_LOCKED: - out << "NEW_NAME_IS_LOCKED"; - break; case EggsError::MTIME_IS_TOO_RECENT: out << "MTIME_IS_TOO_RECENT"; break; @@ -91,6 +91,9 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::MISMATCHING_OWNER: out << "MISMATCHING_OWNER"; break; + case EggsError::MISMATCHING_CREATION_TIME: + out << "MISMATCHING_CREATION_TIME"; + break; case EggsError::DIRECTORY_NOT_EMPTY: out << "DIRECTORY_NOT_EMPTY"; break; @@ -106,9 +109,6 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::LOOP_IN_DIRECTORY_RENAME: out << "LOOP_IN_DIRECTORY_RENAME"; break; - case EggsError::EDGE_NOT_FOUND: - out << "EDGE_NOT_FOUND"; - break; case EggsError::DIRECTORY_HAS_OWNER: out << "DIRECTORY_HAS_OWNER"; break; @@ -142,9 +142,6 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::BAD_DIRECTORY_INFO: out << "BAD_DIRECTORY_INFO"; break; - case EggsError::CREATION_TIME_TOO_RECENT: - out << "CREATION_TIME_TOO_RECENT"; - break; case EggsError::DEADLINE_NOT_PASSED: out << "DEADLINE_NOT_PASSED"; break; @@ -154,6 +151,9 @@ std::ostream& operator<<(std::ostream& out, EggsError err) { case EggsError::SAME_DIRECTORIES: out << "SAME_DIRECTORIES"; break; + case EggsError::SAME_SHARD: + out << "SAME_SHARD"; + break; default: out << "EggsError(" << ((int)err) << ")"; break; @@ -621,6 +621,28 @@ std::ostream& operator<<(std::ostream& out, const EntryNewBlockInfo& x) { return out; } +void SnapshotLookupEdge::pack(BincodeBuf& buf) const { + targetId.pack(buf); + creationTime.pack(buf); +} +void SnapshotLookupEdge::unpack(BincodeBuf& buf) { + targetId.unpack(buf); + creationTime.unpack(buf); +} +void SnapshotLookupEdge::clear() { + targetId = InodeIdExtra(); + creationTime = EggsTime(); +} +bool SnapshotLookupEdge::operator==(const SnapshotLookupEdge& rhs) const { + if ((InodeIdExtra)this->targetId != (InodeIdExtra)rhs.targetId) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; + return true; +} +std::ostream& operator<<(std::ostream& out, const SnapshotLookupEdge& x) { + out << "SnapshotLookupEdge(" << "TargetId=" << x.targetId << ", " << "CreationTime=" << x.creationTime << ")"; + return out; +} + void LookupReq::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); @@ -1032,16 +1054,20 @@ std::ostream& operator<<(std::ostream& out, const LinkFileReq& x) { } void LinkFileResp::pack(BincodeBuf& buf) const { + creationTime.pack(buf); } void LinkFileResp::unpack(BincodeBuf& buf) { + creationTime.unpack(buf); } void LinkFileResp::clear() { + creationTime = EggsTime(); } bool LinkFileResp::operator==(const LinkFileResp& rhs) const { + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const LinkFileResp& x) { - out << "LinkFileResp(" << ")"; + out << "LinkFileResp(" << "CreationTime=" << x.creationTime << ")"; return out; } @@ -1049,25 +1075,29 @@ void SoftUnlinkFileReq::pack(BincodeBuf& buf) const { ownerId.pack(buf); fileId.pack(buf); buf.packBytes(name); + creationTime.pack(buf); } void SoftUnlinkFileReq::unpack(BincodeBuf& buf) { ownerId.unpack(buf); fileId.unpack(buf); buf.unpackBytes(name); + creationTime.unpack(buf); } void SoftUnlinkFileReq::clear() { ownerId = InodeId(); fileId = InodeId(); name.clear(); + creationTime = EggsTime(); } bool SoftUnlinkFileReq::operator==(const SoftUnlinkFileReq& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->fileId != (InodeId)rhs.fileId) { return false; }; if (name != rhs.name) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SoftUnlinkFileReq& x) { - out << "SoftUnlinkFileReq(" << "OwnerId=" << x.ownerId << ", " << "FileId=" << x.fileId << ", " << "Name=" << x.name << ")"; + out << "SoftUnlinkFileReq(" << "OwnerId=" << x.ownerId << ", " << "FileId=" << x.fileId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; return out; } @@ -1137,43 +1167,51 @@ void SameDirectoryRenameReq::pack(BincodeBuf& buf) const { targetId.pack(buf); dirId.pack(buf); buf.packBytes(oldName); + oldCreationTime.pack(buf); buf.packBytes(newName); } void SameDirectoryRenameReq::unpack(BincodeBuf& buf) { targetId.unpack(buf); dirId.unpack(buf); buf.unpackBytes(oldName); + oldCreationTime.unpack(buf); buf.unpackBytes(newName); } void SameDirectoryRenameReq::clear() { targetId = InodeId(); dirId = InodeId(); oldName.clear(); + oldCreationTime = EggsTime(); newName.clear(); } bool SameDirectoryRenameReq::operator==(const SameDirectoryRenameReq& rhs) const { if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (oldName != rhs.oldName) { return false; }; + if ((EggsTime)this->oldCreationTime != (EggsTime)rhs.oldCreationTime) { return false; }; if (newName != rhs.newName) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SameDirectoryRenameReq& x) { - out << "SameDirectoryRenameReq(" << "TargetId=" << x.targetId << ", " << "DirId=" << x.dirId << ", " << "OldName=" << x.oldName << ", " << "NewName=" << x.newName << ")"; + out << "SameDirectoryRenameReq(" << "TargetId=" << x.targetId << ", " << "DirId=" << x.dirId << ", " << "OldName=" << x.oldName << ", " << "OldCreationTime=" << x.oldCreationTime << ", " << "NewName=" << x.newName << ")"; return out; } void SameDirectoryRenameResp::pack(BincodeBuf& buf) const { + newCreationTime.pack(buf); } void SameDirectoryRenameResp::unpack(BincodeBuf& buf) { + newCreationTime.unpack(buf); } void SameDirectoryRenameResp::clear() { + newCreationTime = EggsTime(); } bool SameDirectoryRenameResp::operator==(const SameDirectoryRenameResp& rhs) const { + if ((EggsTime)this->newCreationTime != (EggsTime)rhs.newCreationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SameDirectoryRenameResp& x) { - out << "SameDirectoryRenameResp(" << ")"; + out << "SameDirectoryRenameResp(" << "NewCreationTime=" << x.newCreationTime << ")"; return out; } @@ -1213,6 +1251,54 @@ std::ostream& operator<<(std::ostream& out, const SetDirectoryInfoResp& x) { return out; } +void SnapshotLookupReq::pack(BincodeBuf& buf) const { + dirId.pack(buf); + buf.packBytes(name); + startFrom.pack(buf); +} +void SnapshotLookupReq::unpack(BincodeBuf& buf) { + dirId.unpack(buf); + buf.unpackBytes(name); + startFrom.unpack(buf); +} +void SnapshotLookupReq::clear() { + dirId = InodeId(); + name.clear(); + startFrom = EggsTime(); +} +bool SnapshotLookupReq::operator==(const SnapshotLookupReq& rhs) const { + if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; + if (name != rhs.name) { return false; }; + if ((EggsTime)this->startFrom != (EggsTime)rhs.startFrom) { return false; }; + return true; +} +std::ostream& operator<<(std::ostream& out, const SnapshotLookupReq& x) { + out << "SnapshotLookupReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "StartFrom=" << x.startFrom << ")"; + return out; +} + +void SnapshotLookupResp::pack(BincodeBuf& buf) const { + nextTime.pack(buf); + buf.packList(edges); +} +void SnapshotLookupResp::unpack(BincodeBuf& buf) { + nextTime.unpack(buf); + buf.unpackList(edges); +} +void SnapshotLookupResp::clear() { + nextTime = EggsTime(); + edges.clear(); +} +bool SnapshotLookupResp::operator==(const SnapshotLookupResp& rhs) const { + if ((EggsTime)this->nextTime != (EggsTime)rhs.nextTime) { return false; }; + if (edges != rhs.edges) { return false; }; + return true; +} +std::ostream& operator<<(std::ostream& out, const SnapshotLookupResp& x) { + out << "SnapshotLookupResp(" << "NextTime=" << x.nextTime << ", " << "Edges=" << x.edges << ")"; + return out; +} + void VisitDirectoriesReq::pack(BincodeBuf& buf) const { beginId.pack(buf); } @@ -1421,47 +1507,47 @@ std::ostream& operator<<(std::ostream& out, const RemoveNonOwnedEdgeResp& x) { return out; } -void IntraShardHardFileUnlinkReq::pack(BincodeBuf& buf) const { +void SameShardHardFileUnlinkReq::pack(BincodeBuf& buf) const { ownerId.pack(buf); targetId.pack(buf); buf.packBytes(name); creationTime.pack(buf); } -void IntraShardHardFileUnlinkReq::unpack(BincodeBuf& buf) { +void SameShardHardFileUnlinkReq::unpack(BincodeBuf& buf) { ownerId.unpack(buf); targetId.unpack(buf); buf.unpackBytes(name); creationTime.unpack(buf); } -void IntraShardHardFileUnlinkReq::clear() { +void SameShardHardFileUnlinkReq::clear() { ownerId = InodeId(); targetId = InodeId(); name.clear(); creationTime = EggsTime(); } -bool IntraShardHardFileUnlinkReq::operator==(const IntraShardHardFileUnlinkReq& rhs) const { +bool SameShardHardFileUnlinkReq::operator==(const SameShardHardFileUnlinkReq& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if (name != rhs.name) { return false; }; if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkReq& x) { - out << "IntraShardHardFileUnlinkReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkReq& x) { + out << "SameShardHardFileUnlinkReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; return out; } -void IntraShardHardFileUnlinkResp::pack(BincodeBuf& buf) const { +void SameShardHardFileUnlinkResp::pack(BincodeBuf& buf) const { } -void IntraShardHardFileUnlinkResp::unpack(BincodeBuf& buf) { +void SameShardHardFileUnlinkResp::unpack(BincodeBuf& buf) { } -void IntraShardHardFileUnlinkResp::clear() { +void SameShardHardFileUnlinkResp::clear() { } -bool IntraShardHardFileUnlinkResp::operator==(const IntraShardHardFileUnlinkResp& rhs) const { +bool SameShardHardFileUnlinkResp::operator==(const SameShardHardFileUnlinkResp& rhs) const { return true; } -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkResp& x) { - out << "IntraShardHardFileUnlinkResp(" << ")"; +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkResp& x) { + out << "SameShardHardFileUnlinkResp(" << ")"; return out; } @@ -1797,69 +1883,73 @@ void CreateLockedCurrentEdgeReq::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); targetId.pack(buf); - creationTime.pack(buf); } void CreateLockedCurrentEdgeReq::unpack(BincodeBuf& buf) { dirId.unpack(buf); buf.unpackBytes(name); targetId.unpack(buf); - creationTime.unpack(buf); } void CreateLockedCurrentEdgeReq::clear() { dirId = InodeId(); name.clear(); targetId = InodeId(); - creationTime = EggsTime(); } bool CreateLockedCurrentEdgeReq::operator==(const CreateLockedCurrentEdgeReq& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (name != rhs.name) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; - if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeReq& x) { - out << "CreateLockedCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ", " << "CreationTime=" << x.creationTime << ")"; + out << "CreateLockedCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ")"; return out; } void CreateLockedCurrentEdgeResp::pack(BincodeBuf& buf) const { + creationTime.pack(buf); } void CreateLockedCurrentEdgeResp::unpack(BincodeBuf& buf) { + creationTime.unpack(buf); } void CreateLockedCurrentEdgeResp::clear() { + creationTime = EggsTime(); } bool CreateLockedCurrentEdgeResp::operator==(const CreateLockedCurrentEdgeResp& rhs) const { + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeResp& x) { - out << "CreateLockedCurrentEdgeResp(" << ")"; + out << "CreateLockedCurrentEdgeResp(" << "CreationTime=" << x.creationTime << ")"; return out; } void LockCurrentEdgeReq::pack(BincodeBuf& buf) const { dirId.pack(buf); - buf.packBytes(name); targetId.pack(buf); + creationTime.pack(buf); + buf.packBytes(name); } void LockCurrentEdgeReq::unpack(BincodeBuf& buf) { dirId.unpack(buf); - buf.unpackBytes(name); targetId.unpack(buf); + creationTime.unpack(buf); + buf.unpackBytes(name); } void LockCurrentEdgeReq::clear() { dirId = InodeId(); - name.clear(); targetId = InodeId(); + creationTime = EggsTime(); + name.clear(); } bool LockCurrentEdgeReq::operator==(const LockCurrentEdgeReq& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; - if (name != rhs.name) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; + if (name != rhs.name) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const LockCurrentEdgeReq& x) { - out << "LockCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ")"; + out << "LockCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "TargetId=" << x.targetId << ", " << "CreationTime=" << x.creationTime << ", " << "Name=" << x.name << ")"; return out; } @@ -1880,30 +1970,34 @@ std::ostream& operator<<(std::ostream& out, const LockCurrentEdgeResp& x) { void UnlockCurrentEdgeReq::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); + creationTime.pack(buf); targetId.pack(buf); buf.packScalar(wasMoved); } void UnlockCurrentEdgeReq::unpack(BincodeBuf& buf) { dirId.unpack(buf); buf.unpackBytes(name); + creationTime.unpack(buf); targetId.unpack(buf); wasMoved = buf.unpackScalar(); } void UnlockCurrentEdgeReq::clear() { dirId = InodeId(); name.clear(); + creationTime = EggsTime(); targetId = InodeId(); wasMoved = bool(0); } bool UnlockCurrentEdgeReq::operator==(const UnlockCurrentEdgeReq& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (name != rhs.name) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((bool)this->wasMoved != (bool)rhs.wasMoved) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const UnlockCurrentEdgeReq& x) { - out << "UnlockCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ", " << "WasMoved=" << x.wasMoved << ")"; + out << "UnlockCurrentEdgeReq(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ", " << "TargetId=" << x.targetId << ", " << "WasMoved=" << x.wasMoved << ")"; return out; } @@ -2029,19 +2123,23 @@ std::ostream& operator<<(std::ostream& out, const MakeDirectoryReq& x) { void MakeDirectoryResp::pack(BincodeBuf& buf) const { id.pack(buf); + creationTime.pack(buf); } void MakeDirectoryResp::unpack(BincodeBuf& buf) { id.unpack(buf); + creationTime.unpack(buf); } void MakeDirectoryResp::clear() { id = InodeId(); + creationTime = EggsTime(); } bool MakeDirectoryResp::operator==(const MakeDirectoryResp& rhs) const { if ((InodeId)this->id != (InodeId)rhs.id) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const MakeDirectoryResp& x) { - out << "MakeDirectoryResp(" << "Id=" << x.id << ")"; + out << "MakeDirectoryResp(" << "Id=" << x.id << ", " << "CreationTime=" << x.creationTime << ")"; return out; } @@ -2049,6 +2147,7 @@ void RenameFileReq::pack(BincodeBuf& buf) const { targetId.pack(buf); oldOwnerId.pack(buf); buf.packBytes(oldName); + oldCreationTime.pack(buf); newOwnerId.pack(buf); buf.packBytes(newName); } @@ -2056,6 +2155,7 @@ void RenameFileReq::unpack(BincodeBuf& buf) { targetId.unpack(buf); oldOwnerId.unpack(buf); buf.unpackBytes(oldName); + oldCreationTime.unpack(buf); newOwnerId.unpack(buf); buf.unpackBytes(newName); } @@ -2063,6 +2163,7 @@ void RenameFileReq::clear() { targetId = InodeId(); oldOwnerId = InodeId(); oldName.clear(); + oldCreationTime = EggsTime(); newOwnerId = InodeId(); newName.clear(); } @@ -2070,52 +2171,61 @@ bool RenameFileReq::operator==(const RenameFileReq& rhs) const { if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((InodeId)this->oldOwnerId != (InodeId)rhs.oldOwnerId) { return false; }; if (oldName != rhs.oldName) { return false; }; + if ((EggsTime)this->oldCreationTime != (EggsTime)rhs.oldCreationTime) { return false; }; if ((InodeId)this->newOwnerId != (InodeId)rhs.newOwnerId) { return false; }; if (newName != rhs.newName) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const RenameFileReq& x) { - out << "RenameFileReq(" << "TargetId=" << x.targetId << ", " << "OldOwnerId=" << x.oldOwnerId << ", " << "OldName=" << x.oldName << ", " << "NewOwnerId=" << x.newOwnerId << ", " << "NewName=" << x.newName << ")"; + out << "RenameFileReq(" << "TargetId=" << x.targetId << ", " << "OldOwnerId=" << x.oldOwnerId << ", " << "OldName=" << x.oldName << ", " << "OldCreationTime=" << x.oldCreationTime << ", " << "NewOwnerId=" << x.newOwnerId << ", " << "NewName=" << x.newName << ")"; return out; } void RenameFileResp::pack(BincodeBuf& buf) const { + creationTime.pack(buf); } void RenameFileResp::unpack(BincodeBuf& buf) { + creationTime.unpack(buf); } void RenameFileResp::clear() { + creationTime = EggsTime(); } bool RenameFileResp::operator==(const RenameFileResp& rhs) const { + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const RenameFileResp& x) { - out << "RenameFileResp(" << ")"; + out << "RenameFileResp(" << "CreationTime=" << x.creationTime << ")"; return out; } void SoftUnlinkDirectoryReq::pack(BincodeBuf& buf) const { ownerId.pack(buf); targetId.pack(buf); + creationTime.pack(buf); buf.packBytes(name); } void SoftUnlinkDirectoryReq::unpack(BincodeBuf& buf) { ownerId.unpack(buf); targetId.unpack(buf); + creationTime.unpack(buf); buf.unpackBytes(name); } void SoftUnlinkDirectoryReq::clear() { ownerId = InodeId(); targetId = InodeId(); + creationTime = EggsTime(); name.clear(); } bool SoftUnlinkDirectoryReq::operator==(const SoftUnlinkDirectoryReq& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; if (name != rhs.name) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SoftUnlinkDirectoryReq& x) { - out << "SoftUnlinkDirectoryReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ")"; + out << "SoftUnlinkDirectoryReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "CreationTime=" << x.creationTime << ", " << "Name=" << x.name << ")"; return out; } @@ -2137,6 +2247,7 @@ void RenameDirectoryReq::pack(BincodeBuf& buf) const { targetId.pack(buf); oldOwnerId.pack(buf); buf.packBytes(oldName); + oldCreationTime.pack(buf); newOwnerId.pack(buf); buf.packBytes(newName); } @@ -2144,6 +2255,7 @@ void RenameDirectoryReq::unpack(BincodeBuf& buf) { targetId.unpack(buf); oldOwnerId.unpack(buf); buf.unpackBytes(oldName); + oldCreationTime.unpack(buf); newOwnerId.unpack(buf); buf.unpackBytes(newName); } @@ -2151,6 +2263,7 @@ void RenameDirectoryReq::clear() { targetId = InodeId(); oldOwnerId = InodeId(); oldName.clear(); + oldCreationTime = EggsTime(); newOwnerId = InodeId(); newName.clear(); } @@ -2158,26 +2271,31 @@ bool RenameDirectoryReq::operator==(const RenameDirectoryReq& rhs) const { if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((InodeId)this->oldOwnerId != (InodeId)rhs.oldOwnerId) { return false; }; if (oldName != rhs.oldName) { return false; }; + if ((EggsTime)this->oldCreationTime != (EggsTime)rhs.oldCreationTime) { return false; }; if ((InodeId)this->newOwnerId != (InodeId)rhs.newOwnerId) { return false; }; if (newName != rhs.newName) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const RenameDirectoryReq& x) { - out << "RenameDirectoryReq(" << "TargetId=" << x.targetId << ", " << "OldOwnerId=" << x.oldOwnerId << ", " << "OldName=" << x.oldName << ", " << "NewOwnerId=" << x.newOwnerId << ", " << "NewName=" << x.newName << ")"; + out << "RenameDirectoryReq(" << "TargetId=" << x.targetId << ", " << "OldOwnerId=" << x.oldOwnerId << ", " << "OldName=" << x.oldName << ", " << "OldCreationTime=" << x.oldCreationTime << ", " << "NewOwnerId=" << x.newOwnerId << ", " << "NewName=" << x.newName << ")"; return out; } void RenameDirectoryResp::pack(BincodeBuf& buf) const { + creationTime.pack(buf); } void RenameDirectoryResp::unpack(BincodeBuf& buf) { + creationTime.unpack(buf); } void RenameDirectoryResp::clear() { + creationTime = EggsTime(); } bool RenameDirectoryResp::operator==(const RenameDirectoryResp& rhs) const { + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const RenameDirectoryResp& x) { - out << "RenameDirectoryResp(" << ")"; + out << "RenameDirectoryResp(" << "CreationTime=" << x.creationTime << ")"; return out; } @@ -2213,47 +2331,47 @@ std::ostream& operator<<(std::ostream& out, const HardUnlinkDirectoryResp& x) { return out; } -void HardUnlinkFileReq::pack(BincodeBuf& buf) const { +void CrossShardHardUnlinkFileReq::pack(BincodeBuf& buf) const { ownerId.pack(buf); targetId.pack(buf); buf.packBytes(name); creationTime.pack(buf); } -void HardUnlinkFileReq::unpack(BincodeBuf& buf) { +void CrossShardHardUnlinkFileReq::unpack(BincodeBuf& buf) { ownerId.unpack(buf); targetId.unpack(buf); buf.unpackBytes(name); creationTime.unpack(buf); } -void HardUnlinkFileReq::clear() { +void CrossShardHardUnlinkFileReq::clear() { ownerId = InodeId(); targetId = InodeId(); name.clear(); creationTime = EggsTime(); } -bool HardUnlinkFileReq::operator==(const HardUnlinkFileReq& rhs) const { +bool CrossShardHardUnlinkFileReq::operator==(const CrossShardHardUnlinkFileReq& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if (name != rhs.name) { return false; }; if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } -std::ostream& operator<<(std::ostream& out, const HardUnlinkFileReq& x) { - out << "HardUnlinkFileReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; +std::ostream& operator<<(std::ostream& out, const CrossShardHardUnlinkFileReq& x) { + out << "CrossShardHardUnlinkFileReq(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; return out; } -void HardUnlinkFileResp::pack(BincodeBuf& buf) const { +void CrossShardHardUnlinkFileResp::pack(BincodeBuf& buf) const { } -void HardUnlinkFileResp::unpack(BincodeBuf& buf) { +void CrossShardHardUnlinkFileResp::unpack(BincodeBuf& buf) { } -void HardUnlinkFileResp::clear() { +void CrossShardHardUnlinkFileResp::clear() { } -bool HardUnlinkFileResp::operator==(const HardUnlinkFileResp& rhs) const { +bool CrossShardHardUnlinkFileResp::operator==(const CrossShardHardUnlinkFileResp& rhs) const { return true; } -std::ostream& operator<<(std::ostream& out, const HardUnlinkFileResp& x) { - out << "HardUnlinkFileResp(" << ")"; +std::ostream& operator<<(std::ostream& out, const CrossShardHardUnlinkFileResp& x) { + out << "CrossShardHardUnlinkFileResp(" << ")"; return out; } @@ -2298,6 +2416,9 @@ std::ostream& operator<<(std::ostream& out, ShardMessageKind kind) { case ShardMessageKind::SET_DIRECTORY_INFO: out << "SET_DIRECTORY_INFO"; break; + case ShardMessageKind::SNAPSHOT_LOOKUP: + out << "SNAPSHOT_LOOKUP"; + break; case ShardMessageKind::VISIT_DIRECTORIES: out << "VISIT_DIRECTORIES"; break; @@ -2313,8 +2434,8 @@ std::ostream& operator<<(std::ostream& out, ShardMessageKind kind) { case ShardMessageKind::REMOVE_NON_OWNED_EDGE: out << "REMOVE_NON_OWNED_EDGE"; break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: - out << "INTRA_SHARD_HARD_FILE_UNLINK"; + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: + out << "SAME_SHARD_HARD_FILE_UNLINK"; break; case ShardMessageKind::REMOVE_SPAN_INITIATE: out << "REMOVE_SPAN_INITIATE"; @@ -2492,193 +2613,203 @@ SetDirectoryInfoReq& ShardReqContainer::setSetDirectoryInfo() { x.clear(); return x; } +const SnapshotLookupReq& ShardReqContainer::getSnapshotLookup() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::SNAPSHOT_LOOKUP, "%s != %s", _kind, ShardMessageKind::SNAPSHOT_LOOKUP); + return std::get<13>(_data); +} +SnapshotLookupReq& ShardReqContainer::setSnapshotLookup() { + _kind = ShardMessageKind::SNAPSHOT_LOOKUP; + auto& x = std::get<13>(_data); + x.clear(); + return x; +} const VisitDirectoriesReq& ShardReqContainer::getVisitDirectories() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_DIRECTORIES, "%s != %s", _kind, ShardMessageKind::VISIT_DIRECTORIES); - return std::get<13>(_data); + return std::get<14>(_data); } VisitDirectoriesReq& ShardReqContainer::setVisitDirectories() { _kind = ShardMessageKind::VISIT_DIRECTORIES; - auto& x = std::get<13>(_data); + auto& x = std::get<14>(_data); x.clear(); return x; } const VisitFilesReq& ShardReqContainer::getVisitFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_FILES, "%s != %s", _kind, ShardMessageKind::VISIT_FILES); - return std::get<14>(_data); + return std::get<15>(_data); } VisitFilesReq& ShardReqContainer::setVisitFiles() { _kind = ShardMessageKind::VISIT_FILES; - auto& x = std::get<14>(_data); + auto& x = std::get<15>(_data); x.clear(); return x; } const VisitTransientFilesReq& ShardReqContainer::getVisitTransientFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_TRANSIENT_FILES, "%s != %s", _kind, ShardMessageKind::VISIT_TRANSIENT_FILES); - return std::get<15>(_data); + return std::get<16>(_data); } VisitTransientFilesReq& ShardReqContainer::setVisitTransientFiles() { _kind = ShardMessageKind::VISIT_TRANSIENT_FILES; - auto& x = std::get<15>(_data); + auto& x = std::get<16>(_data); x.clear(); return x; } const FullReadDirReq& ShardReqContainer::getFullReadDir() const { ALWAYS_ASSERT(_kind == ShardMessageKind::FULL_READ_DIR, "%s != %s", _kind, ShardMessageKind::FULL_READ_DIR); - return std::get<16>(_data); + return std::get<17>(_data); } FullReadDirReq& ShardReqContainer::setFullReadDir() { _kind = ShardMessageKind::FULL_READ_DIR; - auto& x = std::get<16>(_data); + auto& x = std::get<17>(_data); x.clear(); return x; } const RemoveNonOwnedEdgeReq& ShardReqContainer::getRemoveNonOwnedEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_NON_OWNED_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_NON_OWNED_EDGE); - return std::get<17>(_data); + return std::get<18>(_data); } RemoveNonOwnedEdgeReq& ShardReqContainer::setRemoveNonOwnedEdge() { _kind = ShardMessageKind::REMOVE_NON_OWNED_EDGE; - auto& x = std::get<17>(_data); + auto& x = std::get<18>(_data); x.clear(); return x; } -const IntraShardHardFileUnlinkReq& ShardReqContainer::getIntraShardHardFileUnlink() const { - ALWAYS_ASSERT(_kind == ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK); - return std::get<18>(_data); +const SameShardHardFileUnlinkReq& ShardReqContainer::getSameShardHardFileUnlink() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK); + return std::get<19>(_data); } -IntraShardHardFileUnlinkReq& ShardReqContainer::setIntraShardHardFileUnlink() { - _kind = ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK; - auto& x = std::get<18>(_data); +SameShardHardFileUnlinkReq& ShardReqContainer::setSameShardHardFileUnlink() { + _kind = ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK; + auto& x = std::get<19>(_data); x.clear(); return x; } const RemoveSpanInitiateReq& ShardReqContainer::getRemoveSpanInitiate() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_SPAN_INITIATE, "%s != %s", _kind, ShardMessageKind::REMOVE_SPAN_INITIATE); - return std::get<19>(_data); + return std::get<20>(_data); } RemoveSpanInitiateReq& ShardReqContainer::setRemoveSpanInitiate() { _kind = ShardMessageKind::REMOVE_SPAN_INITIATE; - auto& x = std::get<19>(_data); + auto& x = std::get<20>(_data); x.clear(); return x; } const RemoveSpanCertifyReq& ShardReqContainer::getRemoveSpanCertify() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_SPAN_CERTIFY, "%s != %s", _kind, ShardMessageKind::REMOVE_SPAN_CERTIFY); - return std::get<20>(_data); + return std::get<21>(_data); } RemoveSpanCertifyReq& ShardReqContainer::setRemoveSpanCertify() { _kind = ShardMessageKind::REMOVE_SPAN_CERTIFY; - auto& x = std::get<20>(_data); + auto& x = std::get<21>(_data); x.clear(); return x; } const SwapBlocksReq& ShardReqContainer::getSwapBlocks() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SWAP_BLOCKS, "%s != %s", _kind, ShardMessageKind::SWAP_BLOCKS); - return std::get<21>(_data); + return std::get<22>(_data); } SwapBlocksReq& ShardReqContainer::setSwapBlocks() { _kind = ShardMessageKind::SWAP_BLOCKS; - auto& x = std::get<21>(_data); + auto& x = std::get<22>(_data); x.clear(); return x; } const BlockServiceFilesReq& ShardReqContainer::getBlockServiceFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::BLOCK_SERVICE_FILES, "%s != %s", _kind, ShardMessageKind::BLOCK_SERVICE_FILES); - return std::get<22>(_data); + return std::get<23>(_data); } BlockServiceFilesReq& ShardReqContainer::setBlockServiceFiles() { _kind = ShardMessageKind::BLOCK_SERVICE_FILES; - auto& x = std::get<22>(_data); + auto& x = std::get<23>(_data); x.clear(); return x; } const RemoveInodeReq& ShardReqContainer::getRemoveInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_INODE, "%s != %s", _kind, ShardMessageKind::REMOVE_INODE); - return std::get<23>(_data); + return std::get<24>(_data); } RemoveInodeReq& ShardReqContainer::setRemoveInode() { _kind = ShardMessageKind::REMOVE_INODE; - auto& x = std::get<23>(_data); + auto& x = std::get<24>(_data); x.clear(); return x; } const CreateDirectoryInodeReq& ShardReqContainer::getCreateDirectoryInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_DIRECTORY_INODE, "%s != %s", _kind, ShardMessageKind::CREATE_DIRECTORY_INODE); - return std::get<24>(_data); + return std::get<25>(_data); } CreateDirectoryInodeReq& ShardReqContainer::setCreateDirectoryInode() { _kind = ShardMessageKind::CREATE_DIRECTORY_INODE; - auto& x = std::get<24>(_data); + auto& x = std::get<25>(_data); x.clear(); return x; } const SetDirectoryOwnerReq& ShardReqContainer::getSetDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SET_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::SET_DIRECTORY_OWNER); - return std::get<25>(_data); + return std::get<26>(_data); } SetDirectoryOwnerReq& ShardReqContainer::setSetDirectoryOwner() { _kind = ShardMessageKind::SET_DIRECTORY_OWNER; - auto& x = std::get<25>(_data); + auto& x = std::get<26>(_data); x.clear(); return x; } const RemoveDirectoryOwnerReq& ShardReqContainer::getRemoveDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::REMOVE_DIRECTORY_OWNER); - return std::get<26>(_data); + return std::get<27>(_data); } RemoveDirectoryOwnerReq& ShardReqContainer::setRemoveDirectoryOwner() { _kind = ShardMessageKind::REMOVE_DIRECTORY_OWNER; - auto& x = std::get<26>(_data); + auto& x = std::get<27>(_data); x.clear(); return x; } const CreateLockedCurrentEdgeReq& ShardReqContainer::getCreateLockedCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE); - return std::get<27>(_data); + return std::get<28>(_data); } CreateLockedCurrentEdgeReq& ShardReqContainer::setCreateLockedCurrentEdge() { _kind = ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE; - auto& x = std::get<27>(_data); + auto& x = std::get<28>(_data); x.clear(); return x; } const LockCurrentEdgeReq& ShardReqContainer::getLockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::LOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::LOCK_CURRENT_EDGE); - return std::get<28>(_data); + return std::get<29>(_data); } LockCurrentEdgeReq& ShardReqContainer::setLockCurrentEdge() { _kind = ShardMessageKind::LOCK_CURRENT_EDGE; - auto& x = std::get<28>(_data); + auto& x = std::get<29>(_data); x.clear(); return x; } const UnlockCurrentEdgeReq& ShardReqContainer::getUnlockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::UNLOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::UNLOCK_CURRENT_EDGE); - return std::get<29>(_data); + return std::get<30>(_data); } UnlockCurrentEdgeReq& ShardReqContainer::setUnlockCurrentEdge() { _kind = ShardMessageKind::UNLOCK_CURRENT_EDGE; - auto& x = std::get<29>(_data); + auto& x = std::get<30>(_data); x.clear(); return x; } const RemoveOwnedSnapshotFileEdgeReq& ShardReqContainer::getRemoveOwnedSnapshotFileEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE); - return std::get<30>(_data); + return std::get<31>(_data); } RemoveOwnedSnapshotFileEdgeReq& ShardReqContainer::setRemoveOwnedSnapshotFileEdge() { _kind = ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE; - auto& x = std::get<30>(_data); + auto& x = std::get<31>(_data); x.clear(); return x; } const MakeFileTransientReq& ShardReqContainer::getMakeFileTransient() const { ALWAYS_ASSERT(_kind == ShardMessageKind::MAKE_FILE_TRANSIENT, "%s != %s", _kind, ShardMessageKind::MAKE_FILE_TRANSIENT); - return std::get<31>(_data); + return std::get<32>(_data); } MakeFileTransientReq& ShardReqContainer::setMakeFileTransient() { _kind = ShardMessageKind::MAKE_FILE_TRANSIENT; - auto& x = std::get<31>(_data); + auto& x = std::get<32>(_data); x.clear(); return x; } @@ -2710,44 +2841,46 @@ size_t ShardReqContainer::packedSize() const { return std::get<11>(_data).packedSize(); case ShardMessageKind::SET_DIRECTORY_INFO: return std::get<12>(_data).packedSize(); - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: return std::get<13>(_data).packedSize(); - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: return std::get<14>(_data).packedSize(); - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: return std::get<15>(_data).packedSize(); - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: return std::get<16>(_data).packedSize(); - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: return std::get<17>(_data).packedSize(); - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: return std::get<18>(_data).packedSize(); - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: return std::get<19>(_data).packedSize(); - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: return std::get<20>(_data).packedSize(); - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: return std::get<21>(_data).packedSize(); - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: return std::get<22>(_data).packedSize(); - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: return std::get<23>(_data).packedSize(); - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: return std::get<24>(_data).packedSize(); - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: return std::get<25>(_data).packedSize(); - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: return std::get<26>(_data).packedSize(); - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: return std::get<27>(_data).packedSize(); - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: return std::get<28>(_data).packedSize(); - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: return std::get<29>(_data).packedSize(); - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: return std::get<30>(_data).packedSize(); - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: return std::get<31>(_data).packedSize(); + case ShardMessageKind::MAKE_FILE_TRANSIENT: + return std::get<32>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -2794,63 +2927,66 @@ void ShardReqContainer::pack(BincodeBuf& buf) const { case ShardMessageKind::SET_DIRECTORY_INFO: std::get<12>(_data).pack(buf); break; - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: std::get<13>(_data).pack(buf); break; - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: std::get<14>(_data).pack(buf); break; - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: std::get<15>(_data).pack(buf); break; - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: std::get<16>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: std::get<17>(_data).pack(buf); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: std::get<18>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<19>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: std::get<20>(_data).pack(buf); break; - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: std::get<21>(_data).pack(buf); break; - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: std::get<22>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: std::get<23>(_data).pack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: std::get<24>(_data).pack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<25>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<26>(_data).pack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<27>(_data).pack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<28>(_data).pack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<29>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<30>(_data).pack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<31>(_data).pack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<32>(_data).pack(buf); + break; default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -2898,63 +3034,66 @@ void ShardReqContainer::unpack(BincodeBuf& buf, ShardMessageKind kind) { case ShardMessageKind::SET_DIRECTORY_INFO: std::get<12>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: std::get<13>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: std::get<14>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: std::get<15>(_data).unpack(buf); break; - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: std::get<16>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: std::get<17>(_data).unpack(buf); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: std::get<18>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<19>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: std::get<20>(_data).unpack(buf); break; - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: std::get<21>(_data).unpack(buf); break; - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: std::get<22>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: std::get<23>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: std::get<24>(_data).unpack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<25>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<26>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<27>(_data).unpack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<28>(_data).unpack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<29>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<30>(_data).unpack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<31>(_data).unpack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<32>(_data).unpack(buf); + break; default: throw BINCODE_EXCEPTION("bad ShardMessageKind kind %s", kind); } @@ -3001,6 +3140,9 @@ std::ostream& operator<<(std::ostream& out, const ShardReqContainer& x) { case ShardMessageKind::SET_DIRECTORY_INFO: out << x.getSetDirectoryInfo(); break; + case ShardMessageKind::SNAPSHOT_LOOKUP: + out << x.getSnapshotLookup(); + break; case ShardMessageKind::VISIT_DIRECTORIES: out << x.getVisitDirectories(); break; @@ -3016,8 +3158,8 @@ std::ostream& operator<<(std::ostream& out, const ShardReqContainer& x) { case ShardMessageKind::REMOVE_NON_OWNED_EDGE: out << x.getRemoveNonOwnedEdge(); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: - out << x.getIntraShardHardFileUnlink(); + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: + out << x.getSameShardHardFileUnlink(); break; case ShardMessageKind::REMOVE_SPAN_INITIATE: out << x.getRemoveSpanInitiate(); @@ -3194,193 +3336,203 @@ SetDirectoryInfoResp& ShardRespContainer::setSetDirectoryInfo() { x.clear(); return x; } +const SnapshotLookupResp& ShardRespContainer::getSnapshotLookup() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::SNAPSHOT_LOOKUP, "%s != %s", _kind, ShardMessageKind::SNAPSHOT_LOOKUP); + return std::get<13>(_data); +} +SnapshotLookupResp& ShardRespContainer::setSnapshotLookup() { + _kind = ShardMessageKind::SNAPSHOT_LOOKUP; + auto& x = std::get<13>(_data); + x.clear(); + return x; +} const VisitDirectoriesResp& ShardRespContainer::getVisitDirectories() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_DIRECTORIES, "%s != %s", _kind, ShardMessageKind::VISIT_DIRECTORIES); - return std::get<13>(_data); + return std::get<14>(_data); } VisitDirectoriesResp& ShardRespContainer::setVisitDirectories() { _kind = ShardMessageKind::VISIT_DIRECTORIES; - auto& x = std::get<13>(_data); + auto& x = std::get<14>(_data); x.clear(); return x; } const VisitFilesResp& ShardRespContainer::getVisitFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_FILES, "%s != %s", _kind, ShardMessageKind::VISIT_FILES); - return std::get<14>(_data); + return std::get<15>(_data); } VisitFilesResp& ShardRespContainer::setVisitFiles() { _kind = ShardMessageKind::VISIT_FILES; - auto& x = std::get<14>(_data); + auto& x = std::get<15>(_data); x.clear(); return x; } const VisitTransientFilesResp& ShardRespContainer::getVisitTransientFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::VISIT_TRANSIENT_FILES, "%s != %s", _kind, ShardMessageKind::VISIT_TRANSIENT_FILES); - return std::get<15>(_data); + return std::get<16>(_data); } VisitTransientFilesResp& ShardRespContainer::setVisitTransientFiles() { _kind = ShardMessageKind::VISIT_TRANSIENT_FILES; - auto& x = std::get<15>(_data); + auto& x = std::get<16>(_data); x.clear(); return x; } const FullReadDirResp& ShardRespContainer::getFullReadDir() const { ALWAYS_ASSERT(_kind == ShardMessageKind::FULL_READ_DIR, "%s != %s", _kind, ShardMessageKind::FULL_READ_DIR); - return std::get<16>(_data); + return std::get<17>(_data); } FullReadDirResp& ShardRespContainer::setFullReadDir() { _kind = ShardMessageKind::FULL_READ_DIR; - auto& x = std::get<16>(_data); + auto& x = std::get<17>(_data); x.clear(); return x; } const RemoveNonOwnedEdgeResp& ShardRespContainer::getRemoveNonOwnedEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_NON_OWNED_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_NON_OWNED_EDGE); - return std::get<17>(_data); + return std::get<18>(_data); } RemoveNonOwnedEdgeResp& ShardRespContainer::setRemoveNonOwnedEdge() { _kind = ShardMessageKind::REMOVE_NON_OWNED_EDGE; - auto& x = std::get<17>(_data); + auto& x = std::get<18>(_data); x.clear(); return x; } -const IntraShardHardFileUnlinkResp& ShardRespContainer::getIntraShardHardFileUnlink() const { - ALWAYS_ASSERT(_kind == ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK); - return std::get<18>(_data); +const SameShardHardFileUnlinkResp& ShardRespContainer::getSameShardHardFileUnlink() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK); + return std::get<19>(_data); } -IntraShardHardFileUnlinkResp& ShardRespContainer::setIntraShardHardFileUnlink() { - _kind = ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK; - auto& x = std::get<18>(_data); +SameShardHardFileUnlinkResp& ShardRespContainer::setSameShardHardFileUnlink() { + _kind = ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK; + auto& x = std::get<19>(_data); x.clear(); return x; } const RemoveSpanInitiateResp& ShardRespContainer::getRemoveSpanInitiate() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_SPAN_INITIATE, "%s != %s", _kind, ShardMessageKind::REMOVE_SPAN_INITIATE); - return std::get<19>(_data); + return std::get<20>(_data); } RemoveSpanInitiateResp& ShardRespContainer::setRemoveSpanInitiate() { _kind = ShardMessageKind::REMOVE_SPAN_INITIATE; - auto& x = std::get<19>(_data); + auto& x = std::get<20>(_data); x.clear(); return x; } const RemoveSpanCertifyResp& ShardRespContainer::getRemoveSpanCertify() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_SPAN_CERTIFY, "%s != %s", _kind, ShardMessageKind::REMOVE_SPAN_CERTIFY); - return std::get<20>(_data); + return std::get<21>(_data); } RemoveSpanCertifyResp& ShardRespContainer::setRemoveSpanCertify() { _kind = ShardMessageKind::REMOVE_SPAN_CERTIFY; - auto& x = std::get<20>(_data); + auto& x = std::get<21>(_data); x.clear(); return x; } const SwapBlocksResp& ShardRespContainer::getSwapBlocks() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SWAP_BLOCKS, "%s != %s", _kind, ShardMessageKind::SWAP_BLOCKS); - return std::get<21>(_data); + return std::get<22>(_data); } SwapBlocksResp& ShardRespContainer::setSwapBlocks() { _kind = ShardMessageKind::SWAP_BLOCKS; - auto& x = std::get<21>(_data); + auto& x = std::get<22>(_data); x.clear(); return x; } const BlockServiceFilesResp& ShardRespContainer::getBlockServiceFiles() const { ALWAYS_ASSERT(_kind == ShardMessageKind::BLOCK_SERVICE_FILES, "%s != %s", _kind, ShardMessageKind::BLOCK_SERVICE_FILES); - return std::get<22>(_data); + return std::get<23>(_data); } BlockServiceFilesResp& ShardRespContainer::setBlockServiceFiles() { _kind = ShardMessageKind::BLOCK_SERVICE_FILES; - auto& x = std::get<22>(_data); + auto& x = std::get<23>(_data); x.clear(); return x; } const RemoveInodeResp& ShardRespContainer::getRemoveInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_INODE, "%s != %s", _kind, ShardMessageKind::REMOVE_INODE); - return std::get<23>(_data); + return std::get<24>(_data); } RemoveInodeResp& ShardRespContainer::setRemoveInode() { _kind = ShardMessageKind::REMOVE_INODE; - auto& x = std::get<23>(_data); + auto& x = std::get<24>(_data); x.clear(); return x; } const CreateDirectoryInodeResp& ShardRespContainer::getCreateDirectoryInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_DIRECTORY_INODE, "%s != %s", _kind, ShardMessageKind::CREATE_DIRECTORY_INODE); - return std::get<24>(_data); + return std::get<25>(_data); } CreateDirectoryInodeResp& ShardRespContainer::setCreateDirectoryInode() { _kind = ShardMessageKind::CREATE_DIRECTORY_INODE; - auto& x = std::get<24>(_data); + auto& x = std::get<25>(_data); x.clear(); return x; } const SetDirectoryOwnerResp& ShardRespContainer::getSetDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SET_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::SET_DIRECTORY_OWNER); - return std::get<25>(_data); + return std::get<26>(_data); } SetDirectoryOwnerResp& ShardRespContainer::setSetDirectoryOwner() { _kind = ShardMessageKind::SET_DIRECTORY_OWNER; - auto& x = std::get<25>(_data); + auto& x = std::get<26>(_data); x.clear(); return x; } const RemoveDirectoryOwnerResp& ShardRespContainer::getRemoveDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::REMOVE_DIRECTORY_OWNER); - return std::get<26>(_data); + return std::get<27>(_data); } RemoveDirectoryOwnerResp& ShardRespContainer::setRemoveDirectoryOwner() { _kind = ShardMessageKind::REMOVE_DIRECTORY_OWNER; - auto& x = std::get<26>(_data); + auto& x = std::get<27>(_data); x.clear(); return x; } const CreateLockedCurrentEdgeResp& ShardRespContainer::getCreateLockedCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE); - return std::get<27>(_data); + return std::get<28>(_data); } CreateLockedCurrentEdgeResp& ShardRespContainer::setCreateLockedCurrentEdge() { _kind = ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE; - auto& x = std::get<27>(_data); + auto& x = std::get<28>(_data); x.clear(); return x; } const LockCurrentEdgeResp& ShardRespContainer::getLockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::LOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::LOCK_CURRENT_EDGE); - return std::get<28>(_data); + return std::get<29>(_data); } LockCurrentEdgeResp& ShardRespContainer::setLockCurrentEdge() { _kind = ShardMessageKind::LOCK_CURRENT_EDGE; - auto& x = std::get<28>(_data); + auto& x = std::get<29>(_data); x.clear(); return x; } const UnlockCurrentEdgeResp& ShardRespContainer::getUnlockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::UNLOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::UNLOCK_CURRENT_EDGE); - return std::get<29>(_data); + return std::get<30>(_data); } UnlockCurrentEdgeResp& ShardRespContainer::setUnlockCurrentEdge() { _kind = ShardMessageKind::UNLOCK_CURRENT_EDGE; - auto& x = std::get<29>(_data); + auto& x = std::get<30>(_data); x.clear(); return x; } const RemoveOwnedSnapshotFileEdgeResp& ShardRespContainer::getRemoveOwnedSnapshotFileEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE); - return std::get<30>(_data); + return std::get<31>(_data); } RemoveOwnedSnapshotFileEdgeResp& ShardRespContainer::setRemoveOwnedSnapshotFileEdge() { _kind = ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE; - auto& x = std::get<30>(_data); + auto& x = std::get<31>(_data); x.clear(); return x; } const MakeFileTransientResp& ShardRespContainer::getMakeFileTransient() const { ALWAYS_ASSERT(_kind == ShardMessageKind::MAKE_FILE_TRANSIENT, "%s != %s", _kind, ShardMessageKind::MAKE_FILE_TRANSIENT); - return std::get<31>(_data); + return std::get<32>(_data); } MakeFileTransientResp& ShardRespContainer::setMakeFileTransient() { _kind = ShardMessageKind::MAKE_FILE_TRANSIENT; - auto& x = std::get<31>(_data); + auto& x = std::get<32>(_data); x.clear(); return x; } @@ -3412,44 +3564,46 @@ size_t ShardRespContainer::packedSize() const { return std::get<11>(_data).packedSize(); case ShardMessageKind::SET_DIRECTORY_INFO: return std::get<12>(_data).packedSize(); - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: return std::get<13>(_data).packedSize(); - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: return std::get<14>(_data).packedSize(); - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: return std::get<15>(_data).packedSize(); - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: return std::get<16>(_data).packedSize(); - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: return std::get<17>(_data).packedSize(); - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: return std::get<18>(_data).packedSize(); - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: return std::get<19>(_data).packedSize(); - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: return std::get<20>(_data).packedSize(); - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: return std::get<21>(_data).packedSize(); - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: return std::get<22>(_data).packedSize(); - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: return std::get<23>(_data).packedSize(); - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: return std::get<24>(_data).packedSize(); - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: return std::get<25>(_data).packedSize(); - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: return std::get<26>(_data).packedSize(); - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: return std::get<27>(_data).packedSize(); - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: return std::get<28>(_data).packedSize(); - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: return std::get<29>(_data).packedSize(); - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: return std::get<30>(_data).packedSize(); - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: return std::get<31>(_data).packedSize(); + case ShardMessageKind::MAKE_FILE_TRANSIENT: + return std::get<32>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -3496,63 +3650,66 @@ void ShardRespContainer::pack(BincodeBuf& buf) const { case ShardMessageKind::SET_DIRECTORY_INFO: std::get<12>(_data).pack(buf); break; - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: std::get<13>(_data).pack(buf); break; - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: std::get<14>(_data).pack(buf); break; - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: std::get<15>(_data).pack(buf); break; - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: std::get<16>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: std::get<17>(_data).pack(buf); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: std::get<18>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<19>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: std::get<20>(_data).pack(buf); break; - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: std::get<21>(_data).pack(buf); break; - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: std::get<22>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: std::get<23>(_data).pack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: std::get<24>(_data).pack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<25>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<26>(_data).pack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<27>(_data).pack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<28>(_data).pack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<29>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<30>(_data).pack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<31>(_data).pack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<32>(_data).pack(buf); + break; default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -3600,63 +3757,66 @@ void ShardRespContainer::unpack(BincodeBuf& buf, ShardMessageKind kind) { case ShardMessageKind::SET_DIRECTORY_INFO: std::get<12>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_DIRECTORIES: + case ShardMessageKind::SNAPSHOT_LOOKUP: std::get<13>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_FILES: + case ShardMessageKind::VISIT_DIRECTORIES: std::get<14>(_data).unpack(buf); break; - case ShardMessageKind::VISIT_TRANSIENT_FILES: + case ShardMessageKind::VISIT_FILES: std::get<15>(_data).unpack(buf); break; - case ShardMessageKind::FULL_READ_DIR: + case ShardMessageKind::VISIT_TRANSIENT_FILES: std::get<16>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_NON_OWNED_EDGE: + case ShardMessageKind::FULL_READ_DIR: std::get<17>(_data).unpack(buf); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::REMOVE_NON_OWNED_EDGE: std::get<18>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_SPAN_INITIATE: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<19>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_SPAN_CERTIFY: + case ShardMessageKind::REMOVE_SPAN_INITIATE: std::get<20>(_data).unpack(buf); break; - case ShardMessageKind::SWAP_BLOCKS: + case ShardMessageKind::REMOVE_SPAN_CERTIFY: std::get<21>(_data).unpack(buf); break; - case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SWAP_BLOCKS: std::get<22>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_INODE: + case ShardMessageKind::BLOCK_SERVICE_FILES: std::get<23>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::REMOVE_INODE: std::get<24>(_data).unpack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<25>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<26>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<27>(_data).unpack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<28>(_data).unpack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<29>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<30>(_data).unpack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<31>(_data).unpack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<32>(_data).unpack(buf); + break; default: throw BINCODE_EXCEPTION("bad ShardMessageKind kind %s", kind); } @@ -3703,6 +3863,9 @@ std::ostream& operator<<(std::ostream& out, const ShardRespContainer& x) { case ShardMessageKind::SET_DIRECTORY_INFO: out << x.getSetDirectoryInfo(); break; + case ShardMessageKind::SNAPSHOT_LOOKUP: + out << x.getSnapshotLookup(); + break; case ShardMessageKind::VISIT_DIRECTORIES: out << x.getVisitDirectories(); break; @@ -3718,8 +3881,8 @@ std::ostream& operator<<(std::ostream& out, const ShardRespContainer& x) { case ShardMessageKind::REMOVE_NON_OWNED_EDGE: out << x.getRemoveNonOwnedEdge(); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: - out << x.getIntraShardHardFileUnlink(); + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: + out << x.getSameShardHardFileUnlink(); break; case ShardMessageKind::REMOVE_SPAN_INITIATE: out << x.getRemoveSpanInitiate(); @@ -3783,8 +3946,8 @@ std::ostream& operator<<(std::ostream& out, CDCMessageKind kind) { case CDCMessageKind::HARD_UNLINK_DIRECTORY: out << "HARD_UNLINK_DIRECTORY"; break; - case CDCMessageKind::HARD_UNLINK_FILE: - out << "HARD_UNLINK_FILE"; + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: + out << "CROSS_SHARD_HARD_UNLINK_FILE"; break; default: out << "CDCMessageKind(" << ((int)kind) << ")"; @@ -3843,12 +4006,12 @@ HardUnlinkDirectoryReq& CDCReqContainer::setHardUnlinkDirectory() { x.clear(); return x; } -const HardUnlinkFileReq& CDCReqContainer::getHardUnlinkFile() const { - ALWAYS_ASSERT(_kind == CDCMessageKind::HARD_UNLINK_FILE, "%s != %s", _kind, CDCMessageKind::HARD_UNLINK_FILE); +const CrossShardHardUnlinkFileReq& CDCReqContainer::getCrossShardHardUnlinkFile() const { + ALWAYS_ASSERT(_kind == CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE, "%s != %s", _kind, CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE); return std::get<5>(_data); } -HardUnlinkFileReq& CDCReqContainer::setHardUnlinkFile() { - _kind = CDCMessageKind::HARD_UNLINK_FILE; +CrossShardHardUnlinkFileReq& CDCReqContainer::setCrossShardHardUnlinkFile() { + _kind = CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE; auto& x = std::get<5>(_data); x.clear(); return x; @@ -3865,7 +4028,7 @@ size_t CDCReqContainer::packedSize() const { return std::get<3>(_data).packedSize(); case CDCMessageKind::HARD_UNLINK_DIRECTORY: return std::get<4>(_data).packedSize(); - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: return std::get<5>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad CDCMessageKind kind %s", _kind); @@ -3889,7 +4052,7 @@ void CDCReqContainer::pack(BincodeBuf& buf) const { case CDCMessageKind::HARD_UNLINK_DIRECTORY: std::get<4>(_data).pack(buf); break; - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: std::get<5>(_data).pack(buf); break; default: @@ -3915,7 +4078,7 @@ void CDCReqContainer::unpack(BincodeBuf& buf, CDCMessageKind kind) { case CDCMessageKind::HARD_UNLINK_DIRECTORY: std::get<4>(_data).unpack(buf); break; - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: std::get<5>(_data).unpack(buf); break; default: @@ -3940,8 +4103,8 @@ std::ostream& operator<<(std::ostream& out, const CDCReqContainer& x) { case CDCMessageKind::HARD_UNLINK_DIRECTORY: out << x.getHardUnlinkDirectory(); break; - case CDCMessageKind::HARD_UNLINK_FILE: - out << x.getHardUnlinkFile(); + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: + out << x.getCrossShardHardUnlinkFile(); break; default: throw EGGS_EXCEPTION("bad CDCMessageKind kind %s", x.kind()); @@ -3999,12 +4162,12 @@ HardUnlinkDirectoryResp& CDCRespContainer::setHardUnlinkDirectory() { x.clear(); return x; } -const HardUnlinkFileResp& CDCRespContainer::getHardUnlinkFile() const { - ALWAYS_ASSERT(_kind == CDCMessageKind::HARD_UNLINK_FILE, "%s != %s", _kind, CDCMessageKind::HARD_UNLINK_FILE); +const CrossShardHardUnlinkFileResp& CDCRespContainer::getCrossShardHardUnlinkFile() const { + ALWAYS_ASSERT(_kind == CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE, "%s != %s", _kind, CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE); return std::get<5>(_data); } -HardUnlinkFileResp& CDCRespContainer::setHardUnlinkFile() { - _kind = CDCMessageKind::HARD_UNLINK_FILE; +CrossShardHardUnlinkFileResp& CDCRespContainer::setCrossShardHardUnlinkFile() { + _kind = CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE; auto& x = std::get<5>(_data); x.clear(); return x; @@ -4021,7 +4184,7 @@ size_t CDCRespContainer::packedSize() const { return std::get<3>(_data).packedSize(); case CDCMessageKind::HARD_UNLINK_DIRECTORY: return std::get<4>(_data).packedSize(); - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: return std::get<5>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad CDCMessageKind kind %s", _kind); @@ -4045,7 +4208,7 @@ void CDCRespContainer::pack(BincodeBuf& buf) const { case CDCMessageKind::HARD_UNLINK_DIRECTORY: std::get<4>(_data).pack(buf); break; - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: std::get<5>(_data).pack(buf); break; default: @@ -4071,7 +4234,7 @@ void CDCRespContainer::unpack(BincodeBuf& buf, CDCMessageKind kind) { case CDCMessageKind::HARD_UNLINK_DIRECTORY: std::get<4>(_data).unpack(buf); break; - case CDCMessageKind::HARD_UNLINK_FILE: + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: std::get<5>(_data).unpack(buf); break; default: @@ -4096,8 +4259,8 @@ std::ostream& operator<<(std::ostream& out, const CDCRespContainer& x) { case CDCMessageKind::HARD_UNLINK_DIRECTORY: out << x.getHardUnlinkDirectory(); break; - case CDCMessageKind::HARD_UNLINK_FILE: - out << x.getHardUnlinkFile(); + case CDCMessageKind::CROSS_SHARD_HARD_UNLINK_FILE: + out << x.getCrossShardHardUnlinkFile(); break; default: throw EGGS_EXCEPTION("bad CDCMessageKind kind %s", x.kind()); @@ -4146,8 +4309,8 @@ std::ostream& operator<<(std::ostream& out, ShardLogEntryKind err) { case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: out << "REMOVE_NON_OWNED_EDGE"; break; - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: - out << "INTRA_SHARD_HARD_FILE_UNLINK"; + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: + out << "SAME_SHARD_HARD_FILE_UNLINK"; break; case ShardLogEntryKind::REMOVE_SPAN_INITIATE: out << "REMOVE_SPAN_INITIATE"; @@ -4230,32 +4393,36 @@ std::ostream& operator<<(std::ostream& out, const LinkFileEntry& x) { } void SameDirectoryRenameEntry::pack(BincodeBuf& buf) const { - targetId.pack(buf); dirId.pack(buf); + targetId.pack(buf); buf.packBytes(oldName); + oldCreationTime.pack(buf); buf.packBytes(newName); } void SameDirectoryRenameEntry::unpack(BincodeBuf& buf) { - targetId.unpack(buf); dirId.unpack(buf); + targetId.unpack(buf); buf.unpackBytes(oldName); + oldCreationTime.unpack(buf); buf.unpackBytes(newName); } void SameDirectoryRenameEntry::clear() { - targetId = InodeId(); dirId = InodeId(); + targetId = InodeId(); oldName.clear(); + oldCreationTime = EggsTime(); newName.clear(); } bool SameDirectoryRenameEntry::operator==(const SameDirectoryRenameEntry& rhs) const { - if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; + if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if (oldName != rhs.oldName) { return false; }; + if ((EggsTime)this->oldCreationTime != (EggsTime)rhs.oldCreationTime) { return false; }; if (newName != rhs.newName) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SameDirectoryRenameEntry& x) { - out << "SameDirectoryRenameEntry(" << "TargetId=" << x.targetId << ", " << "DirId=" << x.dirId << ", " << "OldName=" << x.oldName << ", " << "NewName=" << x.newName << ")"; + out << "SameDirectoryRenameEntry(" << "DirId=" << x.dirId << ", " << "TargetId=" << x.targetId << ", " << "OldName=" << x.oldName << ", " << "OldCreationTime=" << x.oldCreationTime << ", " << "NewName=" << x.newName << ")"; return out; } @@ -4263,25 +4430,29 @@ void SoftUnlinkFileEntry::pack(BincodeBuf& buf) const { ownerId.pack(buf); fileId.pack(buf); buf.packBytes(name); + creationTime.pack(buf); } void SoftUnlinkFileEntry::unpack(BincodeBuf& buf) { ownerId.unpack(buf); fileId.unpack(buf); buf.unpackBytes(name); + creationTime.unpack(buf); } void SoftUnlinkFileEntry::clear() { ownerId = InodeId(); fileId = InodeId(); name.clear(); + creationTime = EggsTime(); } bool SoftUnlinkFileEntry::operator==(const SoftUnlinkFileEntry& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->fileId != (InodeId)rhs.fileId) { return false; }; if (name != rhs.name) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const SoftUnlinkFileEntry& x) { - out << "SoftUnlinkFileEntry(" << "OwnerId=" << x.ownerId << ", " << "FileId=" << x.fileId << ", " << "Name=" << x.name << ")"; + out << "SoftUnlinkFileEntry(" << "OwnerId=" << x.ownerId << ", " << "FileId=" << x.fileId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; return out; } @@ -4315,85 +4486,89 @@ void CreateLockedCurrentEdgeEntry::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); targetId.pack(buf); - creationTime.pack(buf); } void CreateLockedCurrentEdgeEntry::unpack(BincodeBuf& buf) { dirId.unpack(buf); buf.unpackBytes(name); targetId.unpack(buf); - creationTime.unpack(buf); } void CreateLockedCurrentEdgeEntry::clear() { dirId = InodeId(); name.clear(); targetId = InodeId(); - creationTime = EggsTime(); } bool CreateLockedCurrentEdgeEntry::operator==(const CreateLockedCurrentEdgeEntry& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (name != rhs.name) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; - if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeEntry& x) { - out << "CreateLockedCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ", " << "CreationTime=" << x.creationTime << ")"; + out << "CreateLockedCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ")"; return out; } void UnlockCurrentEdgeEntry::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); + creationTime.pack(buf); targetId.pack(buf); buf.packScalar(wasMoved); } void UnlockCurrentEdgeEntry::unpack(BincodeBuf& buf) { dirId.unpack(buf); buf.unpackBytes(name); + creationTime.unpack(buf); targetId.unpack(buf); wasMoved = buf.unpackScalar(); } void UnlockCurrentEdgeEntry::clear() { dirId = InodeId(); name.clear(); + creationTime = EggsTime(); targetId = InodeId(); wasMoved = bool(0); } bool UnlockCurrentEdgeEntry::operator==(const UnlockCurrentEdgeEntry& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (name != rhs.name) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if ((bool)this->wasMoved != (bool)rhs.wasMoved) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const UnlockCurrentEdgeEntry& x) { - out << "UnlockCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ", " << "WasMoved=" << x.wasMoved << ")"; + out << "UnlockCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ", " << "TargetId=" << x.targetId << ", " << "WasMoved=" << x.wasMoved << ")"; return out; } void LockCurrentEdgeEntry::pack(BincodeBuf& buf) const { dirId.pack(buf); buf.packBytes(name); + creationTime.pack(buf); targetId.pack(buf); } void LockCurrentEdgeEntry::unpack(BincodeBuf& buf) { dirId.unpack(buf); buf.unpackBytes(name); + creationTime.unpack(buf); targetId.unpack(buf); } void LockCurrentEdgeEntry::clear() { dirId = InodeId(); name.clear(); + creationTime = EggsTime(); targetId = InodeId(); } bool LockCurrentEdgeEntry::operator==(const LockCurrentEdgeEntry& rhs) const { if ((InodeId)this->dirId != (InodeId)rhs.dirId) { return false; }; if (name != rhs.name) { return false; }; + if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; return true; } std::ostream& operator<<(std::ostream& out, const LockCurrentEdgeEntry& x) { - out << "LockCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "TargetId=" << x.targetId << ")"; + out << "LockCurrentEdgeEntry(" << "DirId=" << x.dirId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ", " << "TargetId=" << x.targetId << ")"; return out; } @@ -4511,33 +4686,33 @@ std::ostream& operator<<(std::ostream& out, const RemoveNonOwnedEdgeEntry& x) { return out; } -void IntraShardHardFileUnlinkEntry::pack(BincodeBuf& buf) const { +void SameShardHardFileUnlinkEntry::pack(BincodeBuf& buf) const { ownerId.pack(buf); targetId.pack(buf); buf.packBytes(name); creationTime.pack(buf); } -void IntraShardHardFileUnlinkEntry::unpack(BincodeBuf& buf) { +void SameShardHardFileUnlinkEntry::unpack(BincodeBuf& buf) { ownerId.unpack(buf); targetId.unpack(buf); buf.unpackBytes(name); creationTime.unpack(buf); } -void IntraShardHardFileUnlinkEntry::clear() { +void SameShardHardFileUnlinkEntry::clear() { ownerId = InodeId(); targetId = InodeId(); name.clear(); creationTime = EggsTime(); } -bool IntraShardHardFileUnlinkEntry::operator==(const IntraShardHardFileUnlinkEntry& rhs) const { +bool SameShardHardFileUnlinkEntry::operator==(const SameShardHardFileUnlinkEntry& rhs) const { if ((InodeId)this->ownerId != (InodeId)rhs.ownerId) { return false; }; if ((InodeId)this->targetId != (InodeId)rhs.targetId) { return false; }; if (name != rhs.name) { return false; }; if ((EggsTime)this->creationTime != (EggsTime)rhs.creationTime) { return false; }; return true; } -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkEntry& x) { - out << "IntraShardHardFileUnlinkEntry(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkEntry& x) { + out << "SameShardHardFileUnlinkEntry(" << "OwnerId=" << x.ownerId << ", " << "TargetId=" << x.targetId << ", " << "Name=" << x.name << ", " << "CreationTime=" << x.creationTime << ")"; return out; } @@ -4861,12 +5036,12 @@ RemoveNonOwnedEdgeEntry& ShardLogEntryContainer::setRemoveNonOwnedEdge() { x.clear(); return x; } -const IntraShardHardFileUnlinkEntry& ShardLogEntryContainer::getIntraShardHardFileUnlink() const { - ALWAYS_ASSERT(_kind == ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK); +const SameShardHardFileUnlinkEntry& ShardLogEntryContainer::getSameShardHardFileUnlink() const { + ALWAYS_ASSERT(_kind == ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK, "%s != %s", _kind, ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK); return std::get<13>(_data); } -IntraShardHardFileUnlinkEntry& ShardLogEntryContainer::setIntraShardHardFileUnlink() { - _kind = ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK; +SameShardHardFileUnlinkEntry& ShardLogEntryContainer::setSameShardHardFileUnlink() { + _kind = ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK; auto& x = std::get<13>(_data); x.clear(); return x; @@ -4969,7 +5144,7 @@ size_t ShardLogEntryContainer::packedSize() const { return std::get<11>(_data).packedSize(); case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: return std::get<12>(_data).packedSize(); - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: return std::get<13>(_data).packedSize(); case ShardLogEntryKind::REMOVE_SPAN_INITIATE: return std::get<14>(_data).packedSize(); @@ -5031,7 +5206,7 @@ void ShardLogEntryContainer::pack(BincodeBuf& buf) const { case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: std::get<12>(_data).pack(buf); break; - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<13>(_data).pack(buf); break; case ShardLogEntryKind::REMOVE_SPAN_INITIATE: @@ -5102,7 +5277,7 @@ void ShardLogEntryContainer::unpack(BincodeBuf& buf, ShardLogEntryKind kind) { case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: std::get<12>(_data).unpack(buf); break; - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: std::get<13>(_data).unpack(buf); break; case ShardLogEntryKind::REMOVE_SPAN_INITIATE: @@ -5172,8 +5347,8 @@ std::ostream& operator<<(std::ostream& out, const ShardLogEntryContainer& x) { case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: out << x.getRemoveNonOwnedEdge(); break; - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: - out << x.getIntraShardHardFileUnlink(); + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: + out << x.getSameShardHardFileUnlink(); break; case ShardLogEntryKind::REMOVE_SPAN_INITIATE: out << x.getRemoveSpanInitiate(); diff --git a/cpp/MsgsGen.hpp b/cpp/MsgsGen.hpp index ce0a80d2..0303860e 100644 --- a/cpp/MsgsGen.hpp +++ b/cpp/MsgsGen.hpp @@ -14,31 +14,31 @@ enum class EggsError : uint16_t { FILE_NOT_FOUND = 17, DIRECTORY_NOT_FOUND = 18, NAME_NOT_FOUND = 19, - TYPE_IS_DIRECTORY = 20, - TYPE_IS_NOT_DIRECTORY = 21, - BAD_COOKIE = 22, - INCONSISTENT_STORAGE_CLASS_PARITY = 23, - LAST_SPAN_STATE_NOT_CLEAN = 24, - COULD_NOT_PICK_BLOCK_SERVICES = 25, - BAD_SPAN_BODY = 26, - SPAN_NOT_FOUND = 27, - BLOCK_SERVICE_NOT_FOUND = 28, - CANNOT_CERTIFY_BLOCKLESS_SPAN = 29, - BAD_NUMBER_OF_BLOCKS_PROOFS = 30, - BAD_BLOCK_PROOF = 31, - CANNOT_OVERRIDE_NAME = 32, - NAME_IS_LOCKED = 33, - OLD_NAME_IS_LOCKED = 34, - NEW_NAME_IS_LOCKED = 35, + EDGE_NOT_FOUND = 20, + EDGE_IS_LOCKED = 21, + TYPE_IS_DIRECTORY = 22, + TYPE_IS_NOT_DIRECTORY = 23, + BAD_COOKIE = 24, + INCONSISTENT_STORAGE_CLASS_PARITY = 25, + LAST_SPAN_STATE_NOT_CLEAN = 26, + COULD_NOT_PICK_BLOCK_SERVICES = 27, + BAD_SPAN_BODY = 28, + SPAN_NOT_FOUND = 29, + BLOCK_SERVICE_NOT_FOUND = 30, + CANNOT_CERTIFY_BLOCKLESS_SPAN = 31, + BAD_NUMBER_OF_BLOCKS_PROOFS = 32, + BAD_BLOCK_PROOF = 33, + CANNOT_OVERRIDE_NAME = 34, + NAME_IS_LOCKED = 35, MTIME_IS_TOO_RECENT = 36, MISMATCHING_TARGET = 37, MISMATCHING_OWNER = 38, - DIRECTORY_NOT_EMPTY = 39, - FILE_IS_TRANSIENT = 40, - OLD_DIRECTORY_NOT_FOUND = 41, - NEW_DIRECTORY_NOT_FOUND = 42, - LOOP_IN_DIRECTORY_RENAME = 43, - EDGE_NOT_FOUND = 44, + MISMATCHING_CREATION_TIME = 39, + DIRECTORY_NOT_EMPTY = 40, + FILE_IS_TRANSIENT = 41, + OLD_DIRECTORY_NOT_FOUND = 42, + NEW_DIRECTORY_NOT_FOUND = 43, + LOOP_IN_DIRECTORY_RENAME = 44, DIRECTORY_HAS_OWNER = 45, FILE_IS_NOT_TRANSIENT = 46, FILE_NOT_EMPTY = 47, @@ -50,10 +50,10 @@ enum class EggsError : uint16_t { MORE_RECENT_SNAPSHOT_EDGE = 53, MORE_RECENT_CURRENT_EDGE = 54, BAD_DIRECTORY_INFO = 55, - CREATION_TIME_TOO_RECENT = 56, - DEADLINE_NOT_PASSED = 57, - SAME_SOURCE_AND_DESTINATION = 58, - SAME_DIRECTORIES = 59, + DEADLINE_NOT_PASSED = 56, + SAME_SOURCE_AND_DESTINATION = 57, + SAME_DIRECTORIES = 58, + SAME_SHARD = 59, }; std::ostream& operator<<(std::ostream& out, EggsError err); @@ -448,6 +448,27 @@ struct EntryNewBlockInfo { std::ostream& operator<<(std::ostream& out, const EntryNewBlockInfo& x); +struct SnapshotLookupEdge { + InodeIdExtra targetId; + EggsTime creationTime; + + static constexpr uint16_t STATIC_SIZE = 8 + 8; // targetId + creationTime + + SnapshotLookupEdge() { clear(); } + uint16_t packedSize() const { + uint16_t _size = 0; + _size += 8; // targetId + _size += 8; // creationTime + return _size; + } + void pack(BincodeBuf& buf) const; + void unpack(BincodeBuf& buf); + void clear(); + bool operator==(const SnapshotLookupEdge&rhs) const; +}; + +std::ostream& operator<<(std::ostream& out, const SnapshotLookupEdge& x); + struct LookupReq { InodeId dirId; BincodeBytes name; @@ -824,12 +845,14 @@ struct LinkFileReq { std::ostream& operator<<(std::ostream& out, const LinkFileReq& x); struct LinkFileResp { + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 0; // + static constexpr uint16_t STATIC_SIZE = 8; // creationTime LinkFileResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -844,8 +867,9 @@ struct SoftUnlinkFileReq { InodeId ownerId; InodeId fileId; BincodeBytes name; + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE; // ownerId + fileId + name + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8; // ownerId + fileId + name + creationTime SoftUnlinkFileReq() { clear(); } uint16_t packedSize() const { @@ -853,6 +877,7 @@ struct SoftUnlinkFileReq { _size += 8; // ownerId _size += 8; // fileId _size += name.packedSize(); // name + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -928,9 +953,10 @@ struct SameDirectoryRenameReq { InodeId targetId; InodeId dirId; BincodeBytes oldName; + EggsTime oldCreationTime; BincodeBytes newName; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + BincodeBytes::STATIC_SIZE; // targetId + dirId + oldName + newName + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + BincodeBytes::STATIC_SIZE; // targetId + dirId + oldName + oldCreationTime + newName SameDirectoryRenameReq() { clear(); } uint16_t packedSize() const { @@ -938,6 +964,7 @@ struct SameDirectoryRenameReq { _size += 8; // targetId _size += 8; // dirId _size += oldName.packedSize(); // oldName + _size += 8; // oldCreationTime _size += newName.packedSize(); // newName return _size; } @@ -950,12 +977,14 @@ struct SameDirectoryRenameReq { std::ostream& operator<<(std::ostream& out, const SameDirectoryRenameReq& x); struct SameDirectoryRenameResp { + EggsTime newCreationTime; - static constexpr uint16_t STATIC_SIZE = 0; // + static constexpr uint16_t STATIC_SIZE = 8; // newCreationTime SameDirectoryRenameResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; + _size += 8; // newCreationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1004,6 +1033,50 @@ struct SetDirectoryInfoResp { std::ostream& operator<<(std::ostream& out, const SetDirectoryInfoResp& x); +struct SnapshotLookupReq { + InodeId dirId; + BincodeBytes name; + EggsTime startFrom; + + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8; // dirId + name + startFrom + + SnapshotLookupReq() { clear(); } + uint16_t packedSize() const { + uint16_t _size = 0; + _size += 8; // dirId + _size += name.packedSize(); // name + _size += 8; // startFrom + return _size; + } + void pack(BincodeBuf& buf) const; + void unpack(BincodeBuf& buf); + void clear(); + bool operator==(const SnapshotLookupReq&rhs) const; +}; + +std::ostream& operator<<(std::ostream& out, const SnapshotLookupReq& x); + +struct SnapshotLookupResp { + EggsTime nextTime; + BincodeList edges; + + static constexpr uint16_t STATIC_SIZE = 8 + BincodeList::STATIC_SIZE; // nextTime + edges + + SnapshotLookupResp() { clear(); } + uint16_t packedSize() const { + uint16_t _size = 0; + _size += 8; // nextTime + _size += edges.packedSize(); // edges + return _size; + } + void pack(BincodeBuf& buf) const; + void unpack(BincodeBuf& buf); + void clear(); + bool operator==(const SnapshotLookupResp&rhs) const; +}; + +std::ostream& operator<<(std::ostream& out, const SnapshotLookupResp& x); + struct VisitDirectoriesReq { InodeId beginId; @@ -1208,7 +1281,7 @@ struct RemoveNonOwnedEdgeResp { std::ostream& operator<<(std::ostream& out, const RemoveNonOwnedEdgeResp& x); -struct IntraShardHardFileUnlinkReq { +struct SameShardHardFileUnlinkReq { InodeId ownerId; InodeId targetId; BincodeBytes name; @@ -1216,7 +1289,7 @@ struct IntraShardHardFileUnlinkReq { static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8; // ownerId + targetId + name + creationTime - IntraShardHardFileUnlinkReq() { clear(); } + SameShardHardFileUnlinkReq() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // ownerId @@ -1228,16 +1301,16 @@ struct IntraShardHardFileUnlinkReq { void pack(BincodeBuf& buf) const; void unpack(BincodeBuf& buf); void clear(); - bool operator==(const IntraShardHardFileUnlinkReq&rhs) const; + bool operator==(const SameShardHardFileUnlinkReq&rhs) const; }; -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkReq& x); +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkReq& x); -struct IntraShardHardFileUnlinkResp { +struct SameShardHardFileUnlinkResp { static constexpr uint16_t STATIC_SIZE = 0; // - IntraShardHardFileUnlinkResp() { clear(); } + SameShardHardFileUnlinkResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; return _size; @@ -1245,10 +1318,10 @@ struct IntraShardHardFileUnlinkResp { void pack(BincodeBuf& buf) const; void unpack(BincodeBuf& buf); void clear(); - bool operator==(const IntraShardHardFileUnlinkResp&rhs) const; + bool operator==(const SameShardHardFileUnlinkResp&rhs) const; }; -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkResp& x); +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkResp& x); struct RemoveSpanInitiateReq { InodeId fileId; @@ -1578,9 +1651,8 @@ struct CreateLockedCurrentEdgeReq { InodeId dirId; BincodeBytes name; InodeId targetId; - EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 8; // dirId + name + targetId + creationTime + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8; // dirId + name + targetId CreateLockedCurrentEdgeReq() { clear(); } uint16_t packedSize() const { @@ -1588,7 +1660,6 @@ struct CreateLockedCurrentEdgeReq { _size += 8; // dirId _size += name.packedSize(); // name _size += 8; // targetId - _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1600,12 +1671,14 @@ struct CreateLockedCurrentEdgeReq { std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeReq& x); struct CreateLockedCurrentEdgeResp { + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 0; // + static constexpr uint16_t STATIC_SIZE = 8; // creationTime CreateLockedCurrentEdgeResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1618,17 +1691,19 @@ std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeResp& x struct LockCurrentEdgeReq { InodeId dirId; - BincodeBytes name; InodeId targetId; + EggsTime creationTime; + BincodeBytes name; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8; // dirId + name + targetId + static constexpr uint16_t STATIC_SIZE = 8 + 8 + 8 + BincodeBytes::STATIC_SIZE; // dirId + targetId + creationTime + name LockCurrentEdgeReq() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // dirId - _size += name.packedSize(); // name _size += 8; // targetId + _size += 8; // creationTime + _size += name.packedSize(); // name return _size; } void pack(BincodeBuf& buf) const; @@ -1659,16 +1734,18 @@ std::ostream& operator<<(std::ostream& out, const LockCurrentEdgeResp& x); struct UnlockCurrentEdgeReq { InodeId dirId; BincodeBytes name; + EggsTime creationTime; InodeId targetId; bool wasMoved; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 1; // dirId + name + targetId + wasMoved + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 8 + 1; // dirId + name + creationTime + targetId + wasMoved UnlockCurrentEdgeReq() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // dirId _size += name.packedSize(); // name + _size += 8; // creationTime _size += 8; // targetId _size += 1; // wasMoved return _size; @@ -1803,13 +1880,15 @@ std::ostream& operator<<(std::ostream& out, const MakeDirectoryReq& x); struct MakeDirectoryResp { InodeId id; + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 8; // id + static constexpr uint16_t STATIC_SIZE = 8 + 8; // id + creationTime MakeDirectoryResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // id + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1824,10 +1903,11 @@ struct RenameFileReq { InodeId targetId; InodeId oldOwnerId; BincodeBytes oldName; + EggsTime oldCreationTime; InodeId newOwnerId; BincodeBytes newName; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + BincodeBytes::STATIC_SIZE; // targetId + oldOwnerId + oldName + newOwnerId + newName + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + 8 + BincodeBytes::STATIC_SIZE; // targetId + oldOwnerId + oldName + oldCreationTime + newOwnerId + newName RenameFileReq() { clear(); } uint16_t packedSize() const { @@ -1835,6 +1915,7 @@ struct RenameFileReq { _size += 8; // targetId _size += 8; // oldOwnerId _size += oldName.packedSize(); // oldName + _size += 8; // oldCreationTime _size += 8; // newOwnerId _size += newName.packedSize(); // newName return _size; @@ -1848,12 +1929,14 @@ struct RenameFileReq { std::ostream& operator<<(std::ostream& out, const RenameFileReq& x); struct RenameFileResp { + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 0; // + static constexpr uint16_t STATIC_SIZE = 8; // creationTime RenameFileResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1867,15 +1950,17 @@ std::ostream& operator<<(std::ostream& out, const RenameFileResp& x); struct SoftUnlinkDirectoryReq { InodeId ownerId; InodeId targetId; + EggsTime creationTime; BincodeBytes name; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE; // ownerId + targetId + name + static constexpr uint16_t STATIC_SIZE = 8 + 8 + 8 + BincodeBytes::STATIC_SIZE; // ownerId + targetId + creationTime + name SoftUnlinkDirectoryReq() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // ownerId _size += 8; // targetId + _size += 8; // creationTime _size += name.packedSize(); // name return _size; } @@ -1908,10 +1993,11 @@ struct RenameDirectoryReq { InodeId targetId; InodeId oldOwnerId; BincodeBytes oldName; + EggsTime oldCreationTime; InodeId newOwnerId; BincodeBytes newName; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + BincodeBytes::STATIC_SIZE; // targetId + oldOwnerId + oldName + newOwnerId + newName + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + 8 + BincodeBytes::STATIC_SIZE; // targetId + oldOwnerId + oldName + oldCreationTime + newOwnerId + newName RenameDirectoryReq() { clear(); } uint16_t packedSize() const { @@ -1919,6 +2005,7 @@ struct RenameDirectoryReq { _size += 8; // targetId _size += 8; // oldOwnerId _size += oldName.packedSize(); // oldName + _size += 8; // oldCreationTime _size += 8; // newOwnerId _size += newName.packedSize(); // newName return _size; @@ -1932,12 +2019,14 @@ struct RenameDirectoryReq { std::ostream& operator<<(std::ostream& out, const RenameDirectoryReq& x); struct RenameDirectoryResp { + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 0; // + static constexpr uint16_t STATIC_SIZE = 8; // creationTime RenameDirectoryResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -1984,7 +2073,7 @@ struct HardUnlinkDirectoryResp { std::ostream& operator<<(std::ostream& out, const HardUnlinkDirectoryResp& x); -struct HardUnlinkFileReq { +struct CrossShardHardUnlinkFileReq { InodeId ownerId; InodeId targetId; BincodeBytes name; @@ -1992,7 +2081,7 @@ struct HardUnlinkFileReq { static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8; // ownerId + targetId + name + creationTime - HardUnlinkFileReq() { clear(); } + CrossShardHardUnlinkFileReq() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // ownerId @@ -2004,16 +2093,16 @@ struct HardUnlinkFileReq { void pack(BincodeBuf& buf) const; void unpack(BincodeBuf& buf); void clear(); - bool operator==(const HardUnlinkFileReq&rhs) const; + bool operator==(const CrossShardHardUnlinkFileReq&rhs) const; }; -std::ostream& operator<<(std::ostream& out, const HardUnlinkFileReq& x); +std::ostream& operator<<(std::ostream& out, const CrossShardHardUnlinkFileReq& x); -struct HardUnlinkFileResp { +struct CrossShardHardUnlinkFileResp { static constexpr uint16_t STATIC_SIZE = 0; // - HardUnlinkFileResp() { clear(); } + CrossShardHardUnlinkFileResp() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; return _size; @@ -2021,10 +2110,10 @@ struct HardUnlinkFileResp { void pack(BincodeBuf& buf) const; void unpack(BincodeBuf& buf); void clear(); - bool operator==(const HardUnlinkFileResp&rhs) const; + bool operator==(const CrossShardHardUnlinkFileResp&rhs) const; }; -std::ostream& operator<<(std::ostream& out, const HardUnlinkFileResp& x); +std::ostream& operator<<(std::ostream& out, const CrossShardHardUnlinkFileResp& x); enum class ShardMessageKind : uint8_t { ERROR = 0, @@ -2041,12 +2130,13 @@ enum class ShardMessageKind : uint8_t { FILE_SPANS = 13, SAME_DIRECTORY_RENAME = 14, SET_DIRECTORY_INFO = 15, + SNAPSHOT_LOOKUP = 9, VISIT_DIRECTORIES = 21, VISIT_FILES = 32, VISIT_TRANSIENT_FILES = 22, FULL_READ_DIR = 33, REMOVE_NON_OWNED_EDGE = 23, - INTRA_SHARD_HARD_FILE_UNLINK = 24, + SAME_SHARD_HARD_FILE_UNLINK = 24, REMOVE_SPAN_INITIATE = 25, REMOVE_SPAN_CERTIFY = 26, SWAP_BLOCKS = 34, @@ -2067,7 +2157,7 @@ std::ostream& operator<<(std::ostream& out, ShardMessageKind kind); struct ShardReqContainer { private: ShardMessageKind _kind = (ShardMessageKind)0; - std::tuple _data; + std::tuple _data; public: ShardMessageKind kind() const { return _kind; } const LookupReq& getLookup() const; @@ -2096,6 +2186,8 @@ public: SameDirectoryRenameReq& setSameDirectoryRename(); const SetDirectoryInfoReq& getSetDirectoryInfo() const; SetDirectoryInfoReq& setSetDirectoryInfo(); + const SnapshotLookupReq& getSnapshotLookup() const; + SnapshotLookupReq& setSnapshotLookup(); const VisitDirectoriesReq& getVisitDirectories() const; VisitDirectoriesReq& setVisitDirectories(); const VisitFilesReq& getVisitFiles() const; @@ -2106,8 +2198,8 @@ public: FullReadDirReq& setFullReadDir(); const RemoveNonOwnedEdgeReq& getRemoveNonOwnedEdge() const; RemoveNonOwnedEdgeReq& setRemoveNonOwnedEdge(); - const IntraShardHardFileUnlinkReq& getIntraShardHardFileUnlink() const; - IntraShardHardFileUnlinkReq& setIntraShardHardFileUnlink(); + const SameShardHardFileUnlinkReq& getSameShardHardFileUnlink() const; + SameShardHardFileUnlinkReq& setSameShardHardFileUnlink(); const RemoveSpanInitiateReq& getRemoveSpanInitiate() const; RemoveSpanInitiateReq& setRemoveSpanInitiate(); const RemoveSpanCertifyReq& getRemoveSpanCertify() const; @@ -2147,7 +2239,7 @@ std::ostream& operator<<(std::ostream& out, const ShardReqContainer& x); struct ShardRespContainer { private: ShardMessageKind _kind = (ShardMessageKind)0; - std::tuple _data; + std::tuple _data; public: ShardMessageKind kind() const { return _kind; } const LookupResp& getLookup() const; @@ -2176,6 +2268,8 @@ public: SameDirectoryRenameResp& setSameDirectoryRename(); const SetDirectoryInfoResp& getSetDirectoryInfo() const; SetDirectoryInfoResp& setSetDirectoryInfo(); + const SnapshotLookupResp& getSnapshotLookup() const; + SnapshotLookupResp& setSnapshotLookup(); const VisitDirectoriesResp& getVisitDirectories() const; VisitDirectoriesResp& setVisitDirectories(); const VisitFilesResp& getVisitFiles() const; @@ -2186,8 +2280,8 @@ public: FullReadDirResp& setFullReadDir(); const RemoveNonOwnedEdgeResp& getRemoveNonOwnedEdge() const; RemoveNonOwnedEdgeResp& setRemoveNonOwnedEdge(); - const IntraShardHardFileUnlinkResp& getIntraShardHardFileUnlink() const; - IntraShardHardFileUnlinkResp& setIntraShardHardFileUnlink(); + const SameShardHardFileUnlinkResp& getSameShardHardFileUnlink() const; + SameShardHardFileUnlinkResp& setSameShardHardFileUnlink(); const RemoveSpanInitiateResp& getRemoveSpanInitiate() const; RemoveSpanInitiateResp& setRemoveSpanInitiate(); const RemoveSpanCertifyResp& getRemoveSpanCertify() const; @@ -2231,7 +2325,7 @@ enum class CDCMessageKind : uint8_t { SOFT_UNLINK_DIRECTORY = 3, RENAME_DIRECTORY = 4, HARD_UNLINK_DIRECTORY = 5, - HARD_UNLINK_FILE = 6, + CROSS_SHARD_HARD_UNLINK_FILE = 6, }; std::ostream& operator<<(std::ostream& out, CDCMessageKind kind); @@ -2239,7 +2333,7 @@ std::ostream& operator<<(std::ostream& out, CDCMessageKind kind); struct CDCReqContainer { private: CDCMessageKind _kind = (CDCMessageKind)0; - std::tuple _data; + std::tuple _data; public: CDCMessageKind kind() const { return _kind; } const MakeDirectoryReq& getMakeDirectory() const; @@ -2252,8 +2346,8 @@ public: RenameDirectoryReq& setRenameDirectory(); const HardUnlinkDirectoryReq& getHardUnlinkDirectory() const; HardUnlinkDirectoryReq& setHardUnlinkDirectory(); - const HardUnlinkFileReq& getHardUnlinkFile() const; - HardUnlinkFileReq& setHardUnlinkFile(); + const CrossShardHardUnlinkFileReq& getCrossShardHardUnlinkFile() const; + CrossShardHardUnlinkFileReq& setCrossShardHardUnlinkFile(); void clear() { _kind = (CDCMessageKind)0; }; @@ -2267,7 +2361,7 @@ std::ostream& operator<<(std::ostream& out, const CDCReqContainer& x); struct CDCRespContainer { private: CDCMessageKind _kind = (CDCMessageKind)0; - std::tuple _data; + std::tuple _data; public: CDCMessageKind kind() const { return _kind; } const MakeDirectoryResp& getMakeDirectory() const; @@ -2280,8 +2374,8 @@ public: RenameDirectoryResp& setRenameDirectory(); const HardUnlinkDirectoryResp& getHardUnlinkDirectory() const; HardUnlinkDirectoryResp& setHardUnlinkDirectory(); - const HardUnlinkFileResp& getHardUnlinkFile() const; - HardUnlinkFileResp& setHardUnlinkFile(); + const CrossShardHardUnlinkFileResp& getCrossShardHardUnlinkFile() const; + CrossShardHardUnlinkFileResp& setCrossShardHardUnlinkFile(); void clear() { _kind = (CDCMessageKind)0; }; @@ -2306,7 +2400,7 @@ enum class ShardLogEntryKind : uint16_t { SET_DIRECTORY_OWNER = 11, SET_DIRECTORY_INFO = 12, REMOVE_NON_OWNED_EDGE = 13, - INTRA_SHARD_HARD_FILE_UNLINK = 14, + SAME_SHARD_HARD_FILE_UNLINK = 14, REMOVE_SPAN_INITIATE = 15, UPDATE_BLOCK_SERVICES = 16, ADD_SPAN_INITIATE = 17, @@ -2365,19 +2459,21 @@ struct LinkFileEntry { std::ostream& operator<<(std::ostream& out, const LinkFileEntry& x); struct SameDirectoryRenameEntry { - InodeId targetId; InodeId dirId; + InodeId targetId; BincodeBytes oldName; + EggsTime oldCreationTime; BincodeBytes newName; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + BincodeBytes::STATIC_SIZE; // targetId + dirId + oldName + newName + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8 + BincodeBytes::STATIC_SIZE; // dirId + targetId + oldName + oldCreationTime + newName SameDirectoryRenameEntry() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; - _size += 8; // targetId _size += 8; // dirId + _size += 8; // targetId _size += oldName.packedSize(); // oldName + _size += 8; // oldCreationTime _size += newName.packedSize(); // newName return _size; } @@ -2393,8 +2489,9 @@ struct SoftUnlinkFileEntry { InodeId ownerId; InodeId fileId; BincodeBytes name; + EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE; // ownerId + fileId + name + static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8; // ownerId + fileId + name + creationTime SoftUnlinkFileEntry() { clear(); } uint16_t packedSize() const { @@ -2402,6 +2499,7 @@ struct SoftUnlinkFileEntry { _size += 8; // ownerId _size += 8; // fileId _size += name.packedSize(); // name + _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -2439,9 +2537,8 @@ struct CreateLockedCurrentEdgeEntry { InodeId dirId; BincodeBytes name; InodeId targetId; - EggsTime creationTime; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 8; // dirId + name + targetId + creationTime + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8; // dirId + name + targetId CreateLockedCurrentEdgeEntry() { clear(); } uint16_t packedSize() const { @@ -2449,7 +2546,6 @@ struct CreateLockedCurrentEdgeEntry { _size += 8; // dirId _size += name.packedSize(); // name _size += 8; // targetId - _size += 8; // creationTime return _size; } void pack(BincodeBuf& buf) const; @@ -2463,16 +2559,18 @@ std::ostream& operator<<(std::ostream& out, const CreateLockedCurrentEdgeEntry& struct UnlockCurrentEdgeEntry { InodeId dirId; BincodeBytes name; + EggsTime creationTime; InodeId targetId; bool wasMoved; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 1; // dirId + name + targetId + wasMoved + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 8 + 1; // dirId + name + creationTime + targetId + wasMoved UnlockCurrentEdgeEntry() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // dirId _size += name.packedSize(); // name + _size += 8; // creationTime _size += 8; // targetId _size += 1; // wasMoved return _size; @@ -2488,15 +2586,17 @@ std::ostream& operator<<(std::ostream& out, const UnlockCurrentEdgeEntry& x); struct LockCurrentEdgeEntry { InodeId dirId; BincodeBytes name; + EggsTime creationTime; InodeId targetId; - static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8; // dirId + name + targetId + static constexpr uint16_t STATIC_SIZE = 8 + BincodeBytes::STATIC_SIZE + 8 + 8; // dirId + name + creationTime + targetId LockCurrentEdgeEntry() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // dirId _size += name.packedSize(); // name + _size += 8; // creationTime _size += 8; // targetId return _size; } @@ -2615,7 +2715,7 @@ struct RemoveNonOwnedEdgeEntry { std::ostream& operator<<(std::ostream& out, const RemoveNonOwnedEdgeEntry& x); -struct IntraShardHardFileUnlinkEntry { +struct SameShardHardFileUnlinkEntry { InodeId ownerId; InodeId targetId; BincodeBytes name; @@ -2623,7 +2723,7 @@ struct IntraShardHardFileUnlinkEntry { static constexpr uint16_t STATIC_SIZE = 8 + 8 + BincodeBytes::STATIC_SIZE + 8; // ownerId + targetId + name + creationTime - IntraShardHardFileUnlinkEntry() { clear(); } + SameShardHardFileUnlinkEntry() { clear(); } uint16_t packedSize() const { uint16_t _size = 0; _size += 8; // ownerId @@ -2635,10 +2735,10 @@ struct IntraShardHardFileUnlinkEntry { void pack(BincodeBuf& buf) const; void unpack(BincodeBuf& buf); void clear(); - bool operator==(const IntraShardHardFileUnlinkEntry&rhs) const; + bool operator==(const SameShardHardFileUnlinkEntry&rhs) const; }; -std::ostream& operator<<(std::ostream& out, const IntraShardHardFileUnlinkEntry& x); +std::ostream& operator<<(std::ostream& out, const SameShardHardFileUnlinkEntry& x); struct RemoveSpanInitiateEntry { InodeId fileId; @@ -2808,7 +2908,7 @@ std::ostream& operator<<(std::ostream& out, const RemoveOwnedSnapshotFileEdgeEnt struct ShardLogEntryContainer { private: ShardLogEntryKind _kind = (ShardLogEntryKind)0; - std::tuple _data; + std::tuple _data; public: ShardLogEntryKind kind() const { return _kind; } const ConstructFileEntry& getConstructFile() const; @@ -2837,8 +2937,8 @@ public: SetDirectoryInfoEntry& setSetDirectoryInfo(); const RemoveNonOwnedEdgeEntry& getRemoveNonOwnedEdge() const; RemoveNonOwnedEdgeEntry& setRemoveNonOwnedEdge(); - const IntraShardHardFileUnlinkEntry& getIntraShardHardFileUnlink() const; - IntraShardHardFileUnlinkEntry& setIntraShardHardFileUnlink(); + const SameShardHardFileUnlinkEntry& getSameShardHardFileUnlink() const; + SameShardHardFileUnlinkEntry& setSameShardHardFileUnlink(); const RemoveSpanInitiateEntry& getRemoveSpanInitiate() const; RemoveSpanInitiateEntry& setRemoveSpanInitiate(); const UpdateBlockServicesEntry& getUpdateBlockServices() const; diff --git a/cpp/Shard.cpp b/cpp/Shard.cpp index ec13db79..96c1ec8f 100644 --- a/cpp/Shard.cpp +++ b/cpp/Shard.cpp @@ -7,6 +7,7 @@ #include #include +#include "Assert.hpp" #include "Bincode.hpp" #include "Crypto.hpp" #include "Exception.hpp" @@ -19,6 +20,7 @@ #include "Shuckle.hpp" #include "Time.hpp" #include "Undertaker.hpp" +#include "splitmix64.hpp" // Data needed to synchronize between the different threads struct ShardShared { @@ -49,14 +51,30 @@ private: ShardId _shid; std::atomic _stop; bool _waitForShuckle; + uint64_t _packetDropRand; + uint64_t _incomingPacketDropProbability; // probability * 10,000 + uint64_t _outgoingPacketDropProbability; // probability * 10,000 public: ShardServer(Logger& logger, ShardShared& shared, ShardId shid, const ShardOptions& options): _env(logger, "server"), _shared(shared), _shid(shid), _stop(false), - _waitForShuckle(options.waitForShuckle) - {} + _waitForShuckle(options.waitForShuckle), + _packetDropRand(shid.port()), + _incomingPacketDropProbability(0), + _outgoingPacketDropProbability(0) + { + auto convertProb = [this](const std::string& what, double prob, uint64_t& iprob) { + if (prob != 0.0) { + LOG_INFO(_env, "Will drop %s%% of %s packets", prob*100.0, what); + iprob = prob * 10'000.0; + ALWAYS_ASSERT(iprob > 0 && iprob < 10'000); + } + }; + convertProb("incoming", options.simulateIncomingPacketDrop, _incomingPacketDropProbability); + convertProb("outgoing", options.simulateOutgoingPacketDrop, _outgoingPacketDropProbability); + } virtual ~ShardServer() = default; @@ -91,7 +109,7 @@ public: { struct timeval tv; tv.tv_sec = 0; - tv.tv_usec = 100000; + tv.tv_usec = 10'000; if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,&tv,sizeof(tv)) < 0) { throw SYSCALL_EXCEPTION("setsockopt"); } @@ -142,6 +160,11 @@ public: continue; } + if (splitmix64(_packetDropRand) % 10'000 < _incomingPacketDropProbability) { + LOG_DEBUG(_env, "artificially dropping request %s", reqHeader.requestId); + continue; + } + LOG_DEBUG(_env, "received request id %s, kind %s, from %s", reqHeader.requestId, reqHeader.kind, clientAddr); // If this will be filled in with an actual code, it means that we couldn't process @@ -197,6 +220,11 @@ public: respBbuf.packScalar((uint16_t)err); } + if (splitmix64(_packetDropRand) % 10'000 < _outgoingPacketDropProbability) { + LOG_DEBUG(_env, "artificially dropping response %s", reqHeader.requestId); + continue; + } + if (sendto(sock, respBbuf.data, respBbuf.len(), 0, (struct sockaddr*)&clientAddr, sizeof(clientAddr)) != respBbuf.len()) { throw SYSCALL_EXCEPTION("sendto"); } diff --git a/cpp/Shard.hpp b/cpp/Shard.hpp index 052ff4ef..0fd34b6a 100644 --- a/cpp/Shard.hpp +++ b/cpp/Shard.hpp @@ -8,6 +8,10 @@ struct ShardOptions { LogLevel level = LogLevel::LOG_INFO; std::string logFile = ""; // if empty, stdout std::string shuckleHost = "http://localhost:39999"; + // If non-zero, packets will be dropped with this probability. Useful to test + // resilience of the system. + double simulateIncomingPacketDrop = 0.0; + double simulateOutgoingPacketDrop = 0.0; }; void runShard(ShardId shid, const std::string& dbDir, const ShardOptions& options); diff --git a/cpp/ShardDB.cpp b/cpp/ShardDB.cpp index 1b0beebb..6ea7cdf3 100644 --- a/cpp/ShardDB.cpp +++ b/cpp/ShardDB.cpp @@ -584,13 +584,10 @@ struct ShardDBImpl { uint64_t nameHash; { - std::string value; - ExternalValue dir; - EggsError err = _getDirectory(options, req.dirId, false, value, dir); + EggsError err = _getDirectoryAndHash(options, req.dirId, false, req.name.ref(), nameHash); if (err != NO_ERROR) { return err; } - nameHash = computeHash(dir().hashMode(), req.name.ref()); } { @@ -792,6 +789,55 @@ struct ShardDBImpl { return _visitInodes(_filesCf, req, resp); } + EggsError _snapshotLookup(const SnapshotLookupReq& req, SnapshotLookupResp& resp) { + if (req.dirId.type() != InodeType::DIRECTORY) { + return EggsError::TYPE_IS_NOT_DIRECTORY; + } + + uint64_t nameHash; + { + // allowSnapshot=true since we want this to also work for snaphsot dirs + EggsError err = _getDirectoryAndHash({}, req.dirId, true, req.name.ref(), nameHash); + if (err != NO_ERROR) { + return err; + } + } + + int maxEdges = 1 + (UDP_MTU - ShardResponseHeader::STATIC_SIZE - SnapshotLookupResp::STATIC_SIZE) / SnapshotLookupEdge::STATIC_SIZE; + WrappedIterator it(_db->NewIterator({}, _edgesCf)); + StaticValue firstK; + firstK().setDirIdWithCurrent(req.dirId, false); + firstK().setNameHash(nameHash); + firstK().setName(req.name.ref()); + firstK().setCreationTime(req.startFrom); + int i; + for ( + i = 0, it->Seek(firstK.toSlice()); + i < maxEdges && it->Valid(); + i++, it->Next() + ) { + auto k = ExternalValue::FromSlice(it->key()); + if ( + k().nameHash() != nameHash || + k().current() || // only snapshot edges + k().dirId() != req.dirId || + k().name() != req.name.ref() + ) { + break; + } + auto v = ExternalValue::FromSlice(it->value()); + auto& edge = resp.edges.els.emplace_back(); + edge.targetId = v().targetIdWithOwned(); + edge.creationTime = k().creationTime(); + } + ROCKS_DB_CHECKED(it->status()); + if (resp.edges.els.size() == maxEdges) { + resp.nextTime = resp.edges.els.back().creationTime; + resp.edges.els.pop_back(); + } + return NO_ERROR; + } + EggsError read(const ShardReqContainer& req, ShardRespContainer& resp) { LOG_DEBUG(_env, "processing read-only request of kind %s", req.kind()); @@ -832,6 +878,9 @@ struct ShardDBImpl { case ShardMessageKind::VISIT_FILES: err = _visitFiles(req.getVisitFiles(), resp.setVisitFiles()); break; + case ShardMessageKind::SNAPSHOT_LOOKUP: + err = _snapshotLookup(req.getSnapshotLookup(), resp.setSnapshotLookup()); + break; default: throw EGGS_EXCEPTION("bad read-only shard message kind %s", req.kind()); } @@ -901,6 +950,7 @@ struct ShardDBImpl { return EggsError::BAD_SHARD; } entry.dirId = req.dirId; + entry.oldCreationTime = req.oldCreationTime; entry.oldName = req.oldName; entry.newName = req.newName; entry.targetId = req.targetId; @@ -920,6 +970,7 @@ struct ShardDBImpl { entry.ownerId = req.ownerId; entry.fileId = req.fileId; entry.name = req.name; + entry.creationTime = req.creationTime; return NO_ERROR; } @@ -941,9 +992,6 @@ struct ShardDBImpl { } EggsError _prepareCreateLockedCurrentEdge(EggsTime time, const CreateLockedCurrentEdgeReq& req, CreateLockedCurrentEdgeEntry& entry) { - if (entry.creationTime >= time) { - return EggsError::CREATION_TIME_TOO_RECENT; - } if (req.dirId.type() != InodeType::DIRECTORY) { return EggsError::TYPE_IS_NOT_DIRECTORY; } @@ -955,7 +1003,6 @@ struct ShardDBImpl { } ALWAYS_ASSERT(req.targetId != NULL_INODE_ID); // proper error entry.dirId = req.dirId; - entry.creationTime = req.creationTime; entry.targetId = req.targetId; entry.name = req.name; return NO_ERROR; @@ -972,6 +1019,7 @@ struct ShardDBImpl { entry.targetId = req.targetId; entry.name = req.name; entry.wasMoved = req.wasMoved; + entry.creationTime = req.creationTime; return NO_ERROR; } @@ -985,6 +1033,7 @@ struct ShardDBImpl { entry.dirId = req.dirId; entry.name = req.name; entry.targetId = req.targetId; + entry.creationTime = req.creationTime; return NO_ERROR; } @@ -1062,7 +1111,7 @@ struct ShardDBImpl { return NO_ERROR; } - EggsError _prepareIntraShardHardFileUnlink(EggsTime time, const IntraShardHardFileUnlinkReq& req, IntraShardHardFileUnlinkEntry& entry) { + EggsError _prepareSameShardHardFileUnlink(EggsTime time, const SameShardHardFileUnlinkReq& req, SameShardHardFileUnlinkEntry& entry) { if (req.ownerId.type() != InodeType::DIRECTORY) { return EggsError::TYPE_IS_NOT_DIRECTORY; } @@ -1391,8 +1440,8 @@ struct ShardDBImpl { case ShardMessageKind::REMOVE_NON_OWNED_EDGE: err = _prepareRemoveNonOwnedEdge(time, req.getRemoveNonOwnedEdge(), logEntryBody.setRemoveNonOwnedEdge()); break; - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: - err = _prepareIntraShardHardFileUnlink(time, req.getIntraShardHardFileUnlink(), logEntryBody.setIntraShardHardFileUnlink()); + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: + err = _prepareSameShardHardFileUnlink(time, req.getSameShardHardFileUnlink(), logEntryBody.setSameShardHardFileUnlink()); break; case ShardMessageKind::REMOVE_SPAN_INITIATE: err = _prepareRemoveSpanInitiate(time, req.getRemoveSpanInitiate(), logEntryBody.setRemoveSpanInitiate()); @@ -1419,7 +1468,7 @@ struct ShardDBImpl { } if (err == NO_ERROR) { - LOG_DEBUG(_env, "prepared log entry of kind %s, for request of kind %s", logEntryBody.kind(), req.kind()); + LOG_DEBUG(_env, "prepared log entry of kind %s, for request of kind %s: %s", logEntryBody.kind(), req.kind(), logEntryBody); } else { LOG_INFO(_env, "could not prepare log entry for request of kind %s: %s", req.kind(), err); } @@ -1482,11 +1531,32 @@ struct ShardDBImpl { if (err == EggsError::FILE_NOT_FOUND) { // Check if the file has already been linked to simplify the life of retrying // clients. - ExternalValue file; - EggsError err = _getFile({}, entry.fileId, fileValue, file); // don't overwrite old error - if (err == NO_ERROR) { - return NO_ERROR; // the non-transient file exists already, we're done + uint64_t nameHash; + // Return original error if the dir doens't exist, since this is some recovery mechanism anyway + if (_getDirectoryAndHash({}, entry.ownerId, false /*allowSnapshot*/, entry.name.ref(), nameHash) != NO_ERROR) { + LOG_DEBUG(_env, "could not find directory after FILE_NOT_FOUND for link file"); + return err; } + StaticValue edgeKey; + edgeKey().setDirIdWithCurrent(entry.ownerId, true); + edgeKey().setNameHash(nameHash); + edgeKey().setName(entry.name.ref()); + std::string edgeValue; + { + auto status = _db->Get({}, _edgesCf, edgeKey.toSlice(), &edgeValue); + if (status.IsNotFound()) { + LOG_DEBUG(_env, "could not find edge after FILE_NOT_FOUND for link file"); + return err; + } + ROCKS_DB_CHECKED(status); + } + ExternalValue edge(edgeValue); + if (edge().targetId() != entry.fileId) { + LOG_DEBUG(_env, "mismatching file id after FILE_NOT_FOUND for link file"); + return err; + } + resp.creationTime = edge().creationTime(); + return NO_ERROR; } else if (err != NO_ERROR) { return err; } @@ -1505,12 +1575,14 @@ struct ShardDBImpl { // create edge in owner. { - EggsError err = ShardDBImpl::_createCurrentEdge(time, time, batch, entry.ownerId, entry.name, entry.fileId, false); + EggsError err = ShardDBImpl::_createCurrentEdge(time, batch, entry.ownerId, entry.name, entry.fileId, false); if (err != NO_ERROR) { return err; } } + resp.creationTime = time; + return NO_ERROR; } @@ -1522,7 +1594,8 @@ struct ShardDBImpl { } // Don't go backwards in time. This is important amongst other things to ensure - // that we do not have snapshot edges to be the same. This should be very uncommon. + // that we have snapshot edges to be uniquely identified by name, hash, creationTime. + // This should be very uncommon. if (tmpDir().mtime() >= time) { RAISE_ALERT(_env, "trying to modify dir %s going backwards in time, dir mtime is %s, log entry time is %s", dirId, tmpDir().mtime(), time); return EggsError::MTIME_IS_TOO_RECENT; @@ -1539,27 +1612,34 @@ struct ShardDBImpl { return NO_ERROR; } + // When we just want to compute the hash of something when modifying the dir + EggsError _initiateDirectoryModificationAndHash(EggsTime time, bool allowSnapshot, rocksdb::WriteBatch& batch, InodeId dirId, const BincodeBytesRef& name, uint64_t& nameHash) { + ExternalValue dir; + std::string dirValue; + EggsError err = _initiateDirectoryModification(time, allowSnapshot, batch, dirId, dirValue, dir); + if (err != NO_ERROR) { + return err; + } + nameHash = computeHash(dir().hashMode(), name); + return NO_ERROR; + } + // Note that we cannot expose an API which allows us to create non-locked current edges, // see comment for CreateLockedCurrentEdgeReq. // // `logEntryTime` and `creationTime` are separate since we need a different `creationTime` // for CreateDirectoryInodeReq. - // - // It must be the case that `creationTime <= logEntryTime`. - EggsError _createCurrentEdge(EggsTime logEntryTime, EggsTime creationTime, rocksdb::WriteBatch& batch, InodeId dirId, const BincodeBytes& name, InodeId targetId, bool locked) { - ALWAYS_ASSERT(creationTime <= logEntryTime); + EggsError _createCurrentEdge(EggsTime logEntryTime, rocksdb::WriteBatch& batch, InodeId dirId, const BincodeBytes& name, InodeId targetId, bool locked) { + EggsTime creationTime = logEntryTime; - // fetch the directory - ExternalValue dir; - std::string dirValue; + uint64_t nameHash; { // allowSnaphsot=false since we cannot create current edges in snapshot directories. - EggsError err = _initiateDirectoryModification(logEntryTime, false, batch, dirId, dirValue, dir); + EggsError err = _initiateDirectoryModificationAndHash(logEntryTime, false, batch, dirId, name.ref(), nameHash); if (err != NO_ERROR) { return err; } } - uint64_t nameHash = computeHash(dir().hashMode(), name.ref()); // Next, we need to look at the current edge with the same name, if any. StaticValue edgeKey; @@ -1599,7 +1679,7 @@ struct ShardDBImpl { // idempotency. if ( !locked || // the one we're trying to create isn't locked - (targetId != existingEdge().targetIdWithLocked().id() || creationTime != existingEdge().creationTime()) // we're trying to create a different locked edge + (targetId != existingEdge().targetIdWithLocked().id()) // we're trying to create a different locked edge ) { return EggsError::NAME_IS_LOCKED; } @@ -1644,33 +1724,32 @@ struct ShardDBImpl { EggsError _applySameDirectoryRename(EggsTime time, rocksdb::WriteBatch& batch, const SameDirectoryRenameEntry& entry, SameDirectoryRenameResp& resp) { // First, remove the old edge -- which won't be owned anymore, since we're renaming it. { - EggsError err = _softUnlinkCurrentEdge(time, batch, entry.dirId, entry.oldName, entry.targetId, false); + EggsError err = _softUnlinkCurrentEdge(time, batch, entry.dirId, entry.oldName, entry.oldCreationTime, entry.targetId, false); if (err != NO_ERROR) { return err; } } // Now, create the new one { - EggsError err = _createCurrentEdge(time, time, batch, entry.dirId, entry.newName, entry.targetId, false); + EggsError err = _createCurrentEdge(time, batch, entry.dirId, entry.newName, entry.targetId, false); if (err != NO_ERROR) { return err; } } + resp.newCreationTime = time; return NO_ERROR; } - EggsError _softUnlinkCurrentEdge(EggsTime time, rocksdb::WriteBatch& batch, InodeId dirId, const BincodeBytes& name, InodeId targetId, bool owned) { - // fetch the directory - ExternalValue dir; - std::string dirValue; + EggsError _softUnlinkCurrentEdge(EggsTime time, rocksdb::WriteBatch& batch, InodeId dirId, const BincodeBytes& name, EggsTime creationTime, InodeId targetId, bool owned) { + // compute hash + uint64_t nameHash; { // allowSnaphsot=false since we can't have current edges in snapshot dirs - EggsError err = _initiateDirectoryModification(time, false, batch, dirId, dirValue, dir); + EggsError err = _initiateDirectoryModificationAndHash(time, false, batch, dirId, name.ref(), nameHash); if (err != NO_ERROR) { return err; } } - uint64_t nameHash = computeHash(dir().hashMode(), name.ref()); // get the edge StaticValue edgeKey; @@ -1680,15 +1759,19 @@ struct ShardDBImpl { std::string edgeValue; auto status = _db->Get({}, _edgesCf, edgeKey.toSlice(), &edgeValue); if (status.IsNotFound()) { - return EggsError::NAME_NOT_FOUND; + return EggsError::EDGE_NOT_FOUND; } ROCKS_DB_CHECKED(status); ExternalValue edgeBody(edgeValue); if (edgeBody().targetIdWithLocked().id() != targetId) { + LOG_DEBUG(_env, "expecting target %s, but got %s", targetId, edgeBody().targetIdWithLocked().id()); return EggsError::MISMATCHING_TARGET; } + if (edgeBody().creationTime() != creationTime) { + return EggsError::MISMATCHING_CREATION_TIME; + } if (edgeBody().targetIdWithLocked().extra()) { // locked - return EggsError::NAME_IS_LOCKED; + return EggsError::EDGE_IS_LOCKED; } // delete the current edge @@ -1714,7 +1797,7 @@ struct ShardDBImpl { } EggsError _applySoftUnlinkFile(EggsTime time, rocksdb::WriteBatch& batch, const SoftUnlinkFileEntry& entry, SoftUnlinkFileResp& resp) { - return _softUnlinkCurrentEdge(time, batch, entry.ownerId, entry.name, entry.fileId, true); + return _softUnlinkCurrentEdge(time, batch, entry.ownerId, entry.name, entry.creationTime, entry.fileId, true); } EggsError _applyCreateDirectoryInode(EggsTime time, rocksdb::WriteBatch& batch, const CreateDirectoryInodeEntry& entry, CreateDirectoryInodeResp& resp) { @@ -1757,8 +1840,12 @@ struct ShardDBImpl { } EggsError _applyCreateLockedCurrentEdge(EggsTime time, rocksdb::WriteBatch& batch, const CreateLockedCurrentEdgeEntry& entry, CreateLockedCurrentEdgeResp& resp) { - LOG_INFO(_env, "Creating edge %s -> %s", entry.dirId, entry.targetId); - return _createCurrentEdge(time, entry.creationTime, batch, entry.dirId, entry.name, entry.targetId, true); // locked=true + auto err = _createCurrentEdge(time, batch, entry.dirId, entry.name, entry.targetId, true); // locked=true + if (err != NO_ERROR) { + return err; + } + resp.creationTime = time; + return NO_ERROR; } EggsError _applyUnlockCurrentEdge(EggsTime time, rocksdb::WriteBatch& batch, const UnlockCurrentEdgeEntry& entry, UnlockCurrentEdgeResp& resp) { @@ -1787,6 +1874,9 @@ struct ShardDBImpl { ROCKS_DB_CHECKED(status); } ExternalValue edge(edgeValue); + if (edge().creationTime() != entry.creationTime) { + return EggsError::MISMATCHING_CREATION_TIME; + } if (edge().locked()) { edge().setTargetIdWithLocked(InodeIdExtra(entry.targetId, false)); // locked=false ROCKS_DB_CHECKED(batch.Put(_edgesCf, currentKey.toSlice(), edge.toSlice())); @@ -1801,7 +1891,7 @@ struct ShardDBImpl { snapshotKey().setName(entry.name.ref()); snapshotKey().setCreationTime(edge().creationTime()); StaticValue snapshotBody; - snapshotBody().setTargetIdWithOwned(InodeIdExtra(entry.targetId, false)); // not owned (this was moved somewhere else) + snapshotBody().setTargetIdWithOwned(InodeIdExtra(entry.targetId, false)); ROCKS_DB_CHECKED(batch.Put(_edgesCf, snapshotKey.toSlice(), snapshotBody.toSlice())); snapshotKey().setCreationTime(time); snapshotBody().setTargetIdWithOwned(InodeIdExtra(NULL_INODE_ID, false)); // deletion edges are never owned @@ -1838,8 +1928,11 @@ struct ShardDBImpl { ROCKS_DB_CHECKED(status); } ExternalValue edge(edgeValue); + if (edge().creationTime() != entry.creationTime) { + return EggsError::MISMATCHING_CREATION_TIME; + } if (!edge().locked()) { - edge().setTargetIdWithLocked(InodeIdExtra(entry.targetId, true)); // locked=true + edge().setTargetIdWithLocked({entry.targetId, true}); // locked=true ROCKS_DB_CHECKED(batch.Put(_edgesCf, currentKey.toSlice(), edge.toSlice())); } @@ -1880,7 +1973,8 @@ struct ShardDBImpl { } } - // we need to create a new DirectoryBody, the materialized info might have changed size + // we need to create a new DirectoryBody rather than modify the old one, the materialized + // info might have changed size { StaticValue newDir; newDir().setOwnerId(NULL_INODE_ID); @@ -1924,6 +2018,7 @@ struct ShardDBImpl { if (it->Valid()) { auto otherEdge = ExternalValue::FromSlice(it->key()); if (otherEdge().dirId() == entry.id) { + LOG_DEBUG(_env, "found edge %s when trying to remove directory %s", otherEdge(), entry.id); return EggsError::DIRECTORY_NOT_EMPTY; } } else if (it->status().IsNotFound()) { @@ -1957,7 +2052,10 @@ struct ShardDBImpl { if (err == NO_ERROR) { return EggsError::FILE_IS_NOT_TRANSIENT; } else if (err == EggsError::FILE_NOT_FOUND) { - return EggsError::FILE_NOT_FOUND; + // In this case the inode is just gone. The best thing to do is + // to just be OK with it, since we need to handle repeated calls + // nicely. + return NO_ERROR; } else { return err; } @@ -2064,14 +2162,11 @@ struct ShardDBImpl { EggsError _applyRemoveNonOwnedEdge(EggsTime time, rocksdb::WriteBatch& batch, const RemoveNonOwnedEdgeEntry& entry, RemoveNonOwnedEdgeResp& resp) { uint64_t nameHash; { - std::string dirValue; - ExternalValue dir; // allowSnapshot=true since GC needs to be able to remove non-owned edges from snapshot dir - EggsError err = _initiateDirectoryModification(time, true, batch, entry.dirId, dirValue, dir); + EggsError err = _initiateDirectoryModificationAndHash(time, true, batch, entry.dirId, entry.name.ref(), nameHash); if (err != NO_ERROR) { return err; } - nameHash = computeHash(dir().hashMode(), entry.name.ref()); } // We check that edge is still not owned -- otherwise we might orphan a file. @@ -2099,7 +2194,7 @@ struct ShardDBImpl { return NO_ERROR; } - EggsError _applyIntraShardHardFileUnlink(EggsTime time, rocksdb::WriteBatch& batch, const IntraShardHardFileUnlinkEntry& entry, IntraShardHardFileUnlinkResp& resp) { + EggsError _applySameShardHardFileUnlink(EggsTime time, rocksdb::WriteBatch& batch, const SameShardHardFileUnlinkEntry& entry, SameShardHardFileUnlinkResp& resp) { // fetch the file std::string fileValue; ExternalValue file; @@ -2297,6 +2392,21 @@ struct ShardDBImpl { ROCKS_DB_CHECKED(batch.Put(_defaultCf, shardMetadataKey(&NEXT_BLOCK_ID_KEY), v.toSlice())); } + void _fillInAddSpanInitiate(const SpanBody& spanBody, AddSpanInitiateResp& resp) { + resp.blocks.els.reserve(spanBody.parity().blocks()); + BlockBody block; + for (int i = 0; i < spanBody.parity().blocks(); i++) { + BlockBody block = spanBody.block(i); + auto& respBlock = resp.blocks.els.emplace_back(); + respBlock.blockServiceId = block.blockServiceId; + respBlock.blockId = block.blockId; + const auto& cache = _blockServicesCache.at(block.blockServiceId); + respBlock.blockServiceIp = cache.ip; + respBlock.blockServicePort = cache.port; + respBlock.certificate.data = _blockAddCertificate(spanBody.blockSize(), block, cache.secretKey); + } + } + EggsError _applyAddSpanInitiate(EggsTime time, rocksdb::WriteBatch& batch, const AddSpanInitiateEntry& entry, AddSpanInitiateResp& resp) { std::string fileValue; ExternalValue file; @@ -2345,6 +2455,7 @@ struct ShardDBImpl { LOG_DEBUG(_env, "file size does not match, and existing span does not match"); return EggsError::SPAN_NOT_FOUND; } + _fillInAddSpanInitiate(existingSpan(), resp); return NO_ERROR; } LOG_DEBUG(_env, "expecting file size %s, but got %s, returning span not found", entry.byteOffset, file().fileSize()); @@ -2394,18 +2505,7 @@ struct ShardDBImpl { } // Fill in the response - resp.blocks.els.reserve(spanBody().parity().blocks()); - BlockBody block; - for (int i = 0; i < spanBody().parity().blocks(); i++) { - BlockBody block = spanBody().block(i); - auto& respBlock = resp.blocks.els.emplace_back(); - respBlock.blockServiceId = block.blockServiceId; - respBlock.blockId = block.blockId; - const auto& cache = _blockServicesCache.at(block.blockServiceId); - respBlock.blockServiceIp = cache.ip; - respBlock.blockServicePort = cache.port; - respBlock.certificate.data = _blockAddCertificate(spanBody().blockSize(), block, cache.secretKey); - } + _fillInAddSpanInitiate(spanBody(), resp); return NO_ERROR; } @@ -2542,7 +2642,7 @@ struct ShardDBImpl { std::string transientFileValue; ExternalValue transientFile; EggsError err = _getTransientFile({}, time, true, entry.id, transientFileValue, transientFile); - if (err != NO_ERROR) { + if (err == NO_ERROR) { return NO_ERROR; } } @@ -2635,14 +2735,11 @@ struct ShardDBImpl { EggsError _applyRemoveOwnedSnapshotFileEdge(EggsTime time, rocksdb::WriteBatch& batch, const RemoveOwnedSnapshotFileEdgeEntry& entry, RemoveOwnedSnapshotFileEdgeResp& resp) { uint64_t nameHash; { - std::string dirValue; - ExternalValue dir; // the GC needs to work on deleted dirs who might still have owned files, so allowSnapshot=true - EggsError err = _initiateDirectoryModification(time, true, batch, entry.ownerId, dirValue, dir); + EggsError err = _initiateDirectoryModificationAndHash(time, true, batch, entry.ownerId, entry.name.ref(), nameHash); if (err != NO_ERROR) { return err; } - nameHash = computeHash(dir().hashMode(), entry.name.ref()); } { @@ -2650,7 +2747,7 @@ struct ShardDBImpl { edgeKey().setDirIdWithCurrent(entry.ownerId, false); // snapshot (current=false) edgeKey().setNameHash(nameHash); edgeKey().setName(entry.name.ref()); - edgeKey().setCreationTime(time); + edgeKey().setCreationTime(entry.creationTime); ROCKS_DB_CHECKED(batch.Delete(_edgesCf, edgeKey.toSlice())); } @@ -2722,8 +2819,8 @@ struct ShardDBImpl { case ShardLogEntryKind::REMOVE_NON_OWNED_EDGE: err = _applyRemoveNonOwnedEdge(time, batch, logEntryBody.getRemoveNonOwnedEdge(), resp.setRemoveNonOwnedEdge()); break; - case ShardLogEntryKind::INTRA_SHARD_HARD_FILE_UNLINK: - err = _applyIntraShardHardFileUnlink(time, batch, logEntryBody.getIntraShardHardFileUnlink(), resp.setIntraShardHardFileUnlink()); + case ShardLogEntryKind::SAME_SHARD_HARD_FILE_UNLINK: + err = _applySameShardHardFileUnlink(time, batch, logEntryBody.getSameShardHardFileUnlink(), resp.setSameShardHardFileUnlink()); break; case ShardLogEntryKind::REMOVE_SPAN_INITIATE: err = _applyRemoveSpanInitiate(time, batch, logEntryBody.getRemoveSpanInitiate(), resp.setRemoveSpanInitiate()); @@ -2799,6 +2896,17 @@ struct ShardDBImpl { return NO_ERROR; } + EggsError _getDirectoryAndHash(const rocksdb::ReadOptions& options, InodeId id, bool allowSnapshot, const BincodeBytesRef& name, uint64_t& nameHash) { + std::string dirValue; + ExternalValue dir; + EggsError err = _getDirectory(options, id, allowSnapshot, dirValue, dir); + if (err != NO_ERROR) { + return err; + } + nameHash = computeHash(dir().hashMode(), name); + return NO_ERROR; + } + EggsError _getFile(const rocksdb::ReadOptions& options, InodeId id, std::string& fileValue, ExternalValue& file) { if (unlikely(id.type() != InodeType::FILE && id.type() != InodeType::SYMLINK)) { return EggsError::TYPE_IS_DIRECTORY; @@ -2901,6 +3009,7 @@ bool readOnlyShardReq(const ShardMessageKind kind) { case ShardMessageKind::VISIT_FILES: case ShardMessageKind::VISIT_TRANSIENT_FILES: case ShardMessageKind::BLOCK_SERVICE_FILES: + case ShardMessageKind::SNAPSHOT_LOOKUP: return true; case ShardMessageKind::CONSTRUCT_FILE: case ShardMessageKind::ADD_SPAN_INITIATE: @@ -2910,7 +3019,7 @@ bool readOnlyShardReq(const ShardMessageKind kind) { case ShardMessageKind::SAME_DIRECTORY_RENAME: case ShardMessageKind::SET_DIRECTORY_INFO: case ShardMessageKind::REMOVE_NON_OWNED_EDGE: - case ShardMessageKind::INTRA_SHARD_HARD_FILE_UNLINK: + case ShardMessageKind::SAME_SHARD_HARD_FILE_UNLINK: case ShardMessageKind::REMOVE_SPAN_INITIATE: case ShardMessageKind::REMOVE_SPAN_CERTIFY: case ShardMessageKind::SWAP_BLOCKS: diff --git a/cpp/eggs-cdc.app.cpp b/cpp/eggs-cdc.app.cpp index 82ce33e8..9be4f93f 100644 --- a/cpp/eggs-cdc.app.cpp +++ b/cpp/eggs-cdc.app.cpp @@ -10,7 +10,7 @@ int main(int argc, char** argv) { namespace fs = std::filesystem; const auto dieWithUsage = [&argv]() { - die("Usage: %s [-v|--verbose] [--log-level debug|info|error] [--log-file ] db_dir\n", argv[0]); + die("Usage: %s [-v|--verbose] [--log-level debug|info|error] [--log-file ] [--packet-drop ] db_dir\n", argv[0]); }; CDCOptions options; @@ -42,6 +42,16 @@ int main(int argc, char** argv) { } } else if (arg == "--log-file") { options.logFile = getNextArg(); + } else if (arg == "--packet-drop") { + std::string probStr = getNextArg(); + size_t idx; + options.simulatePacketDrop = std::stod(probStr, &idx); + if (idx != probStr.size()) { + die("Runoff characters in number %s", probStr.c_str()); + } + if (options.simulatePacketDrop < 0.0 || options.simulatePacketDrop >= 1.0) { + die("Please specify a number in the interval [0.0, 1.0) for --packet-drop."); + } } else { args.emplace_back(std::move(arg)); } diff --git a/cpp/eggs-shard.app.cpp b/cpp/eggs-shard.app.cpp index 99efc60c..0dd760b0 100644 --- a/cpp/eggs-shard.app.cpp +++ b/cpp/eggs-shard.app.cpp @@ -6,11 +6,28 @@ #define die(...) do { fprintf(stderr, __VA_ARGS__); exit(1); } while(false) +static double parseDouble(const std::string& arg) { + size_t idx; + double x = std::stod(arg, &idx); + if (idx != arg.size()) { + die("Runoff characters in number %s", arg.c_str()); + } + return x; +} + +static double parseProbability(const std::string& arg) { + double x = parseDouble(arg); + if (x < 0.0 || x >= 1.0) { + die("Please specify a number in the interval [0.0, 1.0), rather than %f", x); + } + return x; +} + int main(int argc, char** argv) { namespace fs = std::filesystem; const auto dieWithUsage = [&argv]() { - die("Usage: %s [-v|--verbose] [--log-level debug|info|error] [--log-file ] [--wait-for-shuckle] db_dir shard_id\n", argv[0]); + die("Usage: %s [-v|--verbose] [--log-level debug|info|error] [--log-file ] [--wait-for-shuckle] [--incoming-packet-drop ] [--outgoing-packet-drop ] db_dir shard_id\n", argv[0]); }; ShardOptions options; @@ -44,6 +61,10 @@ int main(int argc, char** argv) { options.logFile = getNextArg(); } else if (arg == "--wait-for-shuckle") { options.waitForShuckle = true; + } else if (arg == "--incoming-packet-drop") { + options.simulateIncomingPacketDrop = parseProbability(getNextArg()); + } else if (arg == "--outgoing-packet-drop") { + options.simulateOutgoingPacketDrop = parseProbability(getNextArg()); } else { args.emplace_back(std::move(arg)); } diff --git a/cpp/tests.app.cpp b/cpp/tests.app.cpp index 4fb413a8..8b41a81f 100644 --- a/cpp/tests.app.cpp +++ b/cpp/tests.app.cpp @@ -519,7 +519,7 @@ TEST_CASE("override") { auto logEntry = std::make_unique(); uint64_t logEntryIndex = 0; - const auto createFile = [&](const char* name) { + const auto createFile = [&](const char* name) -> std::tuple { InodeId id; BincodeFixedBytes<8> cookie; { @@ -536,6 +536,7 @@ TEST_CASE("override") { auto& req = reqContainer->setVisitTransientFiles(); NO_EGGS_ERROR(db->read(*reqContainer, *respContainer)); } + EggsTime creationTime; { auto& req = reqContainer->setLinkFile(); req.fileId = id; @@ -544,18 +545,20 @@ TEST_CASE("override") { req.name = name; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + creationTime = respContainer->getLinkFile().creationTime; } - return id; + return {id, creationTime}; }; - auto foo = createFile("foo"); - auto bar = createFile("bar"); + auto [foo, fooCreationTime] = createFile("foo"); + auto [bar, barCreationTime] = createFile("bar"); { auto& req = reqContainer->setSameDirectoryRename(); req.dirId = ROOT_DIR_INODE_ID; req.targetId = foo; req.oldName = "foo"; + req.oldCreationTime = fooCreationTime; req.newName = "bar"; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); diff --git a/go/bincodegen/bincodegen.go b/go/bincodegen/bincodegen.go index c6cefc08..b6b36c91 100644 --- a/go/bincodegen/bincodegen.go +++ b/go/bincodegen/bincodegen.go @@ -191,6 +191,18 @@ func generateGoSingle(out io.Writer, t reflect.Type) { out.Write(cg.unpack.Bytes()) } +func generateGoReqResp(out io.Writer, rr reqRespType, enumType string, reqKindFun string, respKindFun string) { + fmt.Fprintf(out, "func (v *%s) %s() %s {\n", rr.req.Name(), reqKindFun, enumType) + fmt.Fprintf(out, "\treturn %s\n", reqRespEnum(rr)) + fmt.Fprintf(out, "}\n\n") + generateGoSingle(out, rr.req) + + fmt.Fprintf(out, "func (v *%s) %s() %s {\n", rr.resp.Name(), respKindFun, enumType) + fmt.Fprintf(out, "\treturn %s\n", reqRespEnum(rr)) + fmt.Fprintf(out, "}\n\n") + generateGoSingle(out, rr.resp) +} + func enumName(t reflect.Type) string { tName := t.Name() if !strings.HasSuffix(tName, "Req") && !strings.HasSuffix(tName, "Resp") && !strings.HasSuffix(tName, "Entry") { @@ -210,32 +222,16 @@ func reqRespEnum(rr reqRespType) string { return reqEnum } -func generateGoMsgKind(out io.Writer, typeName string, funName string, reqResps []reqRespType) { - fmt.Fprintf(out, "func %s(body any) %s {\n", funName, typeName) - fmt.Fprintf(out, "\tswitch body.(type) {\n") - fmt.Fprintf(out, "\tcase ErrCode:\n") - fmt.Fprintf(out, "\t\treturn 0\n") +func generateGoMsgKind(out io.Writer, typeName string, reqResps []reqRespType) { seenKinds := map[uint8]bool{} + + fmt.Fprintf(out, "func (k %s) String() string {\n", typeName) + fmt.Fprintf(out, "\tswitch k {\n") for _, reqResp := range reqResps { present := seenKinds[reqResp.kind] if present { panic(fmt.Errorf("duplicate kind %d for %s", reqResp.kind, typeName)) } - seenKinds[reqResp.kind] = true - reqName := reqResp.req.Name() - respName := reqResp.resp.Name() - kindName := reqRespEnum(reqResp) - fmt.Fprintf(out, "\tcase *%v, *%v:\n", reqName, respName) - fmt.Fprintf(out, "\t\treturn %s\n", kindName) - } - fmt.Fprintf(out, "\tdefault:\n") - fmt.Fprintf(out, "\t\tpanic(fmt.Sprintf(\"bad shard req/resp body %%T\", body))\n") - fmt.Fprintf(out, "\t}\n") - fmt.Fprintf(out, "}\n\n") - - fmt.Fprintf(out, "func (k %s) String() string {\n", typeName) - fmt.Fprintf(out, "\tswitch k {\n") - for _, reqResp := range reqResps { fmt.Fprintf(out, "\tcase %v:\n", reqResp.kind) fmt.Fprintf(out, "\t\treturn \"%s\"\n", reqRespEnum(reqResp)) } @@ -296,16 +292,14 @@ func generateGo(errors []string, shardReqResps []reqRespType, cdcReqResps []reqR generateGoErrorCodes(out, errors) - generateGoMsgKind(out, "ShardMessageKind", "GetShardMessageKind", shardReqResps) - generateGoMsgKind(out, "CDCMessageKind", "GetCDCMessageKind", cdcReqResps) + generateGoMsgKind(out, "ShardMessageKind", shardReqResps) + generateGoMsgKind(out, "CDCMessageKind", cdcReqResps) for _, reqResp := range shardReqResps { - generateGoSingle(out, reqResp.req) - generateGoSingle(out, reqResp.resp) + generateGoReqResp(out, reqResp, "ShardMessageKind", "ShardRequestKind", "ShardResponseKind") } for _, reqResp := range cdcReqResps { - generateGoSingle(out, reqResp.req) - generateGoSingle(out, reqResp.resp) + generateGoReqResp(out, reqResp, "CDCMessageKind", "CDCRequestKind", "CDCResponseKind") } for _, typ := range extras { generateGoSingle(out, typ) @@ -1101,7 +1095,7 @@ func generateCpp(errors []string, shardReqResps []reqRespType, cdcReqResps []req reflect.TypeOf(msgs.SetDirectoryOwnerEntry{}), reflect.TypeOf(msgs.SetDirectoryInfoEntry{}), reflect.TypeOf(msgs.RemoveNonOwnedEdgeEntry{}), - reflect.TypeOf(msgs.IntraShardHardFileUnlinkEntry{}), + reflect.TypeOf(msgs.SameShardHardFileUnlinkEntry{}), reflect.TypeOf(msgs.RemoveSpanInitiateEntry{}), reflect.TypeOf(msgs.UpdateBlockServicesEntry{}), reflect.TypeOf(msgs.AddSpanInitiateEntry{}), @@ -1112,15 +1106,6 @@ func generateCpp(errors []string, shardReqResps []reqRespType, cdcReqResps []req }, ) - /* - generateCppLogEntries( - hppOut, - cppOut, - "CDC", - []reflect.Type{}, - ) - */ - return hppOut.Bytes(), cppOut.Bytes() } @@ -1141,6 +1126,8 @@ func main() { "FILE_NOT_FOUND", "DIRECTORY_NOT_FOUND", "NAME_NOT_FOUND", + "EDGE_NOT_FOUND", + "EDGE_IS_LOCKED", "TYPE_IS_DIRECTORY", "TYPE_IS_NOT_DIRECTORY", "BAD_COOKIE", @@ -1155,17 +1142,15 @@ func main() { "BAD_BLOCK_PROOF", "CANNOT_OVERRIDE_NAME", "NAME_IS_LOCKED", - "OLD_NAME_IS_LOCKED", - "NEW_NAME_IS_LOCKED", "MTIME_IS_TOO_RECENT", "MISMATCHING_TARGET", "MISMATCHING_OWNER", + "MISMATCHING_CREATION_TIME", "DIRECTORY_NOT_EMPTY", "FILE_IS_TRANSIENT", "OLD_DIRECTORY_NOT_FOUND", "NEW_DIRECTORY_NOT_FOUND", "LOOP_IN_DIRECTORY_RENAME", - "EDGE_NOT_FOUND", "DIRECTORY_HAS_OWNER", "FILE_IS_NOT_TRANSIENT", "FILE_NOT_EMPTY", @@ -1177,10 +1162,10 @@ func main() { "MORE_RECENT_SNAPSHOT_EDGE", "MORE_RECENT_CURRENT_EDGE", "BAD_DIRECTORY_INFO", - "CREATION_TIME_TOO_RECENT", "DEADLINE_NOT_PASSED", "SAME_SOURCE_AND_DESTINATION", "SAME_DIRECTORIES", + "SAME_SHARD", } shardReqResps := []reqRespType{ @@ -1249,6 +1234,11 @@ func main() { reflect.TypeOf(msgs.SetDirectoryInfoReq{}), reflect.TypeOf(msgs.SetDirectoryInfoResp{}), }, + { + 0x09, + reflect.TypeOf(msgs.SnapshotLookupReq{}), + reflect.TypeOf(msgs.SnapshotLookupResp{}), + }, // PRIVATE OPERATIONS -- These are safe operations, but we don't want the FS client itself // to perform them. TODO make privileged? { @@ -1278,8 +1268,8 @@ func main() { }, { 0x18, - reflect.TypeOf(msgs.IntraShardHardFileUnlinkReq{}), - reflect.TypeOf(msgs.IntraShardHardFileUnlinkResp{}), + reflect.TypeOf(msgs.SameShardHardFileUnlinkReq{}), + reflect.TypeOf(msgs.SameShardHardFileUnlinkResp{}), }, { 0x19, @@ -1377,8 +1367,8 @@ func main() { }, { 0x06, - reflect.TypeOf(msgs.HardUnlinkFileReq{}), - reflect.TypeOf(msgs.HardUnlinkFileResp{}), + reflect.TypeOf(msgs.CrossShardHardUnlinkFileReq{}), + reflect.TypeOf(msgs.CrossShardHardUnlinkFileResp{}), }, } @@ -1399,6 +1389,7 @@ func main() { reflect.TypeOf(msgs.FullReadDirCursor{}), reflect.TypeOf(msgs.EntryBlockService{}), reflect.TypeOf(msgs.EntryNewBlockInfo{}), + reflect.TypeOf(msgs.SnapshotLookupEdge{}), } goCode := generateGo(errors, shardReqResps, cdcReqResps, extras) diff --git a/go/cli/cli.go b/go/cli/cli.go index a3049551..f2ffd21e 100644 --- a/go/cli/cli.go +++ b/go/cli/cli.go @@ -10,10 +10,17 @@ import ( ) func badCommand() { - fmt.Printf("expected 'collect', 'destruct', or 'migrate' subcommand\n") + fmt.Fprintf(os.Stderr, "expected 'collect', 'destruct', or 'migrate' subcommand\n") os.Exit(2) } +func noRunawayArgs() { + if flag.NArg() > 0 { + fmt.Fprintf(os.Stderr, "Unexpected extra arguments %v\n", flag.Args()) + os.Exit(2) + } +} + func main() { collectCmd := flag.NewFlagSet("collect", flag.ExitOnError) collectDirIdU64 := collectCmd.Uint64("dir", 0, "Directory inode id to GC. If not present, they'll all be collected.") @@ -38,8 +45,9 @@ func main() { switch os.Args[1] { case "collect": collectCmd.Parse(os.Args[2:]) + noRunawayArgs() if *collectDirIdU64 == 0 { - if err := eggs.CollectDirectoriesInAllShards(log); err != nil { + if err := eggs.CollectDirectoriesInAllShards(log, nil); err != nil { panic(err) } } else { @@ -48,7 +56,7 @@ func main() { panic(fmt.Errorf("inode id %v is not a directory", dirId)) } shid := dirId.Shard() - client, err := eggs.NewShardSpecificClient(shid) + client, err := eggs.NewClient(&shid, nil, nil) if err != nil { panic(fmt.Errorf("could not create shard client: %v", err)) } @@ -62,8 +70,9 @@ func main() { } case "destruct": destructCmd.Parse(os.Args[2:]) + noRunawayArgs() if *destructFileIdU64 == 0 { - if err := eggs.DestructFilesInAllShards(log, nil); err != nil { + if err := eggs.DestructFilesInAllShards(log, nil, nil); err != nil { panic(err) } } else { @@ -72,7 +81,7 @@ func main() { panic(fmt.Errorf("inode id %v is not a file/symlink", fileId)) } shid := fileId.Shard() - client, err := eggs.NewShardSpecificClient(shid) + client, err := eggs.NewClient(&shid, nil, nil) if err != nil { panic(err) } @@ -88,6 +97,7 @@ func main() { } case "migrate": migrateCmd.Parse(os.Args[2:]) + noRunawayArgs() if *migrateBlockService == 0 { migrateCmd.Usage() os.Exit(2) @@ -100,7 +110,7 @@ func main() { } else { fileId := msgs.InodeId(*migrateFileIdU64) shid := fileId.Shard() - client, err := eggs.NewShardSpecificClient(shid) + client, err := eggs.NewClient(&shid, nil, nil) if err != nil { panic(fmt.Errorf("could not create shard socket: %v", err)) } diff --git a/go/eggs/cdcreq.go b/go/eggs/cdcreq.go index 51bdb895..98293d36 100644 --- a/go/eggs/cdcreq.go +++ b/go/eggs/cdcreq.go @@ -3,40 +3,40 @@ package eggs import ( "fmt" - "io" "net" + "sync/atomic" "time" "xtx/eggsfs/bincode" "xtx/eggsfs/msgs" ) type cdcRequest struct { - RequestId uint64 - Body bincode.Packable + requestId uint64 + body msgs.CDCRequest } func (req *cdcRequest) Pack(buf *bincode.Buf) { buf.PackU32(msgs.CDC_REQ_PROTOCOL_VERSION) - buf.PackU64(req.RequestId) - buf.PackU8(uint8(msgs.GetCDCMessageKind(req.Body))) - req.Body.Pack(buf) + buf.PackU64(req.requestId) + buf.PackU8(uint8(req.body.CDCRequestKind())) + req.body.Pack(buf) } type CDCResponse struct { RequestId uint64 - Body bincode.Bincodable + Body msgs.CDCResponse } func (req *CDCResponse) Pack(buf *bincode.Buf) { buf.PackU32(msgs.CDC_RESP_PROTOCOL_VERSION) buf.PackU64(req.RequestId) - buf.PackU8(uint8(msgs.GetCDCMessageKind(req.Body))) + buf.PackU8(uint8(req.Body.CDCResponseKind())) req.Body.Pack(buf) } -type UnpackedCDCResponse struct { - RequestId uint64 - Body bincode.Unpackable +type unpackedCDCResponse struct { + requestId uint64 + body msgs.CDCResponse // This is where we could decode as far as decoding the request id, // but then errored after. We are interested in this case because // we can safely drop every erroring request that is not our request @@ -44,12 +44,12 @@ type UnpackedCDCResponse struct { // the request we're interested in. // // If this is non-nil, the body will be set to nil. - Error error + error error } -func (resp *UnpackedCDCResponse) Unpack(buf *bincode.Buf) error { +func (resp *unpackedCDCResponse) Unpack(buf *bincode.Buf) error { // panic immediately if we get passed a bogus body - expectedKind := msgs.GetCDCMessageKind(resp.Body) + expectedKind := resp.body.CDCResponseKind() // decode message header var ver uint32 if err := buf.UnpackU32(&ver); err != nil { @@ -58,118 +58,209 @@ func (resp *UnpackedCDCResponse) Unpack(buf *bincode.Buf) error { if ver != msgs.CDC_RESP_PROTOCOL_VERSION { return fmt.Errorf("expected protocol version %v, but got %v", msgs.CDC_RESP_PROTOCOL_VERSION, ver) } - if err := buf.UnpackU64(&resp.RequestId); err != nil { + if err := buf.UnpackU64(&resp.requestId); err != nil { return err } // We've made it with the request id, from now on if we fail we set // the error inside the object, rather than returning an error. - body := resp.Body - resp.Body = nil + body := resp.body + resp.body = nil var kind uint8 if err := buf.UnpackU8(&kind); err != nil { - resp.Error = fmt.Errorf("could not decode response kind: %w", err) + resp.error = fmt.Errorf("could not decode response kind: %w", err) return nil } if kind == msgs.ERROR_KIND { var errCode msgs.ErrCode if err := errCode.Unpack(buf); err != nil { - resp.Error = fmt.Errorf("could not decode error body: %w", err) + resp.error = fmt.Errorf("could not decode error body: %w", err) return nil } - resp.Error = errCode + resp.error = errCode return nil } if msgs.CDCMessageKind(kind) != expectedKind { - resp.Error = fmt.Errorf("expected body of kind %v, got %v instead", expectedKind, kind) + resp.error = fmt.Errorf("expected body of kind %v, got %v instead", expectedKind, kind) return nil } if err := body.Unpack(buf); err != nil { - resp.Error = fmt.Errorf("could not decode response body: %w", err) + resp.error = fmt.Errorf("could not decode response body: %w", err) return nil } - resp.Body = body - resp.Error = nil + resp.body = body + resp.error = nil return nil } -func CDCRequest( +const cdcSingleTimeout = 100 * time.Millisecond +const cdcMaxElapsed = 10 * time.Second + +func (c *Client) checkRepeatedCDCRequestError( logger LogLevels, - writer io.Writer, - reader io.Reader, - requestId uint64, - reqBody bincode.Packable, + // these are already filled in by now + req cdcRequest, + resp msgs.CDCResponse, + respErr msgs.ErrCode, +) error { + switch reqBody := req.body.(type) { + case *msgs.RenameDirectoryReq: + // We repeat the request, but the previous had actually gone through: + // we need to check if we haven't created the thing already. + if respErr == msgs.EDGE_NOT_FOUND { + // Happens when a request succeeds, and then the response gets lost. + // We try to apply some heuristics to let this slide. See convo following + // . + // + // Specifically, check that the last snapshot edge is what we expect if + // we had just moved it, and that the target edge also exists. + logger.Info("following up on EDGE_NOT_FOUND after repeated RenameDirectoryReq %+v", reqBody) + if !c.checkDeletedEdge(logger, reqBody.OldOwnerId, reqBody.TargetId, reqBody.OldName, reqBody.OldCreationTime, false) { + return respErr + } + // Then we check the target edge, and update creation time + respBody := resp.(*msgs.RenameDirectoryResp) + if !c.checkNewEdgeAfterRename(logger, reqBody.NewOwnerId, reqBody.TargetId, reqBody.NewName, &respBody.CreationTime) { + return respErr + } + logger.Info("recovered from EDGE_NOT_FOUND, will fill in creation time") + return nil + } + // in a decent language this branch and the previous could be merged + case *msgs.RenameFileReq: + if respErr == msgs.EDGE_NOT_FOUND { + logger.Info("following up on EDGE_NOT_FOUND after repeated RenameFileReq %+v", reqBody) + if !c.checkDeletedEdge(logger, reqBody.OldOwnerId, reqBody.TargetId, reqBody.OldName, reqBody.OldCreationTime, false) { + return respErr + } + // Then we check the target edge, and update creation time + respBody := resp.(*msgs.RenameFileResp) + if !c.checkNewEdgeAfterRename(logger, reqBody.NewOwnerId, reqBody.TargetId, reqBody.NewName, &respBody.CreationTime) { + return respErr + } + logger.Info("recovered from EDGE_NOT_FOUND, will fill in creation time") + return nil + } + case *msgs.SoftUnlinkDirectoryReq: + if respErr == msgs.EDGE_NOT_FOUND { + logger.Info("following up on EDGE_NOT_FOUND after repeated SoftUnlinkDirectoryReq %+v", reqBody) + // Note that here we expect a non-owned edge, since we're deleting a directory. + if !c.checkDeletedEdge(logger, reqBody.OwnerId, reqBody.TargetId, reqBody.Name, reqBody.CreationTime, false) { + return respErr + } + return nil + } + } + return respErr +} + +func (c *Client) CDCRequest( + logger LogLevels, + reqBody msgs.CDCRequest, // Result will be written in here. If an error is returned, no guarantees // are made regarding the contents of `respBody`. - respBody bincode.Unpackable, + respBody msgs.CDCResponse, ) error { - if msgs.GetCDCMessageKind(reqBody) != msgs.GetCDCMessageKind(respBody) { + if reqBody.CDCRequestKind() != respBody.CDCResponseKind() { panic(fmt.Errorf("mismatching req %T and resp %T", reqBody, respBody)) } - req := cdcRequest{ - RequestId: requestId, - Body: reqBody, - } + sock := c.CDCSocket buffer := make([]byte, msgs.UDP_MTU) - // logger.Debug("about to send request %T to CDC", reqBody) - reqBytes := buffer - bincode.PackIntoBytes(&reqBytes, &req) - written, err := writer.Write(reqBytes) - if err != nil { - return fmt.Errorf("couldn't send request: %w", err) - } - if written < len(reqBytes) { - panic(fmt.Sprintf("incomplete send -- %v bytes written instead of %v", written, len(reqBytes))) - } - respBytes := buffer - // Keep going until we found the right request id -- - // we can't assume that what we get isn't some other - // request we thought was timed out. + attempts := 0 + startedAt := time.Now() + // will keep trying as long as we get timeouts for { - respBytes = respBytes[:cap(respBytes)] - read, err := reader.Read(respBytes) - respBytes = respBytes[:read] + elapsed := time.Since(startedAt) + if elapsed > cdcMaxElapsed { + logger.RaiseAlert(fmt.Errorf("giving up on request to CDC after waiting for %v", elapsed)) + return msgs.TIMEOUT + } + requestId := newRequestId() + req := cdcRequest{ + requestId: requestId, + body: reqBody, + } + reqBytes := buffer + bincode.PackIntoBytes(&reqBytes, &req) + logger.Debug("about to send request id %v (%T) to CDC, after %v attempts", requestId, reqBody, attempts) + written, err := sock.Write(reqBytes) if err != nil { - // pipe is broken, terminate with this err - return err + return fmt.Errorf("couldn't send request: %w", err) } - resp := UnpackedCDCResponse{ - Body: respBody, + if written < len(reqBytes) { + panic(fmt.Sprintf("incomplete send -- %v bytes written instead of %v", written, len(reqBytes))) } - if err := bincode.UnpackFromBytes(&resp, respBytes); err != nil { - logger.RaiseAlert(fmt.Errorf("could not decode response to request %v, will continue waiting for responses: %w", req.RequestId, err)) - continue + // Keep going until we found the right request id -- + // we can't assume that what we get isn't some other + // request we thought was timed out. + sock.SetReadDeadline(time.Now().Add(cdcSingleTimeout)) + for { + respBytes := buffer + read, err := sock.Read(respBytes) + respBytes = respBytes[:read] + if err != nil { + isTimeout := false + switch netErr := err.(type) { + case net.Error: + isTimeout = netErr.Timeout() + } + if isTimeout { + logger.Debug("got network timeout error %v, will try to retry", err) + break // keep trying + } + // pipe is broken somehow, terminate immediately with this err + return err + } + resp := unpackedCDCResponse{ + body: respBody, + } + if err := bincode.UnpackFromBytes(&resp, respBytes); err != nil { + logger.RaiseAlert(fmt.Errorf("could not decode response to request %v, will continue waiting for responses: %w", req.requestId, err)) + continue + } + if resp.requestId != req.requestId { + logger.RaiseAlert(fmt.Errorf("dropping response %v, since we expected request id %v. body: %v, error: %v", resp.requestId, req.requestId, resp.body, resp.error)) + continue + } + // we've gotten a response + elapsed := time.Since(startedAt) + if c.Counters != nil { + msgKind := reqBody.CDCRequestKind() + atomic.AddInt64(&c.Counters.CDCReqsCounts[msgKind], 1) + atomic.AddInt64(&c.Counters.CDCReqsNanos[msgKind], elapsed.Nanoseconds()) + } + respErr := resp.error + if respErr != nil { + isTimeout := false + switch eggsErr := err.(type) { + case msgs.ErrCode: + isTimeout = eggsErr == msgs.TIMEOUT + } + if isTimeout { + logger.Debug("got resp timeout error %v, will try to retry", err) + break // keep trying + } + } + switch eggsErr := respErr.(type) { + case msgs.ErrCode: + // If we're past the first attempt, there are cases where errors are not what they + // seem. + if attempts > 0 { + respErr = c.checkRepeatedCDCRequestError(logger, req, respBody, eggsErr) + } + } + // check if it's an error or not + if respErr != nil { + logger.Debug("got error %v (%T) from CDC (took %v)", respErr, respErr, elapsed) + return respErr + } + logger.Debug("got response %T from CDC (took %v)", respBody, elapsed) + return nil } - if resp.RequestId != req.RequestId { - logger.RaiseAlert(fmt.Errorf("dropping response %v, since we expected request id %v. body: %v, error: %w", resp.RequestId, req.RequestId, resp.Body, resp.Error)) - continue - } - // we managed to decode, we just need to check that it's not an error - if resp.Error != nil { - logger.Debug("got error %v from CDC", resp.Error) - return resp.Error - } - logger.Debug("got response %T from CDC", respBody) - return nil + attempts++ } } -// This function will set the deadline for the socket. -// TODO does the deadline persist -- i.e. are we permanently modifying this socket. -func CDCRequestSocket( - logger LogLevels, - sock *net.UDPConn, - timeout time.Duration, - reqBody bincode.Packable, - respBody bincode.Unpackable, -) error { - if timeout == time.Duration(0) { - panic("zero duration") - } - sock.SetReadDeadline(time.Now().Add(timeout)) - return CDCRequest(logger, sock, sock, uint64(msgs.Now()), reqBody, respBody) -} - -func CDCSocket() (*net.UDPConn, error) { +func CreateCDCSocket() (*net.UDPConn, error) { socket, err := net.DialUDP("udp4", nil, &net.UDPAddr{Port: msgs.CDC_PORT}) if err != nil { return nil, fmt.Errorf("could not create CDC socket: %w", err) diff --git a/go/eggs/client.go b/go/eggs/client.go index c6184af2..b375592c 100644 --- a/go/eggs/client.go +++ b/go/eggs/client.go @@ -1,124 +1,169 @@ package eggs import ( + "crypto/cipher" "fmt" "net" - "time" "xtx/eggsfs/bincode" "xtx/eggsfs/msgs" ) -type Client interface { - ShardRequest(log LogLevels, shid msgs.ShardId, req bincode.Packable, resp bincode.Unpackable) error - CDCRequest(log LogLevels, req bincode.Packable, resp bincode.Unpackable) error +type ShardSocketFactory interface { + GetShardSocket(shid msgs.ShardId) (*net.UDPConn, error) + ReleaseShardSocket(shid msgs.ShardId) +} + +type ClientCounters struct { + // these arrays are indexed by req type + ShardReqsCounts [256]int64 + ShardReqsNanos [256]int64 + CDCReqsCounts [256]int64 + CDCReqsNanos [256]int64 +} + +func (c *ClientCounters) TotalShardRequests() int64 { + total := int64(0) + for i := 0; i < 256; i++ { + total += c.ShardReqsCounts[i] + } + return total +} + +func (c *ClientCounters) TotalCDCRequests() int64 { + total := int64(0) + for i := 0; i < 256; i++ { + total += c.CDCReqsCounts[i] + } + return total +} + +type Client struct { + ShardSocketFactory ShardSocketFactory + CDCSocket *net.UDPConn + Counters *ClientCounters + CDCKey cipher.Block +} + +func NewClient(shid *msgs.ShardId, counters *ClientCounters, cdcKey cipher.Block) (*Client, error) { + var err error + c := Client{} + c.CDCSocket, err = CreateCDCSocket() + if err != nil { + return nil, err + } + if shid != nil { + c.ShardSocketFactory, err = NewShardSpecificFactory(*shid) + if err != nil { + c.CDCSocket.Close() + return nil, err + } + } else { + c.ShardSocketFactory, err = NewAllShardsFactory() + if err != nil { + c.CDCSocket.Close() + return nil, err + } + } + c.Counters = counters + c.CDCKey = cdcKey + return &c, nil +} + +func (c *Client) Close() { + switch factory := c.ShardSocketFactory.(type) { + case *AllShardsFactory: + factory.Close() + case *ShardSpecificFactory: + factory.Close() + default: + panic(fmt.Errorf("bad factory %T", c.ShardSocketFactory)) + } + if err := c.CDCSocket.Close(); err != nil { + panic(err) + } } // Holds sockets to all 256 shards -type AllShardsClient struct { - timeout time.Duration - shardSocks []*net.UDPConn - cdcSock *net.UDPConn +type AllShardsFactory struct { + shardSocks [256]*net.UDPConn } -func NewAllShardsClient() (*AllShardsClient, error) { +func NewAllShardsFactory() (*AllShardsFactory, error) { var err error - c := AllShardsClient{ - timeout: 10 * time.Second, - } - c.shardSocks = make([]*net.UDPConn, 256) + c := AllShardsFactory{} for i := 0; i < 256; i++ { - c.shardSocks[msgs.ShardId(i)], err = ShardSocket(msgs.ShardId(i)) + c.shardSocks[msgs.ShardId(i)], err = CreateShardSocket(msgs.ShardId(i)) if err != nil { return nil, err } } - c.cdcSock, err = CDCSocket() - if err != nil { - return nil, err - } return &c, nil } -// TODO probably convert these errors to stderr, we can't do much with them usually -// but they'd be worth knowing about -func (c *AllShardsClient) Close() error { +func (c *AllShardsFactory) Close() { for _, sock := range c.shardSocks { if err := sock.Close(); err != nil { - return err + panic(err) } } - if err := c.cdcSock.Close(); err != nil { - return err - } - return nil } -func (c *AllShardsClient) ShardRequest(log LogLevels, shid msgs.ShardId, req bincode.Packable, resp bincode.Unpackable) error { - return ShardRequestSocket(log, nil, c.shardSocks[shid], c.timeout, req, resp) +func (c *AllShardsFactory) GetShardSocket(shid msgs.ShardId) (*net.UDPConn, error) { + return c.shardSocks[int(shid)], nil } -func (c *AllShardsClient) CDCRequest(log LogLevels, req bincode.Packable, resp bincode.Unpackable) error { - return CDCRequestSocket(log, c.cdcSock, c.timeout, req, resp) -} +func (c *AllShardsFactory) ReleaseShardSocket(msgs.ShardId) {} // For when you almost always do requests to a single shard (e.g. in GC). -type ShardSpecificClient struct { - timeout time.Duration +type ShardSpecificFactory struct { shid msgs.ShardId shardSock *net.UDPConn - cdcSock *net.UDPConn } // TODO probably convert these errors to stderr, we can't do much with them usually // but they'd be worth knowing about -func (c *ShardSpecificClient) Close() error { +func (c *ShardSpecificFactory) Close() error { if err := c.shardSock.Close(); err != nil { return err } - if err := c.cdcSock.Close(); err != nil { - return err - } return nil } -func NewShardSpecificClient(shid msgs.ShardId) (*ShardSpecificClient, error) { - c := ShardSpecificClient{ - timeout: time.Second, - shid: shid, +func NewShardSpecificFactory(shid msgs.ShardId) (*ShardSpecificFactory, error) { + c := ShardSpecificFactory{ + shid: shid, } var err error - c.shardSock, err = ShardSocket(shid) - if err != nil { - return nil, err - } - c.cdcSock, err = CDCSocket() + c.shardSock, err = CreateShardSocket(shid) if err != nil { return nil, err } return &c, nil } -func (c *ShardSpecificClient) ShardRequest(log LogLevels, shid msgs.ShardId, req bincode.Packable, resp bincode.Unpackable) error { - var shardSock *net.UDPConn - var err error +func (c *ShardSpecificFactory) GetShardSocket(shid msgs.ShardId) (*net.UDPConn, error) { if shid == c.shid { - shardSock = c.shardSock + return c.shardSock, nil } else { - shardSock, err = ShardSocket(shid) + shardSock, err := CreateShardSocket(shid) if err != nil { - return err + return nil, err } - defer shardSock.Close() + return shardSock, nil } - return ShardRequestSocket(log, nil, shardSock, c.timeout, req, resp) } -func (c *ShardSpecificClient) CDCRequest(log LogLevels, req bincode.Packable, resp bincode.Unpackable) error { - return CDCRequestSocket(log, c.cdcSock, c.timeout, req, resp) +func (c *ShardSpecificFactory) ReleaseShardSocket(shid msgs.ShardId) { + if shid == c.shid { + return + } + if err := c.shardSock.Close(); err != nil { + panic(err) + } } // nil if the directory has no directory info (i.e. if it is inherited) -func GetDirectoryInfo(log LogLevels, c Client, id msgs.InodeId) (*msgs.DirectoryInfoBody, error) { +func GetDirectoryInfo(log LogLevels, c *Client, id msgs.InodeId) (*msgs.DirectoryInfoBody, error) { req := msgs.StatDirectoryReq{ Id: id, } @@ -136,7 +181,7 @@ func GetDirectoryInfo(log LogLevels, c Client, id msgs.InodeId) (*msgs.Directory return &info, nil } -func SetDirectoryInfo(log LogLevels, c Client, id msgs.InodeId, inherited bool, info *msgs.DirectoryInfoBody) error { +func SetDirectoryInfo(log LogLevels, c *Client, id msgs.InodeId, inherited bool, info *msgs.DirectoryInfoBody) error { var buf []byte if inherited { if info != nil { diff --git a/go/eggs/gc.go b/go/eggs/gc.go index 8afecd98..5dee7875 100644 --- a/go/eggs/gc.go +++ b/go/eggs/gc.go @@ -15,7 +15,7 @@ type DestructionStats struct { func DestructFile( log LogLevels, - client Client, + client *Client, blockServicesKeys map[msgs.BlockServiceId][16]byte, stats *DestructionStats, id msgs.InodeId, deadline msgs.EggsTime, cookie [8]byte, @@ -25,6 +25,7 @@ func DestructFile( now := msgs.Now() if now < deadline { log.Debug("%v: deadline not expired (deadline=%v, now=%v), not destructing", id, deadline, now) + return nil } // TODO need to think about transient files that already had dirty spans at the end. // Keep destructing spans until we have nothing @@ -86,7 +87,7 @@ func DestructFile( } func destructFilesInternal( - log LogLevels, client Client, shid msgs.ShardId, stats *DestructionStats, blockServicesKeys map[msgs.BlockServiceId][16]byte, + log LogLevels, client *Client, shid msgs.ShardId, stats *DestructionStats, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) error { req := msgs.VisitTransientFilesReq{} resp := msgs.VisitTransientFilesResp{} @@ -117,9 +118,9 @@ func destructFilesInternal( // we'll just generate the proof ourselves and certify. This is only useful // for testing, obviously. func DestructFiles( - log LogLevels, shid msgs.ShardId, blockServicesKeys map[msgs.BlockServiceId][16]byte, + log LogLevels, counters *ClientCounters, shid msgs.ShardId, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) error { - client, err := NewShardSpecificClient(shid) + client, err := NewClient(&shid, counters, nil) if err != nil { return err } @@ -133,9 +134,9 @@ func DestructFiles( } func DestructFilesInAllShards( - log LogLevels, blockServicesKeys map[msgs.BlockServiceId][16]byte, + log LogLevels, counters *ClientCounters, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) error { - client, err := NewAllShardsClient() + client, err := NewClient(nil, counters, nil) if err != nil { return err } @@ -160,7 +161,7 @@ type CollectStats struct { // returns whether all the edges were removed func applyPolicy( - log LogLevels, client Client, stats *CollectStats, + log LogLevels, client *Client, stats *CollectStats, dirId msgs.InodeId, dirInfo *msgs.DirectoryInfoBody, edges []msgs.Edge, ) (bool, error) { policy := SnapshotPolicy{ @@ -182,23 +183,23 @@ func applyPolicy( // same shard, we can delete directly. We also know that this is not a directory (it's an // owned, but snapshot edge) log.Debug("%v: removing owned snapshot edge %+v", dirId, edge) - req := msgs.IntraShardHardFileUnlinkReq{ + req := msgs.SameShardHardFileUnlinkReq{ OwnerId: dirId, TargetId: edge.TargetId.Id(), Name: edge.Name, CreationTime: edge.CreationTime, } - err = client.ShardRequest(log, dirId.Shard(), &req, &msgs.IntraShardHardFileUnlinkResp{}) + err = client.ShardRequest(log, dirId.Shard(), &req, &msgs.SameShardHardFileUnlinkResp{}) } else { // different shard, we need to go through the CDC log.Debug("%v: removing cross-shard owned edge %+v", dirId, edge) - req := msgs.HardUnlinkFileReq{ + req := msgs.CrossShardHardUnlinkFileReq{ OwnerId: dirId, TargetId: edge.TargetId.Id(), Name: edge.Name, CreationTime: edge.CreationTime, } - err = client.CDCRequest(log, &req, &msgs.HardUnlinkFileResp{}) + err = client.CDCRequest(log, &req, &msgs.CrossShardHardUnlinkFileResp{}) } } else { // non-owned edge, we can just kill it without worrying about much. @@ -220,7 +221,7 @@ func applyPolicy( } func requestDirectoryInfo( - log LogLevels, client Client, dirInfoCache *DirInfoCache, dirId msgs.InodeId, + log LogLevels, client *Client, dirInfoCache *DirInfoCache, dirId msgs.InodeId, ) (*msgs.DirectoryInfoBody, error) { statResp := msgs.StatDirectoryResp{} err := client.ShardRequest(log, dirId.Shard(), &msgs.StatDirectoryReq{Id: dirId}, &statResp) @@ -232,7 +233,7 @@ func requestDirectoryInfo( } func resolveDirectoryInfo( - log LogLevels, client Client, dirInfoCache *DirInfoCache, dirId msgs.InodeId, statResp *msgs.StatDirectoryResp, + log LogLevels, client *Client, dirInfoCache *DirInfoCache, dirId msgs.InodeId, statResp *msgs.StatDirectoryResp, ) (*msgs.DirectoryInfoBody, error) { // we have the data directly in the stat response if len(statResp.Info) > 0 { @@ -260,7 +261,7 @@ func resolveDirectoryInfo( return dirInfoBody, nil } -func CollectDirectory(log LogLevels, client Client, dirInfoCache *DirInfoCache, stats *CollectStats, dirId msgs.InodeId) error { +func CollectDirectory(log LogLevels, client *Client, dirInfoCache *DirInfoCache, stats *CollectStats, dirId msgs.InodeId) error { log.Debug("%v: collecting", dirId) stats.VisitedDirectories++ @@ -337,7 +338,7 @@ func CollectDirectory(log LogLevels, client Client, dirInfoCache *DirInfoCache, return nil } -func collectDirectoriesInternal(log LogLevels, client Client, stats *CollectStats, shid msgs.ShardId) error { +func collectDirectoriesInternal(log LogLevels, client *Client, stats *CollectStats, shid msgs.ShardId) error { dirInfoCache := NewDirInfoCache() req := msgs.VisitDirectoriesReq{} resp := msgs.VisitDirectoriesResp{} @@ -365,8 +366,8 @@ func collectDirectoriesInternal(log LogLevels, client Client, stats *CollectStat return nil } -func CollectDirectories(log LogLevels, shid msgs.ShardId) error { - client, err := NewShardSpecificClient(shid) +func CollectDirectories(log LogLevels, counters *ClientCounters, shid msgs.ShardId) error { + client, err := NewClient(&shid, counters, nil) if err != nil { return err } @@ -379,8 +380,8 @@ func CollectDirectories(log LogLevels, shid msgs.ShardId) error { return nil } -func CollectDirectoriesInAllShards(log LogLevels) error { - client, err := NewAllShardsClient() +func CollectDirectoriesInAllShards(log LogLevels, counters *ClientCounters) error { + client, err := NewClient(nil, counters, nil) if err != nil { return err } diff --git a/go/eggs/managedprocess.go b/go/eggs/managedprocess.go index 8b83b8f3..d4056317 100644 --- a/go/eggs/managedprocess.go +++ b/go/eggs/managedprocess.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "net" "os" "os/exec" "os/signal" @@ -207,9 +206,9 @@ func (procs *ManagedProcesses) Close() { return } proc.cmd.Process.Signal(syscall.SIGTERM) - // wait at most 5 seconds for process to come down + // wait at most 20 seconds for process to come down go func() { - time.Sleep(5 * time.Second) + time.Sleep(20 * time.Second) fmt.Printf("process %s not terminating, killing it\n", proc.name) proc.cmd.Process.Kill() // ignoring error on purpose, there isn't much to do by now }() @@ -356,20 +355,28 @@ func WaitForShuckle(shuckleHost string, expectedBlockServices int, timeout time. } type ShardOpts struct { - Exe string - Dir string - Verbose bool - Shid msgs.ShardId - Valgrind bool - WaitForShuckle bool + Exe string + Dir string + Verbose bool + Shid msgs.ShardId + Valgrind bool + WaitForShuckle bool + Perf bool + IncomingPacketDrop float64 + OutgoingPacketDrop float64 } func (procs *ManagedProcesses) StartShard(opts *ShardOpts) { + if opts.Valgrind && opts.Perf { + panic(fmt.Errorf("cannot do valgrind and perf together")) + } createDataDir(opts.Dir) args := []string{ "--log-file", path.Join(opts.Dir, "log"), opts.Dir, fmt.Sprintf("%d", int(opts.Shid)), + "--incoming-packet-drop", fmt.Sprintf("%g", opts.IncomingPacketDrop), + "--outgoing-packet-drop", fmt.Sprintf("%g", opts.OutgoingPacketDrop), } if opts.Verbose { args = append(args, "--verbose") @@ -399,19 +406,37 @@ func (procs *ManagedProcesses) StartShard(opts *ShardOpts) { mpArgs.Args..., ) procs.Start(&mpArgs) + } else if opts.Perf { + mpArgs.Name = fmt.Sprintf("%s (perf)", mpArgs.Name) + mpArgs.Exe = "perf" + mpArgs.Args = append( + []string{ + "record", + fmt.Sprintf("--output=%s", path.Join(opts.Dir, "perf.data")), + opts.Exe, + }, + mpArgs.Args..., + ) + procs.Start(&mpArgs) } else { procs.Start(&mpArgs) } } type CDCOpts struct { - Exe string - Dir string - Verbose bool - Valgrind bool + Exe string + Dir string + Verbose bool + Valgrind bool + Perf bool + IncomingPacketDrop float64 + OutgoingPacketDrop float64 } func (procs *ManagedProcesses) StartCDC(opts *CDCOpts) { + if opts.Valgrind && opts.Perf { + panic(fmt.Errorf("cannot do valgrind and perf together")) + } createDataDir(opts.Dir) args := []string{ "--log-file", path.Join(opts.Dir, "log"), @@ -442,6 +467,18 @@ func (procs *ManagedProcesses) StartCDC(opts *CDCOpts) { mpArgs.Args..., ) procs.Start(&mpArgs) + } else if opts.Perf { + mpArgs.Name = fmt.Sprintf("%s (perf)", mpArgs.Name) + mpArgs.Exe = "perf" + mpArgs.Args = append( + []string{ + "record", + fmt.Sprintf("--output=%s", path.Join(opts.Dir, "perf.data")), + opts.Exe, + }, + mpArgs.Args..., + ) + procs.Start(&mpArgs) } else { procs.Start(&mpArgs) } @@ -450,27 +487,24 @@ func (procs *ManagedProcesses) StartCDC(opts *CDCOpts) { func WaitForShard(shid msgs.ShardId, timeout time.Duration) { t0 := time.Now() var err error - var sock *net.UDPConn + var client *Client for { t := time.Now() if t.Sub(t0) > timeout { panic(fmt.Errorf("giving up waiting for shard %v, last error: %w", shid, err)) } - sock, err = ShardSocket(shid) + client, err = NewClient(&shid, nil, nil) if err != nil { - sock.Close() time.Sleep(10 * time.Millisecond) continue } - err = ShardRequestSocket( + err = client.ShardRequest( LogBlackHole{}, - nil, - sock, - time.Second, + shid, &msgs.VisitDirectoriesReq{}, &msgs.VisitDirectoriesResp{}, ) - sock.Close() + client.Close() if err != nil { time.Sleep(10 * time.Millisecond) continue diff --git a/go/eggs/migrate.go b/go/eggs/migrate.go index 7ffbd92f..2b2b888a 100644 --- a/go/eggs/migrate.go +++ b/go/eggs/migrate.go @@ -12,7 +12,7 @@ type scratchFile struct { offset uint64 } -func ensureScratchFile(log LogLevels, client Client, migratingIn msgs.InodeId, file *scratchFile) error { +func ensureScratchFile(log LogLevels, client *Client, migratingIn msgs.InodeId, file *scratchFile) error { if file.id != msgs.NULL_INODE_ID { return nil } @@ -36,7 +36,7 @@ func ensureScratchFile(log LogLevels, client Client, migratingIn msgs.InodeId, f } func copyBlock( - log LogLevels, client Client, + log LogLevels, client *Client, file *scratchFile, blockServices []msgs.BlockService, blockSize uint64, storageClass msgs.StorageClass, block *msgs.FetchedBlock, ) (msgs.BlockId, error) { blockService := blockServices[block.BlockServiceIx] @@ -90,7 +90,7 @@ type MigrateStats struct { // it'll be recovered from the other. If possible, anyway. // // Returns the number of migrated blocks. -func MigrateBlocksInFile(log LogLevels, client Client, stats *MigrateStats, blockServiceId msgs.BlockServiceId, fileId msgs.InodeId) error { +func MigrateBlocksInFile(log LogLevels, client *Client, stats *MigrateStats, blockServiceId msgs.BlockServiceId, fileId msgs.InodeId) error { scratchFile := scratchFile{} stopHeartbeat := make(chan struct{}, 1) defer func() { stopHeartbeat <- struct{}{} }() @@ -186,7 +186,7 @@ func MigrateBlocksInFile(log LogLevels, client Client, stats *MigrateStats, bloc // Tries to migrate as many blocks as possible from that block service in a certain // shard. -func migrateBlocksInternal(log LogLevels, client Client, stats *MigrateStats, shid msgs.ShardId, blockServiceId msgs.BlockServiceId) error { +func migrateBlocksInternal(log LogLevels, client *Client, stats *MigrateStats, shid msgs.ShardId, blockServiceId msgs.BlockServiceId) error { filesReq := msgs.BlockServiceFilesReq{BlockServiceId: blockServiceId} filesResp := msgs.BlockServiceFilesResp{} for { @@ -208,7 +208,7 @@ func migrateBlocksInternal(log LogLevels, client Client, stats *MigrateStats, sh } func MigrateBlocks(log LogLevels, shid msgs.ShardId, blockServiceId msgs.BlockServiceId) error { - client, err := NewShardSpecificClient(shid) + client, err := NewClient(&shid, nil, nil) if err != nil { return err } @@ -222,7 +222,7 @@ func MigrateBlocks(log LogLevels, shid msgs.ShardId, blockServiceId msgs.BlockSe } func MigrateBlocksInAllShards(log LogLevels, blockServiceId msgs.BlockServiceId) error { - client, err := NewAllShardsClient() + client, err := NewClient(nil, nil, nil) if err != nil { return err } diff --git a/go/eggs/shard_test.go b/go/eggs/shard_test.go index 14a74a8e..f8144fa8 100644 --- a/go/eggs/shard_test.go +++ b/go/eggs/shard_test.go @@ -1,5 +1,6 @@ package eggs +/* import ( "bytes" "fmt" @@ -71,7 +72,7 @@ func TestReqOK(t *testing.T) { alerter := mockAlerter{} response := msgs.VisitDirectoriesResp{} err := ShardRequest( - &alerter, nil, new(bytes.Buffer), &responses, requestId, &request, &response, + &alerter, nil, nil, new(bytes.Buffer), &responses, requestId, &request, &response, ) for _, err := range alerter { fmt.Printf("err: %v\n", err) @@ -92,7 +93,8 @@ func TestReqTimeout(t *testing.T) { assert.Nil(t, err) defer sock.Close() err = ShardRequestSocket( - &mockAlerter{}, nil, sock, time.Millisecond, &msgs.VisitTransientFilesReq{}, &msgs.VisitTransientFilesResp{}, + &mockAlerter{}, nil, nil, sock, time.Millisecond, &msgs.VisitTransientFilesReq{}, &msgs.VisitTransientFilesResp{}, ) assert.NotNil(t, err) } +*/ diff --git a/go/eggs/shardreq.go b/go/eggs/shardreq.go index 84fbfb4f..0862dac2 100644 --- a/go/eggs/shardreq.go +++ b/go/eggs/shardreq.go @@ -3,8 +3,8 @@ package eggs import ( "crypto/cipher" "fmt" - "io" "net" + "sync/atomic" "time" "xtx/eggsfs/bincode" "xtx/eggsfs/msgs" @@ -12,19 +12,19 @@ import ( type shardRequest struct { requestId uint64 - body bincode.Packable + body msgs.ShardRequest } func (req *shardRequest) Pack(buf *bincode.Buf) { buf.PackU32(msgs.SHARD_REQ_PROTOCOL_VERSION) buf.PackU64(req.requestId) - buf.PackU8(uint8(msgs.GetShardMessageKind(req.body))) + buf.PackU8(uint8(req.body.ShardRequestKind())) req.body.Pack(buf) } func packShardRequest(out *[]byte, req *shardRequest, cdcKey cipher.Block) { written := bincode.PackToBytes(*out, req) - if (msgs.GetShardMessageKind(req.body) & 0x80) != 0 { + if (req.body.ShardRequestKind() & 0x80) != 0 { // privileged, we must have a key if cdcKey == nil { panic(fmt.Errorf("trying to encode request of privileged kind %T, but got no CDC key", req.body)) @@ -38,19 +38,19 @@ func packShardRequest(out *[]byte, req *shardRequest, cdcKey cipher.Block) { type ShardResponse struct { RequestId uint64 - Body bincode.Bincodable + Body msgs.ShardResponse } func (req *ShardResponse) Pack(buf *bincode.Buf) { buf.PackU32(msgs.SHARD_RESP_PROTOCOL_VERSION) buf.PackU64(req.RequestId) - buf.PackU8(uint8(msgs.GetShardMessageKind(req.Body))) + buf.PackU8(uint8(req.Body.ShardResponseKind())) req.Body.Pack(buf) } -type UnpackedShardResponse struct { +type unpackedShardResponse struct { RequestId uint64 - Body bincode.Unpackable + Body msgs.ShardResponse // This is where we could decode as far as decoding the request id, // but then errored after. We are interested in this case because // we can safely drop every erroring request that is not our request @@ -61,9 +61,9 @@ type UnpackedShardResponse struct { Error error } -func (resp *UnpackedShardResponse) Unpack(buf *bincode.Buf) error { +func (resp *unpackedShardResponse) Unpack(buf *bincode.Buf) error { // panic immediately if we get passed a bogus body - expectedKind := msgs.GetShardMessageKind(resp.Body) + expectedKind := resp.Body.ShardResponseKind() // decode message header var ver uint32 if err := buf.UnpackU32(&ver); err != nil { @@ -106,87 +106,222 @@ func (resp *UnpackedShardResponse) Unpack(buf *bincode.Buf) error { return nil } -func ShardRequest( +const shardSingleTimeout = 10 * time.Millisecond +const shardMaxElapsed = 1 * time.Second + +var requestIdGenerator = uint64(0) + +func newRequestId() uint64 { + return atomic.AddUint64(&requestIdGenerator, 1) +} + +func (c *Client) checkDeletedEdge( logger LogLevels, - cdcKey cipher.Block, - writer io.Writer, - reader io.Reader, - requestId uint64, - reqBody bincode.Packable, + dirId msgs.InodeId, + targetId msgs.InodeId, + name string, + creationTime msgs.EggsTime, + owned bool, +) bool { + // First we check the edge we expect to have moved away + snapshotResp := msgs.SnapshotLookupResp{} + err := c.ShardRequest(logger, dirId.Shard(), &msgs.SnapshotLookupReq{DirId: dirId, Name: name, StartFrom: creationTime}, &snapshotResp) + if err != nil { + logger.Info("failed to get snapshot edge (err %v), giving up and returning original error", err) + return false + } + if len(snapshotResp.Edges) != 2 { + logger.Info("expected 1 snapshot edges but got %v, giving up and returning original error", len(snapshotResp.Edges)) + return false + } + oldEdge := snapshotResp.Edges[0] + if oldEdge.TargetId.Extra() != owned || oldEdge.TargetId.Id() != targetId || oldEdge.CreationTime != creationTime { + logger.Info("got mismatched snapshot edge (%+v), giving up and returning original error", oldEdge) + return false + } + deleteEdge := snapshotResp.Edges[1] + if deleteEdge.TargetId.Id() != msgs.NULL_INODE_ID { + logger.Info("expected deletion edge but got %+v, giving up and returning original error", deleteEdge) + return false + } + return true +} + +func (c *Client) checkNewEdgeAfterRename( + logger LogLevels, + dirId msgs.InodeId, + targetId msgs.InodeId, + name string, + creationTime *msgs.EggsTime, +) bool { + // Then we check the target edge + lookupResp := msgs.LookupResp{} + err := c.ShardRequest(logger, dirId.Shard(), &msgs.LookupReq{DirId: dirId, Name: name}, &lookupResp) + if err != nil { + logger.Info("failed to get current edge (err %v), giving up and returning original error", err) + return false + } + if lookupResp.TargetId != targetId { + logger.Info("got mismatched current target (%v), giving up and returning original error", lookupResp.TargetId) + return false + } + *creationTime = lookupResp.CreationTime + return true +} + +func (c *Client) checkRepeatedShardRequestError( + logger LogLevels, + // these are already filled in by now + req shardRequest, + resp msgs.ShardResponse, + respErr msgs.ErrCode, +) error { + switch reqBody := req.body.(type) { + case *msgs.SameDirectoryRenameReq: + if respErr == msgs.EDGE_NOT_FOUND { + // Happens when a request succeeds, and then the response gets lost. + // We try to apply some heuristics to let this slide. See convo following + // . + // + // Specifically, check that the last snapshot edge is what we expect if + // we had just moved it, and that the target edge also exists. + logger.Info("following up on EDGE_NOT_FOUND after repeated SameDirectoryRenameReq %+v", reqBody) + if !c.checkDeletedEdge(logger, reqBody.DirId, reqBody.TargetId, reqBody.OldName, reqBody.OldCreationTime, false) { + return respErr + } + // Then we check the target edge, and update creation time + respBody := resp.(*msgs.SameDirectoryRenameResp) + if !c.checkNewEdgeAfterRename(logger, reqBody.DirId, reqBody.TargetId, reqBody.NewName, &respBody.NewCreationTime) { + return respErr + } + logger.Info("recovered from EDGE_NOT_FOUND, will fill in creation time") + return nil + } + case *msgs.SoftUnlinkFileReq: + if respErr == msgs.EDGE_NOT_FOUND { + logger.Info("following up on EDGE_NOT_FOUND after repeated SoftUnlinkFileReq %+v", reqBody) + if !c.checkDeletedEdge(logger, reqBody.OwnerId, reqBody.FileId, reqBody.Name, reqBody.CreationTime, true) { + return respErr + } + return nil + } + } + return respErr +} + +func (c *Client) ShardRequest( + logger LogLevels, + shid msgs.ShardId, + reqBody msgs.ShardRequest, // Result will be written in here. If an error is returned, no guarantees // are made regarding the contents of `respBody`. - respBody bincode.Unpackable, + respBody msgs.ShardResponse, ) error { - if msgs.GetShardMessageKind(reqBody) != msgs.GetShardMessageKind(respBody) { + if reqBody.ShardRequestKind() != respBody.ShardResponseKind() { panic(fmt.Errorf("mismatching req %T and resp %T", reqBody, respBody)) } - req := shardRequest{ - requestId: requestId, - body: reqBody, - } - buffer := make([]byte, msgs.UDP_MTU) - reqBytes := buffer - t0 := time.Now() - logger.Debug("about to send request %T to shard", reqBody) - packShardRequest(&reqBytes, &req, cdcKey) - written, err := writer.Write(reqBytes) + sock, err := c.ShardSocketFactory.GetShardSocket(shid) if err != nil { - return fmt.Errorf("couldn't send request: %w", err) + return err } - if written < len(reqBytes) { - panic(fmt.Sprintf("incomplete send -- %v bytes written instead of %v", written, len(reqBytes))) - } - respBytes := buffer - // Keep going until we found the right request id -- - // we can't assume that what we get isn't some other - // request we thought was timed out. + defer c.ShardSocketFactory.ReleaseShardSocket(shid) + buffer := make([]byte, msgs.UDP_MTU) + attempts := 0 + startedAt := time.Now() + // will keep trying as long as we get timeouts for { - respBytes = respBytes[:cap(respBytes)] - read, err := reader.Read(respBytes) - respBytes = respBytes[:read] + elapsed := time.Since(startedAt) + if elapsed > shardMaxElapsed { + logger.RaiseAlert(fmt.Errorf("giving up on request to shard after waiting for %v", elapsed)) + return msgs.TIMEOUT + } + requestId := newRequestId() + req := shardRequest{ + requestId: requestId, + body: reqBody, + } + reqBytes := buffer + packShardRequest(&reqBytes, &req, c.CDCKey) + logger.Debug("about to send request id %v (%T) to shard, after %v attempts", requestId, reqBody, attempts) + written, err := sock.Write(reqBytes) if err != nil { - // pipe is broken, terminate with this err - return err + return fmt.Errorf("couldn't send request: %w", err) } - resp := UnpackedShardResponse{ - Body: respBody, + if written < len(reqBytes) { + panic(fmt.Sprintf("incomplete send -- %v bytes written instead of %v", written, len(reqBytes))) } - if err := bincode.UnpackFromBytes(&resp, respBytes); err != nil { - logger.RaiseAlert(fmt.Errorf("could not decode response to request %v, will continue waiting for responses: %w", req.requestId, err)) - continue + // Keep going until we found the right request id -- + // we can't assume that what we get isn't some other + // request we thought was timed out. + sock.SetReadDeadline(time.Now().Add(shardSingleTimeout)) + for { + respBytes := buffer + read, err := sock.Read(respBytes) + respBytes = respBytes[:read] + if err != nil { + isTimeout := false + switch netErr := err.(type) { + case net.Error: + isTimeout = netErr.Timeout() + } + if isTimeout { + logger.Debug("got network timeout error %v, will try to retry", err) + break // keep trying + } + // pipe is broken somehow, terminate immediately with this err + return err + } + resp := unpackedShardResponse{ + Body: respBody, + } + if err := bincode.UnpackFromBytes(&resp, respBytes); err != nil { + logger.RaiseAlert(fmt.Errorf("could not decode response to request %v, will continue waiting for responses: %w", req.requestId, err)) + continue + } + if resp.RequestId != req.requestId { + logger.RaiseAlert(fmt.Errorf("dropping response %v, since we expected request id %v. body: %v, error: %v", resp.RequestId, req.requestId, resp.Body, resp.Error)) + continue + } + // we've gotten a response + elapsed := time.Since(startedAt) + if c.Counters != nil { + msgKind := reqBody.ShardRequestKind() + atomic.AddInt64(&c.Counters.ShardReqsCounts[msgKind], 1) + atomic.AddInt64(&c.Counters.ShardReqsNanos[msgKind], elapsed.Nanoseconds()) + } + respErr := resp.Error + if respErr != nil { + isTimeout := false + switch eggsErr := err.(type) { + case msgs.ErrCode: + isTimeout = eggsErr == msgs.TIMEOUT + } + if isTimeout { + logger.Debug("got resp timeout error %v, will try to retry", err) + break // keep trying + } + } + switch eggsErr := respErr.(type) { + case msgs.ErrCode: + // If we're past the first attempt, there are cases where errors are not what they + // seem. + if attempts > 0 { + respErr = c.checkRepeatedShardRequestError(logger, req, respBody, eggsErr) + } + } + // check if it's an error or not + if respErr != nil { + logger.Debug("got error %v from shard (took %v)", respErr, elapsed) + return respErr + } + logger.Debug("got response %T from shard (took %v)", respBody, elapsed) + return nil } - if resp.RequestId != req.requestId { - logger.RaiseAlert(fmt.Errorf("dropping response %v, since we expected request id %v. body: %v, error: %w", resp.RequestId, req.requestId, resp.Body, resp.Error)) - continue - } - // we managed to decode, we just need to check that it's not an error - if resp.Error != nil { - logger.Debug("got error %v from shard (took %v)", resp.Error, time.Since(t0)) - return resp.Error - } - logger.Debug("got response %T from shard (took %v)", respBody, time.Since(t0)) - return nil + attempts++ } } -// This function will set the deadline for the socket. -// TODO does the deadline persist -- i.e. are we permanently modifying this socket. -func ShardRequestSocket( - logger LogLevels, - cdcKey cipher.Block, - sock *net.UDPConn, - timeout time.Duration, - reqBody bincode.Packable, - respBody bincode.Unpackable, -) error { - if timeout == time.Duration(0) { - panic("zero duration") - } - sock.SetReadDeadline(time.Now().Add(timeout)) - return ShardRequest(logger, cdcKey, sock, sock, uint64(msgs.Now()), reqBody, respBody) -} - -func ShardSocket(shid msgs.ShardId) (*net.UDPConn, error) { +func CreateShardSocket(shid msgs.ShardId) (*net.UDPConn, error) { socket, err := net.DialUDP("udp4", nil, &net.UDPAddr{Port: shid.Port()}) if err != nil { return nil, fmt.Errorf("could not create shard socket: %w", err) diff --git a/go/gcdaemon/gcdaemon.go b/go/gcdaemon/gcdaemon.go index 4fb0e96a..1649ea5e 100644 --- a/go/gcdaemon/gcdaemon.go +++ b/go/gcdaemon/gcdaemon.go @@ -11,11 +11,19 @@ import ( "xtx/eggsfs/msgs" ) +func noRunawayArgs() { + if flag.NArg() > 0 { + fmt.Fprintf(os.Stderr, "Unexpected extra arguments %v\n", flag.Args()) + os.Exit(2) + } +} + func main() { verbose := flag.Bool("verbose", false, "Enables debug logging.") singleIteration := flag.Bool("single-iteration", false, "Whether to run a single iteration of GC and terminate.") logFile := flag.String("log-file", "", "File to log to, stdout if not provided.") flag.Parse() + noRunawayArgs() logOut := os.Stdout if *logFile != "" { @@ -69,7 +77,7 @@ func main() { fmt.Sprintf("shard%v", shard), func() { for { - eggs.CollectDirectories(log, shard) + eggs.CollectDirectories(log, nil, shard) if *singleIteration { break } diff --git a/go/integrationtest/cleanup.go b/go/integrationtest/cleanup.go index d69a2dd5..7a51b248 100644 --- a/go/integrationtest/cleanup.go +++ b/go/integrationtest/cleanup.go @@ -6,7 +6,7 @@ import ( "xtx/eggsfs/msgs" ) -func deleteDir(log eggs.LogLevels, client eggs.Client, ownerId msgs.InodeId, name string, dirId msgs.InodeId) { +func deleteDir(log eggs.LogLevels, client *eggs.Client, ownerId msgs.InodeId, name string, creationTime msgs.EggsTime, dirId msgs.InodeId) { readDirReq := msgs.ReadDirReq{ DirId: dirId, } @@ -17,10 +17,13 @@ func deleteDir(log eggs.LogLevels, client eggs.Client, ownerId msgs.InodeId, nam } for _, res := range readDirResp.Results { if res.TargetId.Type() == msgs.DIRECTORY { - deleteDir(log, client, dirId, res.Name, res.TargetId) + deleteDir(log, client, dirId, res.Name, res.CreationTime, res.TargetId) } else { if err := client.ShardRequest( - log, res.TargetId.Shard(), &msgs.SoftUnlinkFileReq{OwnerId: dirId, FileId: res.TargetId, Name: res.Name}, &msgs.SoftUnlinkFileResp{}, + log, + dirId.Shard(), + &msgs.SoftUnlinkFileReq{OwnerId: dirId, FileId: res.TargetId, Name: res.Name, CreationTime: res.CreationTime}, + &msgs.SoftUnlinkFileResp{}, ); err != nil { panic(err) } @@ -32,7 +35,7 @@ func deleteDir(log eggs.LogLevels, client eggs.Client, ownerId msgs.InodeId, nam } if ownerId != msgs.NULL_INODE_ID { if err := client.CDCRequest( - log, &msgs.SoftUnlinkDirectoryReq{OwnerId: ownerId, TargetId: dirId, Name: name}, &msgs.SoftUnlinkDirectoryResp{}, + log, &msgs.SoftUnlinkDirectoryReq{OwnerId: ownerId, TargetId: dirId, Name: name, CreationTime: creationTime}, &msgs.SoftUnlinkDirectoryResp{}, ); err != nil { panic(err) } @@ -40,16 +43,17 @@ func deleteDir(log eggs.LogLevels, client eggs.Client, ownerId msgs.InodeId, nam } func cleanupAfterTest( + log eggs.LogLevels, + counters *eggs.ClientCounters, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) { - log := &eggs.LogToStdout{} - client, err := eggs.NewAllShardsClient() + client, err := eggs.NewClient(nil, counters, nil) if err != nil { panic(err) } defer client.Close() // Delete all current things - deleteDir(log, client, msgs.NULL_INODE_ID, "", msgs.ROOT_DIR_INODE_ID) + deleteDir(log, client, msgs.NULL_INODE_ID, "", 0, msgs.ROOT_DIR_INODE_ID) // Make all historical stuff die immediately for all directories for i := 0; i < 256; i++ { shid := msgs.ShardId(i) @@ -78,10 +82,10 @@ func cleanupAfterTest( } } // Collect everything - if err := eggs.CollectDirectoriesInAllShards(log); err != nil { + if err := eggs.CollectDirectoriesInAllShards(log, counters); err != nil { panic(err) } - if err := eggs.DestructFilesInAllShards(log, blockServicesKeys); err != nil { + if err := eggs.DestructFilesInAllShards(log, counters, blockServicesKeys); err != nil { panic(err) } // Make sure nothing is left @@ -103,10 +107,28 @@ func cleanupAfterTest( if len(visitFilesResp.Ids) > 0 { panic(fmt.Errorf("%v: unexpected files (%v) after cleanup", shid, len(visitFilesResp.Ids))) } - // No transient files + // No transient files. We might have some transient files + // left due to repeated calls to construct file because + // of packet drops. We check that at least they're empty. + visitTransientFilesReq := msgs.VisitTransientFilesReq{} visitTransientFilesResp := msgs.VisitTransientFilesResp{} - if err := client.ShardRequest(log, shid, &msgs.VisitTransientFilesReq{}, &visitTransientFilesResp); err != nil { - panic(fmt.Errorf("%v: unexpected transient files (%v) after cleanup", shid, len(visitTransientFilesResp.Files))) + for { + if err := client.ShardRequest(log, shid, &visitTransientFilesReq, &visitTransientFilesResp); err != nil { + panic(err) + } + for _, file := range visitTransientFilesResp.Files { + statResp := msgs.StatTransientFileResp{} + if err := client.ShardRequest(log, shid, &msgs.StatTransientFileReq{Id: file.Id}, &statResp); err != nil { + panic(err) + } + if statResp.Size > 0 { + panic(fmt.Errorf("unexpected non-empty transient file %v after cleanup", file)) + } + } + if visitTransientFilesResp.NextId == 0 { + break + } + visitTransientFilesReq.BeginId = visitTransientFilesResp.NextId } } // Nothing in root dir @@ -117,5 +139,4 @@ func cleanupAfterTest( if len(fullReadDirResp.Results) != 0 { panic(fmt.Errorf("unexpected stuff in root directory")) } - } diff --git a/go/integrationtest/filehistory.go b/go/integrationtest/filehistory.go index d1bd9412..28159f37 100644 --- a/go/integrationtest/filehistory.go +++ b/go/integrationtest/filehistory.go @@ -29,35 +29,55 @@ type renameFile struct { // trace type createdFile struct { - name string - id msgs.InodeId + name string + id msgs.InodeId + creationTime msgs.EggsTime +} + +type renamedFile struct { + oldName string + newName string + newCreationTime msgs.EggsTime } type checkpoint struct { time msgs.EggsTime } +type file struct { + name string + id msgs.InodeId + creationTime msgs.EggsTime +} type files struct { - names []string - ids []msgs.InodeId + files []file byName map[string]int } -func (files *files) addFile(name string, id msgs.InodeId) { +func (files *files) addFile(name string, id msgs.InodeId, creationTime msgs.EggsTime) { if _, wasPresent := files.byName[name]; wasPresent { panic(fmt.Errorf("unexpected overwrite of %s", name)) } - files.names = append(files.names, name) - files.ids = append(files.ids, id) - files.byName[name] = len(files.ids) - 1 + files.files = append(files.files, file{name: name, id: id, creationTime: creationTime}) + files.byName[name] = len(files.files) - 1 } +/* func (files *files) id(name string) msgs.InodeId { ix, present := files.byName[name] if !present { panic(fmt.Errorf("name not found %v", name)) } - return files.ids[ix] + return files.files[ix].id +} +*/ + +func (files *files) file(name string) file { + ix, present := files.byName[name] + if !present { + panic(fmt.Errorf("name not found %v", name)) + } + return files.files[ix] } func (files *files) deleteFile(name string) { @@ -65,14 +85,12 @@ func (files *files) deleteFile(name string) { panic(fmt.Errorf("name not found %v", name)) } ix := files.byName[name] - lastIx := len(files.ids) - 1 + lastIx := len(files.files) - 1 if ix != lastIx { - files.ids[ix] = files.ids[lastIx] - files.names[ix] = files.names[lastIx] - files.byName[files.names[ix]] = ix + files.files[ix] = files.files[lastIx] + files.byName[files.files[ix].name] = ix } - files.ids = files.ids[:lastIx] - files.names = files.names[:lastIx] + files.files = files.files[:lastIx] delete(files.byName, name) } @@ -93,7 +111,8 @@ func genCreateFile(filePrefix string, rand *rand.Rand, files *files) createFile } func genDeleteFile(filePrefix string, rand *rand.Rand, files *files) deleteFile { - return deleteFile{name: files.names[int(rand.Uint32())%len(files.names)]} + file := &files.files[int(rand.Uint32())%len(files.files)] + return deleteFile{name: file.name} } func genRenameFile(filePrefix string, rand *rand.Rand, files *files) renameFile { @@ -125,11 +144,11 @@ func checkCheckpoint(prefix string, files *files, allEdges []edge) { } edges = append(edges, edge) } - if len(edges) != len(files.names) { - panic(fmt.Errorf("expected %d edges, got %d", len(files.names), len(edges))) + if len(edges) != len(files.files) { + panic(fmt.Errorf("expected %d edges, got %d", len(files.files), len(edges))) } for _, edge := range edges { - id := files.id(edge.name) + id := files.file(edge.name).id if id != edge.targetId { panic(fmt.Errorf("expected targetId %v for edge %v, but got %v", id, edge.name, id)) } @@ -149,31 +168,37 @@ func runCheckpoint(harness *harness, prefix string, files *files) checkpoint { func runStep(harness *harness, files *files, stepAny any) any { switch step := stepAny.(type) { case createFile: - id := harness.createFile(msgs.ROOT_DIR_INODE_ID, step.name, step.size) - files.addFile(step.name, id) + id, creationTime := harness.createFile(msgs.ROOT_DIR_INODE_ID, step.name, step.size) + files.addFile(step.name, id, creationTime) return createdFile{ - name: step.name, - id: id, + name: step.name, + id: id, + creationTime: creationTime, } case deleteFile: - fileId := files.id(step.name) - harness.shardReq(msgs.ROOT_DIR_INODE_ID.Shard(), &msgs.SoftUnlinkFileReq{OwnerId: msgs.ROOT_DIR_INODE_ID, FileId: fileId, Name: step.name}, &msgs.SoftUnlinkFileResp{}) + f := files.file(step.name) + harness.shardReq( + msgs.ROOT_DIR_INODE_ID.Shard(), + &msgs.SoftUnlinkFileReq{OwnerId: msgs.ROOT_DIR_INODE_ID, FileId: f.id, CreationTime: f.creationTime, Name: step.name}, &msgs.SoftUnlinkFileResp{}, + ) files.deleteFile(step.name) return step case renameFile: - targetId := files.id(step.oldName) - harness.shardReq( - msgs.ROOT_DIR_INODE_ID.Shard(), - &msgs.SameDirectoryRenameReq{DirId: msgs.ROOT_DIR_INODE_ID, TargetId: targetId, OldName: step.oldName, NewName: step.newName}, - &msgs.SameDirectoryRenameResp{}, - ) + f := files.file(step.oldName) + req := msgs.SameDirectoryRenameReq{DirId: msgs.ROOT_DIR_INODE_ID, TargetId: f.id, OldCreationTime: f.creationTime, OldName: step.oldName, NewName: step.newName} + resp := msgs.SameDirectoryRenameResp{} + harness.shardReq(msgs.ROOT_DIR_INODE_ID.Shard(), &req, &resp) files.deleteFile(step.oldName) if _, wasPresent := files.byName[step.newName]; wasPresent { // overwrite files.deleteFile(step.newName) } - files.addFile(step.newName, targetId) - return step + files.addFile(step.newName, f.id, resp.NewCreationTime) + return renamedFile{ + oldName: step.oldName, + newName: step.newName, + newCreationTime: resp.NewCreationTime, + } default: panic(fmt.Errorf("bad step %T", stepAny)) } @@ -211,17 +236,17 @@ func replayCheckpoint(prefix string, files *files, fullEdges []fullEdge, t msgs. func replayStep(prefix string, files *files, fullEdges []fullEdge, stepAny any) { switch step := stepAny.(type) { case createdFile: - files.addFile(step.name, step.id) + files.addFile(step.name, step.id, step.creationTime) case deleteFile: files.deleteFile(step.name) - case renameFile: - targetId := files.id(step.oldName) + case renamedFile: + targetId := files.file(step.oldName).id files.deleteFile(step.oldName) if _, wasPresent := files.byName[step.newName]; wasPresent { // overwrite files.deleteFile(step.newName) } - files.addFile(step.newName, targetId) + files.addFile(step.newName, targetId, step.newCreationTime) case checkpoint: replayCheckpoint(prefix, files, fullEdges, step.time) default: @@ -229,7 +254,7 @@ func replayStep(prefix string, files *files, fullEdges []fullEdge, stepAny any) } } -func fileHistoryStepSingle(opts *fileHistoryTestOpts, harness *harness, seed int64, filePrefix string) { +func fileHistoryStepSingle(log eggs.LogLevels, opts *fileHistoryTestOpts, harness *harness, seed int64, filePrefix string) { // loop for n steps. at every step: // * if we have never reached the target files, then just create a file. // * if we have, create/delete/rename/rename with override at random. @@ -241,19 +266,19 @@ func fileHistoryStepSingle(opts *fileHistoryTestOpts, harness *harness, seed int reachedTargetFiles := false fls := files{ - names: []string{}, - ids: []msgs.InodeId{}, + files: []file{}, byName: make(map[string]int), } source := rand.NewSource(seed) rand := rand.New(source) for stepIx := 0; stepIx < opts.steps; stepIx++ { if stepIx%opts.checkpointEvery == 0 { + log.Info("%v: reached checkpoint at step %v", filePrefix, stepIx) checkpoint := runCheckpoint(harness, filePrefix, &fls) trace = append(trace, checkpoint) } var step any - if len(fls.names) < opts.lowFiles { + if len(fls.files) < opts.lowFiles { reachedTargetFiles = false } if !reachedTargetFiles { @@ -273,12 +298,11 @@ func fileHistoryStepSingle(opts *fileHistoryTestOpts, harness *harness, seed int panic(fmt.Errorf("bad which %d", which)) } } - reachedTargetFiles = reachedTargetFiles || len(fls.names) >= opts.targetFiles + reachedTargetFiles = reachedTargetFiles || len(fls.files) >= opts.targetFiles trace = append(trace, runStep(harness, &fls, step)) } fls = files{ - names: []string{}, - ids: []msgs.InodeId{}, + files: []file{}, byName: make(map[string]int), } fullEdges := harness.fullReadDir(msgs.ROOT_DIR_INODE_ID) @@ -298,7 +322,7 @@ type fileHistoryTestOpts struct { func fileHistoryTest( log eggs.LogLevels, opts *fileHistoryTestOpts, - stats *harnessStats, + counters *eggs.ClientCounters, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) { terminateChan := make(chan any, 1) @@ -316,13 +340,14 @@ func fileHistoryTest( seed := int64(i) go func() { defer func() { handleRecover(log, terminateChan, recover()) }() - client, err := eggs.NewShardSpecificClient(msgs.ShardId(0)) + shid := msgs.ShardId(0) + client, err := eggs.NewClient(&shid, counters, nil) if err != nil { panic(err) } defer client.Close() - harness := newHarness(log, client, stats, blockServicesKeys) - fileHistoryStepSingle(opts, harness, seed, prefix) + harness := newHarness(log, client, blockServicesKeys) + fileHistoryStepSingle(log, opts, harness, seed, prefix) wait.Done() }() } diff --git a/go/integrationtest/fstest.go b/go/integrationtest/fstest.go index 253e1234..73e3ca3d 100644 --- a/go/integrationtest/fstest.go +++ b/go/integrationtest/fstest.go @@ -21,18 +21,23 @@ type fsTestDir struct { children fsTestChildren } +type fsTestChild[T any] struct { + creationTime msgs.EggsTime + body T +} + // We always use integers as names type fsTestChildren struct { - files map[int]msgs.InodeId - directories map[int]*fsTestDir + files map[int]fsTestChild[msgs.InodeId] + directories map[int]fsTestChild[*fsTestDir] } func newFsTestDir(id msgs.InodeId) *fsTestDir { return &fsTestDir{ id: id, children: fsTestChildren{ - files: make(map[int]msgs.InodeId), - directories: make(map[int]*fsTestDir), + files: make(map[int]fsTestChild[msgs.InodeId]), + directories: make(map[int]fsTestChild[*fsTestDir]), }, } } @@ -51,17 +56,25 @@ func (s *fsTestDir) dir(path []int) *fsTestDir { if !childFound { panic("dir not found") } - return child.dir(path[1:]) + return child.body.dir(path[1:]) } func (s *fsTestState) dir(path []int) *fsTestDir { return s.rootDir.dir(path) } -func (state *fsTestState) makeDir(log eggs.LogLevels, harness *harness, opts *fsTestOpts, parent []int, name int) []int { +func (state *fsTestState) incrementDirs(log eggs.LogLevels, opts *fsTestOpts) { if state.totalDirs >= opts.numDirs { panic("ran out of dirs!") } + state.totalDirs++ + if state.totalDirs%100 == 0 { + log.Info("%v out of %v dirs created", state.totalDirs, opts.numDirs) + } +} + +func (state *fsTestState) makeDir(log eggs.LogLevels, harness *harness, opts *fsTestOpts, parent []int, name int) []int { + state.incrementDirs(log, opts) dir := state.dir(parent) _, dirExists := dir.children.directories[name] if dirExists { @@ -79,16 +92,74 @@ func (state *fsTestState) makeDir(log eggs.LogLevels, harness *harness, opts *fs } resp := msgs.MakeDirectoryResp{} harness.cdcReq(&req, &resp) - dir.children.directories[name] = newFsTestDir(resp.Id) - state.totalDirs++ + dir.children.directories[name] = fsTestChild[*fsTestDir]{ + body: newFsTestDir(resp.Id), + creationTime: resp.CreationTime, + } path := append(parent, name) return path } -func (state *fsTestState) makeFile(log eggs.LogLevels, harness *harness, opts *fsTestOpts, rand *rand.Rand, dirPath []int, name int) { +func (state *fsTestState) makeDirFromTemp(log eggs.LogLevels, harness *harness, opts *fsTestOpts, parent []int, name int, tmpParent []int) []int { + state.incrementDirs(log, opts) + dir := state.dir(parent) + _, dirExists := dir.children.directories[name] + if dirExists { + panic("conflicting name (dir)") + } + _, fileExists := dir.children.files[name] + if fileExists { + panic("conflicting name (files)") + } + var id msgs.InodeId + var tmpCreationTime msgs.EggsTime + tmpParentId := state.dir(tmpParent).id + { + req := msgs.MakeDirectoryReq{ + OwnerId: tmpParentId, + Name: "tmp", + Info: msgs.SetDirectoryInfo{Inherited: true}, + } + resp := msgs.MakeDirectoryResp{} + harness.cdcReq(&req, &resp) + id = resp.Id + tmpCreationTime = resp.CreationTime + } + var creationTime msgs.EggsTime + { + parentId := dir.id + req := msgs.RenameDirectoryReq{ + TargetId: id, + OldOwnerId: tmpParentId, + OldCreationTime: tmpCreationTime, + OldName: "tmp", + NewOwnerId: parentId, + NewName: strconv.Itoa(name), + } + resp := msgs.RenameDirectoryResp{} + harness.cdcReq(&req, &resp) + creationTime = resp.CreationTime + } + dir.children.directories[name] = fsTestChild[*fsTestDir]{ + body: newFsTestDir(id), + creationTime: creationTime, + } + path := append(parent, name) + return path +} + +func (state *fsTestState) incrementFiles(log eggs.LogLevels, opts *fsTestOpts) { if state.totalFiles >= opts.numFiles { panic("ran out of files!") } + state.totalFiles++ + if state.totalFiles%100 == 0 { + log.Info("%v out of %v dirs created", state.totalFiles, opts.numFiles) + } +} + +func (state *fsTestState) makeFile(log eggs.LogLevels, harness *harness, opts *fsTestOpts, rand *rand.Rand, dirPath []int, name int) { + state.incrementFiles(log, opts) dir := state.dir(dirPath) _, dirExists := dir.children.directories[name] if dirExists { @@ -99,9 +170,55 @@ func (state *fsTestState) makeFile(log eggs.LogLevels, harness *harness, opts *f panic("conflicting name (files)") } size := rand.Uint64() % (uint64(100) << 20) // up to 20MiB - id := harness.createFile(dir.id, strconv.Itoa(name), size) - state.totalFiles++ - dir.children.files[name] = id + id, creationTime := harness.createFile(dir.id, strconv.Itoa(name), size) + dir.children.files[name] = fsTestChild[msgs.InodeId]{ + body: id, + creationTime: creationTime, + } +} + +func (state *fsTestState) makeFileFromTemp(log eggs.LogLevels, harness *harness, opts *fsTestOpts, rand *rand.Rand, dirPath []int, name int, tmpDirPath []int) { + state.incrementFiles(log, opts) + dir := state.dir(dirPath) + _, dirExists := dir.children.directories[name] + if dirExists { + panic("conflicting name (dir)") + } + _, fileExists := dir.children.files[name] + if fileExists { + panic("conflicting name (files)") + } + size := rand.Uint64() % (uint64(100) << 20) // up to 20MiB + tmpParentId := state.dir(tmpDirPath).id + id, creationTime := harness.createFile(tmpParentId, "tmp", size) + if tmpParentId == dir.id { + req := msgs.SameDirectoryRenameReq{ + DirId: dir.id, + TargetId: id, + OldName: "tmp", + OldCreationTime: creationTime, + NewName: strconv.Itoa(name), + } + resp := msgs.SameDirectoryRenameResp{} + harness.shardReq(dir.id.Shard(), &req, &resp) + creationTime = resp.NewCreationTime + } else { + req := msgs.RenameFileReq{ + TargetId: id, + OldOwnerId: tmpParentId, + OldCreationTime: creationTime, + OldName: "tmp", + NewOwnerId: dir.id, + NewName: strconv.Itoa(name), + } + resp := msgs.RenameFileResp{} + harness.cdcReq(&req, &resp) + creationTime = resp.CreationTime + } + dir.children.files[name] = fsTestChild[msgs.InodeId]{ + body: id, + creationTime: creationTime, + } } func (d *fsTestDir) check(log eggs.LogLevels, harness *harness) { @@ -128,28 +245,33 @@ func (d *fsTestDir) check(log eggs.LogLevels, harness *harness) { } // recurse down for _, dir := range d.children.directories { - dir.check(log, harness) + dir.body.check(log, harness) } } func fsTest( log eggs.LogLevels, opts *fsTestOpts, - stats *harnessStats, + counters *eggs.ClientCounters, blockServicesKeys map[msgs.BlockServiceId][16]byte, ) { - client, err := eggs.NewAllShardsClient() + client, err := eggs.NewClient(nil, counters, nil) if err != nil { panic(err) } defer client.Close() - harness := newHarness(log, client, stats, blockServicesKeys) + harness := newHarness(log, client, blockServicesKeys) state := fsTestState{ - rootDir: newFsTestDir(msgs.ROOT_DIR_INODE_ID), + totalDirs: 1, // root dir + rootDir: newFsTestDir(msgs.ROOT_DIR_INODE_ID), } branching := int(math.Log(float64(opts.numDirs)) / math.Log(float64(opts.depth))) + source := rand.NewSource(0) + rand := rand.New(source) // Create directories by first creating the first n-1 levels according to branching above - allDirs := [][]int{} + allDirs := [][]int{ + {}, // root + } lastLevelDirs := [][]int{} for depth := 1; depth <= opts.depth; depth++ { depthDirs := int(math.Pow(float64(branching), float64(depth))) @@ -160,24 +282,41 @@ func fsTest( j = j / branching parentPath = append([]int{j}, parentPath...) } - path := state.makeDir(log, harness, opts, parentPath, i) + var path []int + // create and then move for 1/5 of the dirs + if rand.Uint32()%5 == 0 { + tmpParentPath := allDirs[int(rand.Uint32())%len(allDirs)] + path = state.makeDirFromTemp(log, harness, opts, parentPath, i, tmpParentPath) + } else { + path = state.makeDir(log, harness, opts, parentPath, i) + } allDirs = append(allDirs, path) if depth == opts.depth { lastLevelDirs = append(lastLevelDirs, path) } } } - // then create the leaves at random - source := rand.NewSource(0) - rand := rand.New(source) + // Then create the leaves at random. To stimulate CDC paths (also afterwards in GC), create + // them and then move them. for state.totalDirs < opts.numDirs { parentPath := lastLevelDirs[int(rand.Uint32())%len(lastLevelDirs)] - state.makeDir(log, harness, opts, parentPath, state.totalDirs) + // create and then move for 1/5 of the dirs + if rand.Uint32()%5 == 0 { + tmpParentPath := allDirs[int(rand.Uint32())%len(allDirs)] + state.makeDirFromTemp(log, harness, opts, parentPath, state.totalDirs, tmpParentPath) + } else { + state.makeDir(log, harness, opts, parentPath, state.totalDirs) + } } // now create files, random locations for state.totalFiles < opts.numFiles { - dir := allDirs[int(rand.Uint32())%len(lastLevelDirs)] - state.makeFile(log, harness, opts, rand, dir, state.totalDirs+state.totalFiles) + dir := allDirs[int(rand.Uint32())%len(allDirs)] + if rand.Uint32()%5 == 0 { + tmpParentPath := allDirs[int(rand.Uint32())%len(allDirs)] + state.makeFileFromTemp(log, harness, opts, rand, dir, state.totalDirs+state.totalFiles, tmpParentPath) + } else { + state.makeFile(log, harness, opts, rand, dir, state.totalDirs+state.totalFiles) + } } // finally, check that our view of the world is the real view of the world state.rootDir.check(log, harness) diff --git a/go/integrationtest/harness.go b/go/integrationtest/harness.go index 84207a01..cd7a2cb3 100644 --- a/go/integrationtest/harness.go +++ b/go/integrationtest/harness.go @@ -2,9 +2,6 @@ package main import ( "fmt" - "sync/atomic" - "time" - "xtx/eggsfs/bincode" "xtx/eggsfs/eggs" "xtx/eggsfs/msgs" ) @@ -21,53 +18,27 @@ type fullEdge struct { creationTime msgs.EggsTime } -type harnessStats struct { - // these arrays are indexed by req type - shardReqsCounts [256]int64 - shardReqsNanos [256]int64 - cdcReqsCounts [256]int64 - cdcReqsNanos [256]int64 -} - type harness struct { log eggs.LogLevels - client eggs.Client - stats *harnessStats + client *eggs.Client blockServicesKeys map[msgs.BlockServiceId][16]byte } -func (h *harness) shardReq( - shid msgs.ShardId, - reqBody bincode.Packable, - respBody bincode.Unpackable, -) { - msgKind := msgs.GetShardMessageKind(reqBody) - atomic.AddInt64(&h.stats.shardReqsCounts[msgKind], 1) - t0 := time.Now() +func (h *harness) shardReq(shid msgs.ShardId, reqBody msgs.ShardRequest, respBody msgs.ShardResponse) { err := h.client.ShardRequest(h.log, shid, reqBody, respBody) if err != nil { panic(err) } - elapsed := time.Since(t0) - atomic.AddInt64(&h.stats.shardReqsNanos[msgKind], elapsed.Nanoseconds()) } -func (h *harness) cdcReq( - reqBody bincode.Packable, - respBody bincode.Unpackable, -) { - msgKind := msgs.GetCDCMessageKind(reqBody) - atomic.AddInt64(&h.stats.cdcReqsCounts[msgKind], 1) - t0 := time.Now() +func (h *harness) cdcReq(reqBody msgs.CDCRequest, respBody msgs.CDCResponse) { err := h.client.CDCRequest(h.log, reqBody, respBody) if err != nil { panic(err) } - elapsed := time.Since(t0) - atomic.AddInt64(&h.stats.cdcReqsNanos[msgKind], elapsed.Nanoseconds()) } -func (h *harness) createFile(dirId msgs.InodeId, name string, size uint64) msgs.InodeId { +func (h *harness) createFile(dirId msgs.InodeId, name string, size uint64) (id msgs.InodeId, creationTime msgs.EggsTime) { // construct constructReq := msgs.ConstructFileReq{ Type: msgs.FILE, @@ -130,8 +101,9 @@ func (h *harness) createFile(dirId msgs.InodeId, name string, size uint64) msgs. OwnerId: dirId, Name: name, } - h.shardReq(dirId.Shard(), &linkReq, &msgs.LinkFileResp{}) - return constructResp.Id + linkResp := msgs.LinkFileResp{} + h.shardReq(dirId.Shard(), &linkReq, &linkResp) + return constructResp.Id, linkResp.CreationTime } func (h *harness) readDir(dir msgs.InodeId) []edge { @@ -181,11 +153,10 @@ func (h *harness) fullReadDir(dirId msgs.InodeId) []fullEdge { return edges } -func newHarness(log eggs.LogLevels, client eggs.Client, stats *harnessStats, blockServicesKeys map[msgs.BlockServiceId][16]byte) *harness { +func newHarness(log eggs.LogLevels, client *eggs.Client, blockServicesKeys map[msgs.BlockServiceId][16]byte) *harness { return &harness{ log: log, client: client, - stats: stats, blockServicesKeys: blockServicesKeys, } } diff --git a/go/integrationtest/integrationtest.go b/go/integrationtest/integrationtest.go index 6fdd2b00..69d07f39 100644 --- a/go/integrationtest/integrationtest.go +++ b/go/integrationtest/integrationtest.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path" + "regexp" "runtime/debug" "strings" "time" @@ -16,7 +17,7 @@ import ( func handleRecover(log eggs.LogLevels, terminateChan chan any, err any) { if err != nil { log.RaiseAlert(err.(error)) - log.Info("PANIC %v. Stacktrace:\n", err) + log.Info("PANIC %v. Stacktrace:", err) for _, line := range strings.Split(string(debug.Stack()), "\n") { log.Info(line) } @@ -25,50 +26,63 @@ func handleRecover(log eggs.LogLevels, terminateChan chan any, err any) { } } -func runTest(blockServicesKeys map[msgs.BlockServiceId][16]byte, what string, run func(stats *harnessStats)) { - stats := harnessStats{} - - fmt.Printf("running %s\n", what) - t0 := time.Now() - run(&stats) - elapsed := time.Since(t0) - totalShardRequests := int64(0) - totalShardNanos := int64(0) +func formatShardCounters(counters *eggs.ClientCounters) { + fmt.Printf(" shard reqs count/avg/total:\n") for i := 0; i < 256; i++ { - totalShardRequests += stats.shardReqsCounts[i] - totalShardNanos += stats.shardReqsNanos[i] - } - totalCDCRequests := int64(0) - totalCDCNanos := int64(0) - for i := 0; i < 256; i++ { - totalCDCRequests += stats.cdcReqsCounts[i] - totalCDCNanos += stats.cdcReqsNanos[i] - } - - fmt.Printf(" ran test in %v, %v shard requests performed, %v CDC requests performed\n", elapsed, totalShardRequests, totalCDCRequests) - if totalShardRequests > 0 { - fmt.Printf(" shard reqs avg times:\n") - for i := 0; i < 256; i++ { - if stats.shardReqsCounts[i] == 0 { - continue - } - fmt.Printf(" %-30v %10v %10v\n", msgs.ShardMessageKind(i), stats.shardReqsCounts[i], time.Duration(stats.shardReqsNanos[i]/stats.shardReqsCounts[i])) + if counters.ShardReqsCounts[i] == 0 { + continue } + fmt.Printf(" %-30v %10v %10v %v\n", msgs.ShardMessageKind(i), counters.ShardReqsCounts[i], time.Duration(counters.ShardReqsNanos[i]/counters.ShardReqsCounts[i]), time.Duration(counters.ShardReqsNanos[i])) } - if totalCDCRequests > 0 { - fmt.Printf(" CDC reqs avg times:\n") - for i := 0; i < 256; i++ { - if stats.cdcReqsCounts[i] == 0 { - continue - } - fmt.Printf(" %-30v %10v %10v\n", msgs.CDCMessageKind(i), stats.cdcReqsCounts[i], time.Duration(stats.cdcReqsNanos[i]/stats.cdcReqsCounts[i])) - } - } - - cleanupAfterTest(blockServicesKeys) } -func runTests(terminateChan chan any, log eggs.LogLevels, blockServices []eggs.BlockService) { +func formatCDCCounters(counters *eggs.ClientCounters) { + fmt.Printf(" CDC reqs count/avg/total:\n") + for i := 0; i < 256; i++ { + if counters.CDCReqsCounts[i] == 0 { + continue + } + fmt.Printf(" %-30v %10v %10v %v\n", msgs.CDCMessageKind(i), counters.CDCReqsCounts[i], time.Duration(counters.CDCReqsNanos[i]/counters.CDCReqsCounts[i]), time.Duration(counters.CDCReqsNanos[i])) + } +} + +func runTest(log eggs.LogLevels, blockServicesKeys map[msgs.BlockServiceId][16]byte, filter *regexp.Regexp, name string, extra string, run func(env *eggs.ClientCounters)) { + if !filter.Match([]byte(name)) { + fmt.Printf("skipping test %s\n", name) + return + } + + counters := &eggs.ClientCounters{} + + fmt.Printf("running %s, %s\n", name, extra) + t0 := time.Now() + run(counters) + elapsed := time.Since(t0) + + totalShardRequests := counters.TotalShardRequests() + totalCDCRequests := counters.TotalCDCRequests() + fmt.Printf(" ran test in %v, %v shard requests performed, %v CDC requests performed\n", elapsed, totalShardRequests, totalCDCRequests) + if totalShardRequests > 0 { + formatShardCounters(counters) + } + if totalCDCRequests > 0 { + formatCDCCounters(counters) + } + + counters = &eggs.ClientCounters{} + t0 = time.Now() + cleanupAfterTest(log, counters, blockServicesKeys) + elapsed = time.Since(t0) + fmt.Printf(" cleanup took %v\n", elapsed) + if counters.TotalShardRequests() > 0 { + formatShardCounters(counters) + } + if counters.TotalCDCRequests() > 0 { + formatCDCCounters(counters) + } +} + +func runTests(terminateChan chan any, log eggs.LogLevels, blockServices []eggs.BlockService, short bool, filter *regexp.Regexp) { defer func() { handleRecover(log, terminateChan, recover()) }() blockServicesKeys := make(map[msgs.BlockServiceId][16]byte) @@ -90,46 +104,72 @@ func runTests(terminateChan chan any, log eggs.LogLevels, blockServices []eggs.B checkpointEvery: 100, // get times every 100 actions targetFiles: 1000, // how many files we want lowFiles: 500, - threads: 5, + threads: 1, } runTest( + log, blockServicesKeys, - fmt.Sprintf("file history test, %v threads, %v steps", fileHistoryOpts.threads, fileHistoryOpts.steps), - func(stats *harnessStats) { - fileHistoryTest(log, &fileHistoryOpts, stats, blockServicesKeys) + filter, + "file history test", + fmt.Sprintf("%v threads, %v steps", fileHistoryOpts.threads, fileHistoryOpts.steps), + func(counters *eggs.ClientCounters) { + fileHistoryTest(log, &fileHistoryOpts, counters, blockServicesKeys) }, ) fsTestOpts := fsTestOpts{ - numDirs: 1 * 1000, // we need at least 256 directories, to have at least one dir per shard - numFiles: 100 * 1000, // around 100 files per dir + numDirs: 1 * 1000, // we need at least 256 directories, to have at least one dir per shard + numFiles: 20 * 1000, // around 20 files per dir depth: 4, } + if short { + fsTestOpts.numDirs = 200 + fsTestOpts.numFiles = 10 * 200 + } runTest( + log, blockServicesKeys, - fmt.Sprintf("simple fs test, %v dirs, %v files, %v depth", fsTestOpts.numDirs, fsTestOpts.numFiles, fsTestOpts.depth), - func(stats *harnessStats) { - fsTest(log, &fsTestOpts, stats, blockServicesKeys) + filter, + "simple fs test", + fmt.Sprintf("%v dirs, %v files, %v depth", fsTestOpts.numDirs, fsTestOpts.numFiles, fsTestOpts.depth), + func(counters *eggs.ClientCounters) { + fsTest(log, &fsTestOpts, counters, blockServicesKeys) }, ) terminateChan <- nil } +func noRunawayArgs() { + if flag.NArg() > 0 { + fmt.Fprintf(os.Stderr, "Unexpected extra arguments %v\n", flag.Args()) + os.Exit(2) + } +} + func main() { valgrind := flag.Bool("valgrind", false, "Whether to build for and run with valgrind.") sanitize := flag.Bool("sanitize", false, "Whether to build with sanitize.") - debug := flag.Bool("debug", false, "Whether to build without optimizations.") + debug := flag.Bool("debug", false, "Build without optimizations.") verbose := flag.Bool("verbose", false, "Note that verbose won't do much for the shard unless you build with debug.") dataDir := flag.String("data-dir", "", "Directory where to store the EggsFS data. If not present a temporary directory will be used.") preserveDbDir := flag.Bool("preserve-data-dir", false, "Whether to preserve the temp data dir (if we're using a temp data dir).") - coverage := flag.Bool("coverage", false, "Whether to build with coverage support. Right now applies only to the C++ shard code.") + coverage := flag.Bool("coverage", false, "Build with coverage support. Right now applies only to the C++ shard code.") + filter := flag.String("filter", "", "Regex to match against test names -- only matching ones will be ran.") + perf := flag.Bool("perf", false, "Run the C++ binaries (shard & CDC) with `perf record`") + incomingPacketDrop := flag.Float64("incoming-packet-drop", 0.0, "Simulate packet drop in shard & CDC (the argument is the probability that any packet will be dropped). This one will drop the requests on arrival.") + outgoingPacketDrop := flag.Float64("outgoing-packet-drop", 0.0, "Simulate packet drop in shard & CDC (the argument is the probability that any packet will be dropped). This one will process the requests, but drop the responses.") + short := flag.Bool("short", false, "Run a shorter version of the tests (useful with packet drop flags)") flag.Parse() + noRunawayArgs() if *verbose && !*debug { - panic("You asked me to build without -debug, and with -verbose-shard. This is almost certainly wrong.") + fmt.Fprintf(os.Stderr, "You asked me to build without -debug, and with -verbose. This is almost certainly wrong, since you won't get debug messages in the shard/cdc without -debug.") + os.Exit(2) } + filterRe := regexp.MustCompile(*filter) + cppBuildOpts := eggs.BuildCppOpts{ Valgrind: *valgrind, Sanitize: *sanitize, @@ -137,12 +177,6 @@ func main() { Coverage: *coverage, } - log := &eggs.LogToStdout{} - - shardExe := eggs.BuildShardExe(log, &cppBuildOpts) - cdcExe := eggs.BuildCDCExe(log, &cppBuildOpts) - shuckleExe := eggs.BuildShuckleExe(log) - cleanupDbDir := false tmpDataDir := *dataDir == "" if tmpDataDir { @@ -163,6 +197,26 @@ func main() { } }() + logFile := path.Join(*dataDir, "go-log") + var logOut *os.File + { + var err error + logOut, err = os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + fmt.Fprintf(os.Stderr, "could not open file %v: %v", logFile, err) + os.Exit(1) + } + defer logOut.Close() + } + log := &eggs.LogLogger{ + Verbose: *verbose, + Logger: eggs.NewLogger(logOut), + } + + shardExe := eggs.BuildShardExe(log, &cppBuildOpts) + cdcExe := eggs.BuildCDCExe(log, &cppBuildOpts) + shuckleExe := eggs.BuildShuckleExe(log) + terminateChan := make(chan any, 1) procs := eggs.NewManagedProcesses(terminateChan) @@ -197,12 +251,19 @@ func main() { }) } + if *incomingPacketDrop > 0 || *outgoingPacketDrop > 0 { + fmt.Printf("will drop %0.2f%% packets\n", (*incomingPacketDrop+*outgoingPacketDrop)*100.0) + } + // Start CDC procs.StartCDC(&eggs.CDCOpts{ - Exe: cdcExe, - Dir: path.Join(*dataDir, "cdc"), - Verbose: *verbose, - Valgrind: *valgrind, + Exe: cdcExe, + Dir: path.Join(*dataDir, "cdc"), + Verbose: *verbose, + Valgrind: *valgrind, + Perf: *perf, + IncomingPacketDrop: *incomingPacketDrop, + OutgoingPacketDrop: *outgoingPacketDrop, }) waitShuckleFor := 10 * time.Second @@ -213,12 +274,15 @@ func main() { for i := 0; i < 256; i++ { shid := msgs.ShardId(i) procs.StartShard(&eggs.ShardOpts{ - Exe: shardExe, - Dir: path.Join(*dataDir, fmt.Sprintf("shard_%03d", i)), - Verbose: *verbose, - Shid: shid, - Valgrind: *valgrind, - WaitForShuckle: true, + Exe: shardExe, + Dir: path.Join(*dataDir, fmt.Sprintf("shard_%03d", i)), + Verbose: *verbose, + Shid: shid, + Valgrind: *valgrind, + WaitForShuckle: true, + Perf: *perf, + IncomingPacketDrop: *incomingPacketDrop, + OutgoingPacketDrop: *outgoingPacketDrop, }) } @@ -231,7 +295,7 @@ func main() { fmt.Printf("operational 🤖\n") // start tests - go func() { runTests(terminateChan, log, blockServices) }() + go func() { runTests(terminateChan, log, blockServices, *short, filterRe) }() // wait for things to finish err := <-terminateChan diff --git a/go/msgs/msgs.go b/go/msgs/msgs.go index 8c7bbfb8..7812b663 100644 --- a/go/msgs/msgs.go +++ b/go/msgs/msgs.go @@ -198,6 +198,18 @@ func (parity Parity) ParityBlocks() int { // -------------------------------------------------------------------- // Shard requests/responses +type ShardRequest interface { + bincode.Packable + bincode.Unpackable + ShardRequestKind() ShardMessageKind +} + +type ShardResponse interface { + bincode.Packable + bincode.Unpackable + ShardResponseKind() ShardMessageKind +} + const ( EMPTY_STORAGE StorageClass = 0 INLINE_STORAGE StorageClass = 1 @@ -218,6 +230,28 @@ type LookupResp struct { CreationTime EggsTime } +// This request is only needed to recover from error resulting from repeated +// calls to things moving edges (e.g. SameDirectoryRenameReq & friends). +// +// TODO this and the response are very ad-hoc, it'd possibly be nicer to fold +// it in FullReadDir +type SnapshotLookupReq struct { + DirId InodeId + Name string + StartFrom EggsTime +} + +type SnapshotLookupEdge struct { + // If the extra bit is set, it's owned. + TargetId InodeIdExtra + CreationTime EggsTime +} + +type SnapshotLookupResp struct { + NextTime EggsTime // 0 for done + Edges []SnapshotLookupEdge +} + // Does not consider transient files. Might return snapshot files: // we don't really have a way of knowing if a file is snapshot just by // looking at it, unlike directories. @@ -417,13 +451,18 @@ type LinkFileReq struct { Name string } -type LinkFileResp struct{} +type LinkFileResp struct { + CreationTime EggsTime +} // turns a current outgoing edge into a snapshot owning edge. type SoftUnlinkFileReq struct { OwnerId InodeId FileId InodeId Name string + // See comment in `SameDirectoryRenameReq` for an idication of why + // we have this here even if it's not strictly needed. + CreationTime EggsTime } type SoftUnlinkFileResp struct{} @@ -479,10 +518,26 @@ type SameDirectoryRenameReq struct { TargetId InodeId DirId InodeId OldName string - NewName string + // This request is a bit annoying in the presence of packet + // loss. Consider this scenario: a client performs a + // `SameDirectoryRenameReq`, which goes through, but the + // response is dropped. + // + // In this case the client must retry but genuine failures + // (for example because the file does not exist) are indistinguishable + // from failures due to the previous request going through. + // + // For this reason we include the creation time here (even if we + // don't strictly needed because current edges are uniquely + // identified by name) so that the shard can implement heuristics + // to let likely repeated calls through in the name of idempotency. + OldCreationTime EggsTime + NewName string } -type SameDirectoryRenameResp struct{} +type SameDirectoryRenameResp struct { + NewCreationTime EggsTime +} type VisitDirectoriesReq struct { BeginId InodeId @@ -604,31 +659,35 @@ type RemoveEdgesReq struct { // if we want to retry things safely. We might create the edge without realizing // that we did (e.g. timeouts), and somebody might move it away in the meantime (with // some shard-local operation). +// TODO also add comment regarding that locking edges is safe only because +// we coordinate things from the CDC type CreateLockedCurrentEdgeReq struct { DirId InodeId Name string TargetId InodeId - // We need this because we want idempotency (retrying this request should - // not create spurious edges when overriding files), and we want to guarantee - // that the current edge is newest. +} + +type CreateLockedCurrentEdgeResp struct { CreationTime EggsTime } -type CreateLockedCurrentEdgeResp struct{} - type LockCurrentEdgeReq struct { - DirId InodeId - Name string - TargetId InodeId + DirId InodeId + TargetId InodeId + CreationTime EggsTime + Name string } type LockCurrentEdgeResp struct{} -// This also lets us turn edges into snapshot. +// This also lets us turn edges into snapshot, through `WasMoved`. type UnlockCurrentEdgeReq struct { - DirId InodeId - Name string - TargetId InodeId + DirId InodeId + Name string + CreationTime EggsTime + TargetId InodeId + // Turn the current edge into a snapshot edge, and create a deletion + // edge with the same name. WasMoved bool } @@ -647,14 +706,14 @@ type RemoveNonOwnedEdgeResp struct{} // Will remove the snapshot, owned edge; and move the file to transient in one // go. -type IntraShardHardFileUnlinkReq struct { +type SameShardHardFileUnlinkReq struct { OwnerId InodeId TargetId InodeId Name string CreationTime EggsTime } -type IntraShardHardFileUnlinkResp struct{} +type SameShardHardFileUnlinkResp struct{} // This is needed to implement inter-shard hard file unlinking, and it is unsafe, since // we must make sure that the owned file is made transient in its shard. @@ -683,7 +742,7 @@ type SetDirectoryInfoReq struct { type SetDirectoryInfoResp struct{} -// TODO this works with transient files, but don't require a cookie -- it's a bit +// TODO this works with transient files, but doesn't require a cookie -- it's a bit // inconsistent. type SwapBlocksReq struct { FileId1 InodeId @@ -715,6 +774,18 @@ type DirectoryEmptyReq struct { // -------------------------------------------------------------------- // CDC requests/responses +type CDCRequest interface { + bincode.Packable + bincode.Unpackable + CDCRequestKind() CDCMessageKind +} + +type CDCResponse interface { + bincode.Packable + bincode.Unpackable + CDCResponseKind() CDCMessageKind +} + type MakeDirectoryReq struct { OwnerId InodeId Name string @@ -722,36 +793,44 @@ type MakeDirectoryReq struct { } type MakeDirectoryResp struct { - Id InodeId + Id InodeId + CreationTime EggsTime } type RenameFileReq struct { - TargetId InodeId - OldOwnerId InodeId - OldName string - NewOwnerId InodeId - NewName string + TargetId InodeId + OldOwnerId InodeId + OldName string + OldCreationTime EggsTime + NewOwnerId InodeId + NewName string } -type RenameFileResp struct{} +type RenameFileResp struct { + CreationTime EggsTime +} type SoftUnlinkDirectoryReq struct { - OwnerId InodeId - TargetId InodeId - Name string + OwnerId InodeId + TargetId InodeId + CreationTime EggsTime + Name string } type SoftUnlinkDirectoryResp struct{} type RenameDirectoryReq struct { - TargetId InodeId - OldOwnerId InodeId - OldName string - NewOwnerId InodeId - NewName string + TargetId InodeId + OldOwnerId InodeId + OldName string + OldCreationTime EggsTime + NewOwnerId InodeId + NewName string } -type RenameDirectoryResp struct{} +type RenameDirectoryResp struct { + CreationTime EggsTime +} // This operation is safe for files: we can check that it has no spans, // and that it is transient. @@ -771,14 +850,14 @@ type HardUnlinkDirectoryReq struct { type HardUnlinkDirectoryResp struct{} -type HardUnlinkFileReq struct { +type CrossShardHardUnlinkFileReq struct { OwnerId InodeId TargetId InodeId Name string CreationTime EggsTime } -type HardUnlinkFileResp struct{} +type CrossShardHardUnlinkFileResp struct{} // -------------------------------------------------------------------- // directory info @@ -871,16 +950,18 @@ type LinkFileEntry struct { } type SameDirectoryRenameEntry struct { - TargetId InodeId - DirId InodeId - OldName string - NewName string + DirId InodeId + TargetId InodeId + OldName string + OldCreationTime EggsTime + NewName string } type SoftUnlinkFileEntry struct { - OwnerId InodeId - FileId InodeId - Name string + OwnerId InodeId + FileId InodeId + Name string + CreationTime EggsTime } type CreateDirectoryInodeEntry struct { @@ -890,23 +971,28 @@ type CreateDirectoryInodeEntry struct { } type CreateLockedCurrentEdgeEntry struct { - DirId InodeId - Name string - TargetId InodeId - CreationTime EggsTime + DirId InodeId + Name string + TargetId InodeId } type UnlockCurrentEdgeEntry struct { - DirId InodeId - Name string - TargetId InodeId - WasMoved bool + DirId InodeId + Name string + // Here the `CreationTime` is currently not strictly needed, since we have the + // locking mechanism + CDC synchronization anyway, which offer stronger guarantees + // which means we never need heuristics for this. But we include it for consistency + // and to better detect bugs. + CreationTime EggsTime + TargetId InodeId + WasMoved bool } type LockCurrentEdgeEntry struct { - DirId InodeId - Name string - TargetId InodeId + DirId InodeId + Name string + CreationTime EggsTime + TargetId InodeId } type RemoveDirectoryOwnerEntry struct { @@ -935,7 +1021,7 @@ type RemoveNonOwnedEdgeEntry struct { CreationTime EggsTime } -type IntraShardHardFileUnlinkEntry struct { +type SameShardHardFileUnlinkEntry struct { OwnerId InodeId TargetId InodeId Name string diff --git a/go/msgs/msgs_bincode.go b/go/msgs/msgs_bincode.go index a6020fe1..cebdc36c 100644 --- a/go/msgs/msgs_bincode.go +++ b/go/msgs/msgs_bincode.go @@ -16,31 +16,31 @@ const ( FILE_NOT_FOUND ErrCode = 17 DIRECTORY_NOT_FOUND ErrCode = 18 NAME_NOT_FOUND ErrCode = 19 - TYPE_IS_DIRECTORY ErrCode = 20 - TYPE_IS_NOT_DIRECTORY ErrCode = 21 - BAD_COOKIE ErrCode = 22 - INCONSISTENT_STORAGE_CLASS_PARITY ErrCode = 23 - LAST_SPAN_STATE_NOT_CLEAN ErrCode = 24 - COULD_NOT_PICK_BLOCK_SERVICES ErrCode = 25 - BAD_SPAN_BODY ErrCode = 26 - SPAN_NOT_FOUND ErrCode = 27 - BLOCK_SERVICE_NOT_FOUND ErrCode = 28 - CANNOT_CERTIFY_BLOCKLESS_SPAN ErrCode = 29 - BAD_NUMBER_OF_BLOCKS_PROOFS ErrCode = 30 - BAD_BLOCK_PROOF ErrCode = 31 - CANNOT_OVERRIDE_NAME ErrCode = 32 - NAME_IS_LOCKED ErrCode = 33 - OLD_NAME_IS_LOCKED ErrCode = 34 - NEW_NAME_IS_LOCKED ErrCode = 35 + EDGE_NOT_FOUND ErrCode = 20 + EDGE_IS_LOCKED ErrCode = 21 + TYPE_IS_DIRECTORY ErrCode = 22 + TYPE_IS_NOT_DIRECTORY ErrCode = 23 + BAD_COOKIE ErrCode = 24 + INCONSISTENT_STORAGE_CLASS_PARITY ErrCode = 25 + LAST_SPAN_STATE_NOT_CLEAN ErrCode = 26 + COULD_NOT_PICK_BLOCK_SERVICES ErrCode = 27 + BAD_SPAN_BODY ErrCode = 28 + SPAN_NOT_FOUND ErrCode = 29 + BLOCK_SERVICE_NOT_FOUND ErrCode = 30 + CANNOT_CERTIFY_BLOCKLESS_SPAN ErrCode = 31 + BAD_NUMBER_OF_BLOCKS_PROOFS ErrCode = 32 + BAD_BLOCK_PROOF ErrCode = 33 + CANNOT_OVERRIDE_NAME ErrCode = 34 + NAME_IS_LOCKED ErrCode = 35 MTIME_IS_TOO_RECENT ErrCode = 36 MISMATCHING_TARGET ErrCode = 37 MISMATCHING_OWNER ErrCode = 38 - DIRECTORY_NOT_EMPTY ErrCode = 39 - FILE_IS_TRANSIENT ErrCode = 40 - OLD_DIRECTORY_NOT_FOUND ErrCode = 41 - NEW_DIRECTORY_NOT_FOUND ErrCode = 42 - LOOP_IN_DIRECTORY_RENAME ErrCode = 43 - EDGE_NOT_FOUND ErrCode = 44 + MISMATCHING_CREATION_TIME ErrCode = 39 + DIRECTORY_NOT_EMPTY ErrCode = 40 + FILE_IS_TRANSIENT ErrCode = 41 + OLD_DIRECTORY_NOT_FOUND ErrCode = 42 + NEW_DIRECTORY_NOT_FOUND ErrCode = 43 + LOOP_IN_DIRECTORY_RENAME ErrCode = 44 DIRECTORY_HAS_OWNER ErrCode = 45 FILE_IS_NOT_TRANSIENT ErrCode = 46 FILE_NOT_EMPTY ErrCode = 47 @@ -52,10 +52,10 @@ const ( MORE_RECENT_SNAPSHOT_EDGE ErrCode = 53 MORE_RECENT_CURRENT_EDGE ErrCode = 54 BAD_DIRECTORY_INFO ErrCode = 55 - CREATION_TIME_TOO_RECENT ErrCode = 56 - DEADLINE_NOT_PASSED ErrCode = 57 - SAME_SOURCE_AND_DESTINATION ErrCode = 58 - SAME_DIRECTORIES ErrCode = 59 + DEADLINE_NOT_PASSED ErrCode = 56 + SAME_SOURCE_AND_DESTINATION ErrCode = 57 + SAME_DIRECTORIES ErrCode = 58 + SAME_SHARD ErrCode = 59 ) func (err ErrCode) String() string { @@ -81,37 +81,37 @@ func (err ErrCode) String() string { case 19: return "NAME_NOT_FOUND" case 20: - return "TYPE_IS_DIRECTORY" + return "EDGE_NOT_FOUND" case 21: - return "TYPE_IS_NOT_DIRECTORY" + return "EDGE_IS_LOCKED" case 22: - return "BAD_COOKIE" + return "TYPE_IS_DIRECTORY" case 23: - return "INCONSISTENT_STORAGE_CLASS_PARITY" + return "TYPE_IS_NOT_DIRECTORY" case 24: - return "LAST_SPAN_STATE_NOT_CLEAN" + return "BAD_COOKIE" case 25: - return "COULD_NOT_PICK_BLOCK_SERVICES" + return "INCONSISTENT_STORAGE_CLASS_PARITY" case 26: - return "BAD_SPAN_BODY" + return "LAST_SPAN_STATE_NOT_CLEAN" case 27: - return "SPAN_NOT_FOUND" + return "COULD_NOT_PICK_BLOCK_SERVICES" case 28: - return "BLOCK_SERVICE_NOT_FOUND" + return "BAD_SPAN_BODY" case 29: - return "CANNOT_CERTIFY_BLOCKLESS_SPAN" + return "SPAN_NOT_FOUND" case 30: - return "BAD_NUMBER_OF_BLOCKS_PROOFS" + return "BLOCK_SERVICE_NOT_FOUND" case 31: - return "BAD_BLOCK_PROOF" + return "CANNOT_CERTIFY_BLOCKLESS_SPAN" case 32: - return "CANNOT_OVERRIDE_NAME" + return "BAD_NUMBER_OF_BLOCKS_PROOFS" case 33: - return "NAME_IS_LOCKED" + return "BAD_BLOCK_PROOF" case 34: - return "OLD_NAME_IS_LOCKED" + return "CANNOT_OVERRIDE_NAME" case 35: - return "NEW_NAME_IS_LOCKED" + return "NAME_IS_LOCKED" case 36: return "MTIME_IS_TOO_RECENT" case 37: @@ -119,17 +119,17 @@ func (err ErrCode) String() string { case 38: return "MISMATCHING_OWNER" case 39: - return "DIRECTORY_NOT_EMPTY" + return "MISMATCHING_CREATION_TIME" case 40: - return "FILE_IS_TRANSIENT" + return "DIRECTORY_NOT_EMPTY" case 41: - return "OLD_DIRECTORY_NOT_FOUND" + return "FILE_IS_TRANSIENT" case 42: - return "NEW_DIRECTORY_NOT_FOUND" + return "OLD_DIRECTORY_NOT_FOUND" case 43: - return "LOOP_IN_DIRECTORY_RENAME" + return "NEW_DIRECTORY_NOT_FOUND" case 44: - return "EDGE_NOT_FOUND" + return "LOOP_IN_DIRECTORY_RENAME" case 45: return "DIRECTORY_HAS_OWNER" case 46: @@ -153,91 +153,18 @@ func (err ErrCode) String() string { case 55: return "BAD_DIRECTORY_INFO" case 56: - return "CREATION_TIME_TOO_RECENT" - case 57: return "DEADLINE_NOT_PASSED" - case 58: + case 57: return "SAME_SOURCE_AND_DESTINATION" - case 59: + case 58: return "SAME_DIRECTORIES" + case 59: + return "SAME_SHARD" default: return fmt.Sprintf("ErrCode(%d)", err) } } -func GetShardMessageKind(body any) ShardMessageKind { - switch body.(type) { - case ErrCode: - return 0 - case *LookupReq, *LookupResp: - return LOOKUP - case *StatFileReq, *StatFileResp: - return STAT_FILE - case *StatTransientFileReq, *StatTransientFileResp: - return STAT_TRANSIENT_FILE - case *StatDirectoryReq, *StatDirectoryResp: - return STAT_DIRECTORY - case *ReadDirReq, *ReadDirResp: - return READ_DIR - case *ConstructFileReq, *ConstructFileResp: - return CONSTRUCT_FILE - case *AddSpanInitiateReq, *AddSpanInitiateResp: - return ADD_SPAN_INITIATE - case *AddSpanCertifyReq, *AddSpanCertifyResp: - return ADD_SPAN_CERTIFY - case *LinkFileReq, *LinkFileResp: - return LINK_FILE - case *SoftUnlinkFileReq, *SoftUnlinkFileResp: - return SOFT_UNLINK_FILE - case *FileSpansReq, *FileSpansResp: - return FILE_SPANS - case *SameDirectoryRenameReq, *SameDirectoryRenameResp: - return SAME_DIRECTORY_RENAME - case *SetDirectoryInfoReq, *SetDirectoryInfoResp: - return SET_DIRECTORY_INFO - case *VisitDirectoriesReq, *VisitDirectoriesResp: - return VISIT_DIRECTORIES - case *VisitFilesReq, *VisitFilesResp: - return VISIT_FILES - case *VisitTransientFilesReq, *VisitTransientFilesResp: - return VISIT_TRANSIENT_FILES - case *FullReadDirReq, *FullReadDirResp: - return FULL_READ_DIR - case *RemoveNonOwnedEdgeReq, *RemoveNonOwnedEdgeResp: - return REMOVE_NON_OWNED_EDGE - case *IntraShardHardFileUnlinkReq, *IntraShardHardFileUnlinkResp: - return INTRA_SHARD_HARD_FILE_UNLINK - case *RemoveSpanInitiateReq, *RemoveSpanInitiateResp: - return REMOVE_SPAN_INITIATE - case *RemoveSpanCertifyReq, *RemoveSpanCertifyResp: - return REMOVE_SPAN_CERTIFY - case *SwapBlocksReq, *SwapBlocksResp: - return SWAP_BLOCKS - case *BlockServiceFilesReq, *BlockServiceFilesResp: - return BLOCK_SERVICE_FILES - case *RemoveInodeReq, *RemoveInodeResp: - return REMOVE_INODE - case *CreateDirectoryInodeReq, *CreateDirectoryInodeResp: - return CREATE_DIRECTORY_INODE - case *SetDirectoryOwnerReq, *SetDirectoryOwnerResp: - return SET_DIRECTORY_OWNER - case *RemoveDirectoryOwnerReq, *RemoveDirectoryOwnerResp: - return REMOVE_DIRECTORY_OWNER - case *CreateLockedCurrentEdgeReq, *CreateLockedCurrentEdgeResp: - return CREATE_LOCKED_CURRENT_EDGE - case *LockCurrentEdgeReq, *LockCurrentEdgeResp: - return LOCK_CURRENT_EDGE - case *UnlockCurrentEdgeReq, *UnlockCurrentEdgeResp: - return UNLOCK_CURRENT_EDGE - case *RemoveOwnedSnapshotFileEdgeReq, *RemoveOwnedSnapshotFileEdgeResp: - return REMOVE_OWNED_SNAPSHOT_FILE_EDGE - case *MakeFileTransientReq, *MakeFileTransientResp: - return MAKE_FILE_TRANSIENT - default: - panic(fmt.Sprintf("bad shard req/resp body %T", body)) - } -} - func (k ShardMessageKind) String() string { switch k { case 1: @@ -266,6 +193,8 @@ func (k ShardMessageKind) String() string { return "SAME_DIRECTORY_RENAME" case 15: return "SET_DIRECTORY_INFO" + case 9: + return "SNAPSHOT_LOOKUP" case 21: return "VISIT_DIRECTORIES" case 32: @@ -277,7 +206,7 @@ func (k ShardMessageKind) String() string { case 23: return "REMOVE_NON_OWNED_EDGE" case 24: - return "INTRA_SHARD_HARD_FILE_UNLINK" + return "SAME_SHARD_HARD_FILE_UNLINK" case 25: return "REMOVE_SPAN_INITIATE" case 26: @@ -324,12 +253,13 @@ const ( FILE_SPANS ShardMessageKind = 0xD SAME_DIRECTORY_RENAME ShardMessageKind = 0xE SET_DIRECTORY_INFO ShardMessageKind = 0xF + SNAPSHOT_LOOKUP ShardMessageKind = 0x9 VISIT_DIRECTORIES ShardMessageKind = 0x15 VISIT_FILES ShardMessageKind = 0x20 VISIT_TRANSIENT_FILES ShardMessageKind = 0x16 FULL_READ_DIR ShardMessageKind = 0x21 REMOVE_NON_OWNED_EDGE ShardMessageKind = 0x17 - INTRA_SHARD_HARD_FILE_UNLINK ShardMessageKind = 0x18 + SAME_SHARD_HARD_FILE_UNLINK ShardMessageKind = 0x18 REMOVE_SPAN_INITIATE ShardMessageKind = 0x19 REMOVE_SPAN_CERTIFY ShardMessageKind = 0x1A SWAP_BLOCKS ShardMessageKind = 0x22 @@ -345,27 +275,6 @@ const ( MAKE_FILE_TRANSIENT ShardMessageKind = 0x87 ) -func GetCDCMessageKind(body any) CDCMessageKind { - switch body.(type) { - case ErrCode: - return 0 - case *MakeDirectoryReq, *MakeDirectoryResp: - return MAKE_DIRECTORY - case *RenameFileReq, *RenameFileResp: - return RENAME_FILE - case *SoftUnlinkDirectoryReq, *SoftUnlinkDirectoryResp: - return SOFT_UNLINK_DIRECTORY - case *RenameDirectoryReq, *RenameDirectoryResp: - return RENAME_DIRECTORY - case *HardUnlinkDirectoryReq, *HardUnlinkDirectoryResp: - return HARD_UNLINK_DIRECTORY - case *HardUnlinkFileReq, *HardUnlinkFileResp: - return HARD_UNLINK_FILE - default: - panic(fmt.Sprintf("bad shard req/resp body %T", body)) - } -} - func (k CDCMessageKind) String() string { switch k { case 1: @@ -379,7 +288,7 @@ func (k CDCMessageKind) String() string { case 5: return "HARD_UNLINK_DIRECTORY" case 6: - return "HARD_UNLINK_FILE" + return "CROSS_SHARD_HARD_UNLINK_FILE" default: return fmt.Sprintf("CDCMessageKind(%d)", k) } @@ -392,9 +301,13 @@ const ( SOFT_UNLINK_DIRECTORY CDCMessageKind = 0x3 RENAME_DIRECTORY CDCMessageKind = 0x4 HARD_UNLINK_DIRECTORY CDCMessageKind = 0x5 - HARD_UNLINK_FILE CDCMessageKind = 0x6 + CROSS_SHARD_HARD_UNLINK_FILE CDCMessageKind = 0x6 ) +func (v *LookupReq) ShardRequestKind() ShardMessageKind { + return LOOKUP +} + func (v *LookupReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackBytes([]byte(v.Name)) @@ -410,6 +323,10 @@ func (v *LookupReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *LookupResp) ShardResponseKind() ShardMessageKind { + return LOOKUP +} + func (v *LookupResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.TargetId)) buf.PackU64(uint64(v.CreationTime)) @@ -425,6 +342,10 @@ func (v *LookupResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatFileReq) ShardRequestKind() ShardMessageKind { + return STAT_FILE +} + func (v *StatFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) } @@ -436,6 +357,10 @@ func (v *StatFileReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatFileResp) ShardResponseKind() ShardMessageKind { + return STAT_FILE +} + func (v *StatFileResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Mtime)) buf.PackU64(uint64(v.Size)) @@ -451,6 +376,10 @@ func (v *StatFileResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatTransientFileReq) ShardRequestKind() ShardMessageKind { + return STAT_TRANSIENT_FILE +} + func (v *StatTransientFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) } @@ -462,6 +391,10 @@ func (v *StatTransientFileReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatTransientFileResp) ShardResponseKind() ShardMessageKind { + return STAT_TRANSIENT_FILE +} + func (v *StatTransientFileResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Mtime)) buf.PackU64(uint64(v.Size)) @@ -481,6 +414,10 @@ func (v *StatTransientFileResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatDirectoryReq) ShardRequestKind() ShardMessageKind { + return STAT_DIRECTORY +} + func (v *StatDirectoryReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) } @@ -492,6 +429,10 @@ func (v *StatDirectoryReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *StatDirectoryResp) ShardResponseKind() ShardMessageKind { + return STAT_DIRECTORY +} + func (v *StatDirectoryResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Mtime)) buf.PackU64(uint64(v.Owner)) @@ -511,6 +452,10 @@ func (v *StatDirectoryResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *ReadDirReq) ShardRequestKind() ShardMessageKind { + return READ_DIR +} + func (v *ReadDirReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackU64(uint64(v.StartHash)) @@ -526,6 +471,10 @@ func (v *ReadDirReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *ReadDirResp) ShardResponseKind() ShardMessageKind { + return READ_DIR +} + func (v *ReadDirResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.NextHash)) len1 := len(v.Results) @@ -552,6 +501,10 @@ func (v *ReadDirResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *ConstructFileReq) ShardRequestKind() ShardMessageKind { + return CONSTRUCT_FILE +} + func (v *ConstructFileReq) Pack(buf *bincode.Buf) { buf.PackU8(uint8(v.Type)) buf.PackBytes([]byte(v.Note)) @@ -567,6 +520,10 @@ func (v *ConstructFileReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *ConstructFileResp) ShardResponseKind() ShardMessageKind { + return CONSTRUCT_FILE +} + func (v *ConstructFileResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -582,6 +539,10 @@ func (v *ConstructFileResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *AddSpanInitiateReq) ShardRequestKind() ShardMessageKind { + return ADD_SPAN_INITIATE +} + func (v *AddSpanInitiateReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -655,6 +616,10 @@ func (v *AddSpanInitiateReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *AddSpanInitiateResp) ShardResponseKind() ShardMessageKind { + return ADD_SPAN_INITIATE +} + func (v *AddSpanInitiateResp) Pack(buf *bincode.Buf) { len1 := len(v.Blocks) buf.PackLength(len1) @@ -677,6 +642,10 @@ func (v *AddSpanInitiateResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *AddSpanCertifyReq) ShardRequestKind() ShardMessageKind { + return ADD_SPAN_CERTIFY +} + func (v *AddSpanCertifyReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -711,6 +680,10 @@ func (v *AddSpanCertifyReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *AddSpanCertifyResp) ShardResponseKind() ShardMessageKind { + return ADD_SPAN_CERTIFY +} + func (v *AddSpanCertifyResp) Pack(buf *bincode.Buf) { } @@ -718,6 +691,10 @@ func (v *AddSpanCertifyResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *LinkFileReq) ShardRequestKind() ShardMessageKind { + return LINK_FILE +} + func (v *LinkFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -741,17 +718,30 @@ func (v *LinkFileReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *LinkFileResp) ShardResponseKind() ShardMessageKind { + return LINK_FILE +} + func (v *LinkFileResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.CreationTime)) } func (v *LinkFileResp) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } return nil } +func (v *SoftUnlinkFileReq) ShardRequestKind() ShardMessageKind { + return SOFT_UNLINK_FILE +} + func (v *SoftUnlinkFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackU64(uint64(v.FileId)) buf.PackBytes([]byte(v.Name)) + buf.PackU64(uint64(v.CreationTime)) } func (v *SoftUnlinkFileReq) Unpack(buf *bincode.Buf) error { @@ -764,9 +754,16 @@ func (v *SoftUnlinkFileReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackString(&v.Name); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } return nil } +func (v *SoftUnlinkFileResp) ShardResponseKind() ShardMessageKind { + return SOFT_UNLINK_FILE +} + func (v *SoftUnlinkFileResp) Pack(buf *bincode.Buf) { } @@ -774,6 +771,10 @@ func (v *SoftUnlinkFileResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *FileSpansReq) ShardRequestKind() ShardMessageKind { + return FILE_SPANS +} + func (v *FileSpansReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackVarU61(uint64(v.ByteOffset)) @@ -789,6 +790,10 @@ func (v *FileSpansReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *FileSpansResp) ShardResponseKind() ShardMessageKind { + return FILE_SPANS +} + func (v *FileSpansResp) Pack(buf *bincode.Buf) { buf.PackVarU61(uint64(v.NextOffset)) len1 := len(v.BlockServices) @@ -830,10 +835,15 @@ func (v *FileSpansResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *SameDirectoryRenameReq) ShardRequestKind() ShardMessageKind { + return SAME_DIRECTORY_RENAME +} + func (v *SameDirectoryRenameReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.TargetId)) buf.PackU64(uint64(v.DirId)) buf.PackBytes([]byte(v.OldName)) + buf.PackU64(uint64(v.OldCreationTime)) buf.PackBytes([]byte(v.NewName)) } @@ -847,19 +857,34 @@ func (v *SameDirectoryRenameReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackString(&v.OldName); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.OldCreationTime)); err != nil { + return err + } if err := buf.UnpackString(&v.NewName); err != nil { return err } return nil } +func (v *SameDirectoryRenameResp) ShardResponseKind() ShardMessageKind { + return SAME_DIRECTORY_RENAME +} + func (v *SameDirectoryRenameResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.NewCreationTime)) } func (v *SameDirectoryRenameResp) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.NewCreationTime)); err != nil { + return err + } return nil } +func (v *SetDirectoryInfoReq) ShardRequestKind() ShardMessageKind { + return SET_DIRECTORY_INFO +} + func (v *SetDirectoryInfoReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) v.Info.Pack(buf) @@ -875,6 +900,10 @@ func (v *SetDirectoryInfoReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *SetDirectoryInfoResp) ShardResponseKind() ShardMessageKind { + return SET_DIRECTORY_INFO +} + func (v *SetDirectoryInfoResp) Pack(buf *bincode.Buf) { } @@ -882,6 +911,63 @@ func (v *SetDirectoryInfoResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *SnapshotLookupReq) ShardRequestKind() ShardMessageKind { + return SNAPSHOT_LOOKUP +} + +func (v *SnapshotLookupReq) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.DirId)) + buf.PackBytes([]byte(v.Name)) + buf.PackU64(uint64(v.StartFrom)) +} + +func (v *SnapshotLookupReq) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.DirId)); err != nil { + return err + } + if err := buf.UnpackString(&v.Name); err != nil { + return err + } + if err := buf.UnpackU64((*uint64)(&v.StartFrom)); err != nil { + return err + } + return nil +} + +func (v *SnapshotLookupResp) ShardResponseKind() ShardMessageKind { + return SNAPSHOT_LOOKUP +} + +func (v *SnapshotLookupResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.NextTime)) + len1 := len(v.Edges) + buf.PackLength(len1) + for i := 0; i < len1; i++ { + v.Edges[i].Pack(buf) + } +} + +func (v *SnapshotLookupResp) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.NextTime)); err != nil { + return err + } + var len1 int + if err := buf.UnpackLength(&len1); err != nil { + return err + } + bincode.EnsureLength(&v.Edges, len1) + for i := 0; i < len1; i++ { + if err := v.Edges[i].Unpack(buf); err != nil { + return err + } + } + return nil +} + +func (v *VisitDirectoriesReq) ShardRequestKind() ShardMessageKind { + return VISIT_DIRECTORIES +} + func (v *VisitDirectoriesReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.BeginId)) } @@ -893,6 +979,10 @@ func (v *VisitDirectoriesReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *VisitDirectoriesResp) ShardResponseKind() ShardMessageKind { + return VISIT_DIRECTORIES +} + func (v *VisitDirectoriesResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.NextId)) len1 := len(v.Ids) @@ -919,6 +1009,10 @@ func (v *VisitDirectoriesResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *VisitFilesReq) ShardRequestKind() ShardMessageKind { + return VISIT_FILES +} + func (v *VisitFilesReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.BeginId)) } @@ -930,6 +1024,10 @@ func (v *VisitFilesReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *VisitFilesResp) ShardResponseKind() ShardMessageKind { + return VISIT_FILES +} + func (v *VisitFilesResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.NextId)) len1 := len(v.Ids) @@ -956,6 +1054,10 @@ func (v *VisitFilesResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *VisitTransientFilesReq) ShardRequestKind() ShardMessageKind { + return VISIT_TRANSIENT_FILES +} + func (v *VisitTransientFilesReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.BeginId)) } @@ -967,6 +1069,10 @@ func (v *VisitTransientFilesReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *VisitTransientFilesResp) ShardResponseKind() ShardMessageKind { + return VISIT_TRANSIENT_FILES +} + func (v *VisitTransientFilesResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.NextId)) len1 := len(v.Files) @@ -993,6 +1099,10 @@ func (v *VisitTransientFilesResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *FullReadDirReq) ShardRequestKind() ShardMessageKind { + return FULL_READ_DIR +} + func (v *FullReadDirReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) v.Cursor.Pack(buf) @@ -1008,6 +1118,10 @@ func (v *FullReadDirReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *FullReadDirResp) ShardResponseKind() ShardMessageKind { + return FULL_READ_DIR +} + func (v *FullReadDirResp) Pack(buf *bincode.Buf) { v.Next.Pack(buf) len1 := len(v.Results) @@ -1034,6 +1148,10 @@ func (v *FullReadDirResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveNonOwnedEdgeReq) ShardRequestKind() ShardMessageKind { + return REMOVE_NON_OWNED_EDGE +} + func (v *RemoveNonOwnedEdgeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackU64(uint64(v.TargetId)) @@ -1057,6 +1175,10 @@ func (v *RemoveNonOwnedEdgeReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveNonOwnedEdgeResp) ShardResponseKind() ShardMessageKind { + return REMOVE_NON_OWNED_EDGE +} + func (v *RemoveNonOwnedEdgeResp) Pack(buf *bincode.Buf) { } @@ -1064,14 +1186,18 @@ func (v *RemoveNonOwnedEdgeResp) Unpack(buf *bincode.Buf) error { return nil } -func (v *IntraShardHardFileUnlinkReq) Pack(buf *bincode.Buf) { +func (v *SameShardHardFileUnlinkReq) ShardRequestKind() ShardMessageKind { + return SAME_SHARD_HARD_FILE_UNLINK +} + +func (v *SameShardHardFileUnlinkReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackU64(uint64(v.TargetId)) buf.PackBytes([]byte(v.Name)) buf.PackU64(uint64(v.CreationTime)) } -func (v *IntraShardHardFileUnlinkReq) Unpack(buf *bincode.Buf) error { +func (v *SameShardHardFileUnlinkReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.OwnerId)); err != nil { return err } @@ -1087,13 +1213,21 @@ func (v *IntraShardHardFileUnlinkReq) Unpack(buf *bincode.Buf) error { return nil } -func (v *IntraShardHardFileUnlinkResp) Pack(buf *bincode.Buf) { +func (v *SameShardHardFileUnlinkResp) ShardResponseKind() ShardMessageKind { + return SAME_SHARD_HARD_FILE_UNLINK } -func (v *IntraShardHardFileUnlinkResp) Unpack(buf *bincode.Buf) error { +func (v *SameShardHardFileUnlinkResp) Pack(buf *bincode.Buf) { +} + +func (v *SameShardHardFileUnlinkResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveSpanInitiateReq) ShardRequestKind() ShardMessageKind { + return REMOVE_SPAN_INITIATE +} + func (v *RemoveSpanInitiateReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -1109,6 +1243,10 @@ func (v *RemoveSpanInitiateReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveSpanInitiateResp) ShardResponseKind() ShardMessageKind { + return REMOVE_SPAN_INITIATE +} + func (v *RemoveSpanInitiateResp) Pack(buf *bincode.Buf) { buf.PackVarU61(uint64(v.ByteOffset)) len1 := len(v.Blocks) @@ -1135,6 +1273,10 @@ func (v *RemoveSpanInitiateResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveSpanCertifyReq) ShardRequestKind() ShardMessageKind { + return REMOVE_SPAN_CERTIFY +} + func (v *RemoveSpanCertifyReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId)) buf.PackFixedBytes(8, v.Cookie[:]) @@ -1169,6 +1311,10 @@ func (v *RemoveSpanCertifyReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveSpanCertifyResp) ShardResponseKind() ShardMessageKind { + return REMOVE_SPAN_CERTIFY +} + func (v *RemoveSpanCertifyResp) Pack(buf *bincode.Buf) { } @@ -1176,6 +1322,10 @@ func (v *RemoveSpanCertifyResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *SwapBlocksReq) ShardRequestKind() ShardMessageKind { + return SWAP_BLOCKS +} + func (v *SwapBlocksReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.FileId1)) buf.PackU64(uint64(v.ByteOffset1)) @@ -1207,6 +1357,10 @@ func (v *SwapBlocksReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *SwapBlocksResp) ShardResponseKind() ShardMessageKind { + return SWAP_BLOCKS +} + func (v *SwapBlocksResp) Pack(buf *bincode.Buf) { } @@ -1214,6 +1368,10 @@ func (v *SwapBlocksResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *BlockServiceFilesReq) ShardRequestKind() ShardMessageKind { + return BLOCK_SERVICE_FILES +} + func (v *BlockServiceFilesReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.BlockServiceId)) buf.PackU64(uint64(v.StartFrom)) @@ -1229,6 +1387,10 @@ func (v *BlockServiceFilesReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *BlockServiceFilesResp) ShardResponseKind() ShardMessageKind { + return BLOCK_SERVICE_FILES +} + func (v *BlockServiceFilesResp) Pack(buf *bincode.Buf) { len1 := len(v.FileIds) buf.PackLength(len1) @@ -1251,6 +1413,10 @@ func (v *BlockServiceFilesResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveInodeReq) ShardRequestKind() ShardMessageKind { + return REMOVE_INODE +} + func (v *RemoveInodeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) } @@ -1262,6 +1428,10 @@ func (v *RemoveInodeReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveInodeResp) ShardResponseKind() ShardMessageKind { + return REMOVE_INODE +} + func (v *RemoveInodeResp) Pack(buf *bincode.Buf) { } @@ -1269,6 +1439,10 @@ func (v *RemoveInodeResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *CreateDirectoryInodeReq) ShardRequestKind() ShardMessageKind { + return CREATE_DIRECTORY_INODE +} + func (v *CreateDirectoryInodeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) buf.PackU64(uint64(v.OwnerId)) @@ -1288,6 +1462,10 @@ func (v *CreateDirectoryInodeReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *CreateDirectoryInodeResp) ShardResponseKind() ShardMessageKind { + return CREATE_DIRECTORY_INODE +} + func (v *CreateDirectoryInodeResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Mtime)) } @@ -1299,6 +1477,10 @@ func (v *CreateDirectoryInodeResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *SetDirectoryOwnerReq) ShardRequestKind() ShardMessageKind { + return SET_DIRECTORY_OWNER +} + func (v *SetDirectoryOwnerReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackU64(uint64(v.OwnerId)) @@ -1314,6 +1496,10 @@ func (v *SetDirectoryOwnerReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *SetDirectoryOwnerResp) ShardResponseKind() ShardMessageKind { + return SET_DIRECTORY_OWNER +} + func (v *SetDirectoryOwnerResp) Pack(buf *bincode.Buf) { } @@ -1321,6 +1507,10 @@ func (v *SetDirectoryOwnerResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveDirectoryOwnerReq) ShardRequestKind() ShardMessageKind { + return REMOVE_DIRECTORY_OWNER +} + func (v *RemoveDirectoryOwnerReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackBytes([]byte(v.Info)) @@ -1336,6 +1526,10 @@ func (v *RemoveDirectoryOwnerReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveDirectoryOwnerResp) ShardResponseKind() ShardMessageKind { + return REMOVE_DIRECTORY_OWNER +} + func (v *RemoveDirectoryOwnerResp) Pack(buf *bincode.Buf) { } @@ -1343,11 +1537,14 @@ func (v *RemoveDirectoryOwnerResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *CreateLockedCurrentEdgeReq) ShardRequestKind() ShardMessageKind { + return CREATE_LOCKED_CURRENT_EDGE +} + func (v *CreateLockedCurrentEdgeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackBytes([]byte(v.Name)) buf.PackU64(uint64(v.TargetId)) - buf.PackU64(uint64(v.CreationTime)) } func (v *CreateLockedCurrentEdgeReq) Unpack(buf *bincode.Buf) error { @@ -1360,38 +1557,55 @@ func (v *CreateLockedCurrentEdgeReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.TargetId)); err != nil { return err } + return nil +} + +func (v *CreateLockedCurrentEdgeResp) ShardResponseKind() ShardMessageKind { + return CREATE_LOCKED_CURRENT_EDGE +} + +func (v *CreateLockedCurrentEdgeResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.CreationTime)) +} + +func (v *CreateLockedCurrentEdgeResp) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { return err } return nil } -func (v *CreateLockedCurrentEdgeResp) Pack(buf *bincode.Buf) { -} - -func (v *CreateLockedCurrentEdgeResp) Unpack(buf *bincode.Buf) error { - return nil +func (v *LockCurrentEdgeReq) ShardRequestKind() ShardMessageKind { + return LOCK_CURRENT_EDGE } func (v *LockCurrentEdgeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) - buf.PackBytes([]byte(v.Name)) buf.PackU64(uint64(v.TargetId)) + buf.PackU64(uint64(v.CreationTime)) + buf.PackBytes([]byte(v.Name)) } func (v *LockCurrentEdgeReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.DirId)); err != nil { return err } - if err := buf.UnpackString(&v.Name); err != nil { - return err - } if err := buf.UnpackU64((*uint64)(&v.TargetId)); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } + if err := buf.UnpackString(&v.Name); err != nil { + return err + } return nil } +func (v *LockCurrentEdgeResp) ShardResponseKind() ShardMessageKind { + return LOCK_CURRENT_EDGE +} + func (v *LockCurrentEdgeResp) Pack(buf *bincode.Buf) { } @@ -1399,9 +1613,14 @@ func (v *LockCurrentEdgeResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *UnlockCurrentEdgeReq) ShardRequestKind() ShardMessageKind { + return UNLOCK_CURRENT_EDGE +} + func (v *UnlockCurrentEdgeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) buf.PackBytes([]byte(v.Name)) + buf.PackU64(uint64(v.CreationTime)) buf.PackU64(uint64(v.TargetId)) buf.PackBool(bool(v.WasMoved)) } @@ -1413,6 +1632,9 @@ func (v *UnlockCurrentEdgeReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackString(&v.Name); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } if err := buf.UnpackU64((*uint64)(&v.TargetId)); err != nil { return err } @@ -1422,6 +1644,10 @@ func (v *UnlockCurrentEdgeReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *UnlockCurrentEdgeResp) ShardResponseKind() ShardMessageKind { + return UNLOCK_CURRENT_EDGE +} + func (v *UnlockCurrentEdgeResp) Pack(buf *bincode.Buf) { } @@ -1429,6 +1655,10 @@ func (v *UnlockCurrentEdgeResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveOwnedSnapshotFileEdgeReq) ShardRequestKind() ShardMessageKind { + return REMOVE_OWNED_SNAPSHOT_FILE_EDGE +} + func (v *RemoveOwnedSnapshotFileEdgeReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackU64(uint64(v.TargetId)) @@ -1452,6 +1682,10 @@ func (v *RemoveOwnedSnapshotFileEdgeReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RemoveOwnedSnapshotFileEdgeResp) ShardResponseKind() ShardMessageKind { + return REMOVE_OWNED_SNAPSHOT_FILE_EDGE +} + func (v *RemoveOwnedSnapshotFileEdgeResp) Pack(buf *bincode.Buf) { } @@ -1459,6 +1693,10 @@ func (v *RemoveOwnedSnapshotFileEdgeResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *MakeFileTransientReq) ShardRequestKind() ShardMessageKind { + return MAKE_FILE_TRANSIENT +} + func (v *MakeFileTransientReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) buf.PackBytes([]byte(v.Note)) @@ -1474,6 +1712,10 @@ func (v *MakeFileTransientReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *MakeFileTransientResp) ShardResponseKind() ShardMessageKind { + return MAKE_FILE_TRANSIENT +} + func (v *MakeFileTransientResp) Pack(buf *bincode.Buf) { } @@ -1481,6 +1723,10 @@ func (v *MakeFileTransientResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *MakeDirectoryReq) CDCRequestKind() CDCMessageKind { + return MAKE_DIRECTORY +} + func (v *MakeDirectoryReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackBytes([]byte(v.Name)) @@ -1500,21 +1746,34 @@ func (v *MakeDirectoryReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *MakeDirectoryResp) CDCResponseKind() CDCMessageKind { + return MAKE_DIRECTORY +} + func (v *MakeDirectoryResp) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.Id)) + buf.PackU64(uint64(v.CreationTime)) } func (v *MakeDirectoryResp) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.Id)); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } return nil } +func (v *RenameFileReq) CDCRequestKind() CDCMessageKind { + return RENAME_FILE +} + func (v *RenameFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.TargetId)) buf.PackU64(uint64(v.OldOwnerId)) buf.PackBytes([]byte(v.OldName)) + buf.PackU64(uint64(v.OldCreationTime)) buf.PackU64(uint64(v.NewOwnerId)) buf.PackBytes([]byte(v.NewName)) } @@ -1529,6 +1788,9 @@ func (v *RenameFileReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackString(&v.OldName); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.OldCreationTime)); err != nil { + return err + } if err := buf.UnpackU64((*uint64)(&v.NewOwnerId)); err != nil { return err } @@ -1538,16 +1800,29 @@ func (v *RenameFileReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RenameFileResp) CDCResponseKind() CDCMessageKind { + return RENAME_FILE +} + func (v *RenameFileResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.CreationTime)) } func (v *RenameFileResp) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } return nil } +func (v *SoftUnlinkDirectoryReq) CDCRequestKind() CDCMessageKind { + return SOFT_UNLINK_DIRECTORY +} + func (v *SoftUnlinkDirectoryReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackU64(uint64(v.TargetId)) + buf.PackU64(uint64(v.CreationTime)) buf.PackBytes([]byte(v.Name)) } @@ -1558,12 +1833,19 @@ func (v *SoftUnlinkDirectoryReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.TargetId)); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } if err := buf.UnpackString(&v.Name); err != nil { return err } return nil } +func (v *SoftUnlinkDirectoryResp) CDCResponseKind() CDCMessageKind { + return SOFT_UNLINK_DIRECTORY +} + func (v *SoftUnlinkDirectoryResp) Pack(buf *bincode.Buf) { } @@ -1571,10 +1853,15 @@ func (v *SoftUnlinkDirectoryResp) Unpack(buf *bincode.Buf) error { return nil } +func (v *RenameDirectoryReq) CDCRequestKind() CDCMessageKind { + return RENAME_DIRECTORY +} + func (v *RenameDirectoryReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.TargetId)) buf.PackU64(uint64(v.OldOwnerId)) buf.PackBytes([]byte(v.OldName)) + buf.PackU64(uint64(v.OldCreationTime)) buf.PackU64(uint64(v.NewOwnerId)) buf.PackBytes([]byte(v.NewName)) } @@ -1589,6 +1876,9 @@ func (v *RenameDirectoryReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackString(&v.OldName); err != nil { return err } + if err := buf.UnpackU64((*uint64)(&v.OldCreationTime)); err != nil { + return err + } if err := buf.UnpackU64((*uint64)(&v.NewOwnerId)); err != nil { return err } @@ -1598,13 +1888,25 @@ func (v *RenameDirectoryReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *RenameDirectoryResp) CDCResponseKind() CDCMessageKind { + return RENAME_DIRECTORY +} + func (v *RenameDirectoryResp) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.CreationTime)) } func (v *RenameDirectoryResp) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } return nil } +func (v *HardUnlinkDirectoryReq) CDCRequestKind() CDCMessageKind { + return HARD_UNLINK_DIRECTORY +} + func (v *HardUnlinkDirectoryReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.DirId)) } @@ -1616,6 +1918,10 @@ func (v *HardUnlinkDirectoryReq) Unpack(buf *bincode.Buf) error { return nil } +func (v *HardUnlinkDirectoryResp) CDCResponseKind() CDCMessageKind { + return HARD_UNLINK_DIRECTORY +} + func (v *HardUnlinkDirectoryResp) Pack(buf *bincode.Buf) { } @@ -1623,14 +1929,18 @@ func (v *HardUnlinkDirectoryResp) Unpack(buf *bincode.Buf) error { return nil } -func (v *HardUnlinkFileReq) Pack(buf *bincode.Buf) { +func (v *CrossShardHardUnlinkFileReq) CDCRequestKind() CDCMessageKind { + return CROSS_SHARD_HARD_UNLINK_FILE +} + +func (v *CrossShardHardUnlinkFileReq) Pack(buf *bincode.Buf) { buf.PackU64(uint64(v.OwnerId)) buf.PackU64(uint64(v.TargetId)) buf.PackBytes([]byte(v.Name)) buf.PackU64(uint64(v.CreationTime)) } -func (v *HardUnlinkFileReq) Unpack(buf *bincode.Buf) error { +func (v *CrossShardHardUnlinkFileReq) Unpack(buf *bincode.Buf) error { if err := buf.UnpackU64((*uint64)(&v.OwnerId)); err != nil { return err } @@ -1646,10 +1956,14 @@ func (v *HardUnlinkFileReq) Unpack(buf *bincode.Buf) error { return nil } -func (v *HardUnlinkFileResp) Pack(buf *bincode.Buf) { +func (v *CrossShardHardUnlinkFileResp) CDCResponseKind() CDCMessageKind { + return CROSS_SHARD_HARD_UNLINK_FILE } -func (v *HardUnlinkFileResp) Unpack(buf *bincode.Buf) error { +func (v *CrossShardHardUnlinkFileResp) Pack(buf *bincode.Buf) { +} + +func (v *CrossShardHardUnlinkFileResp) Unpack(buf *bincode.Buf) error { return nil } @@ -2023,3 +2337,18 @@ func (v *EntryNewBlockInfo) Unpack(buf *bincode.Buf) error { return nil } +func (v *SnapshotLookupEdge) Pack(buf *bincode.Buf) { + buf.PackU64(uint64(v.TargetId)) + buf.PackU64(uint64(v.CreationTime)) +} + +func (v *SnapshotLookupEdge) Unpack(buf *bincode.Buf) error { + if err := buf.UnpackU64((*uint64)(&v.TargetId)); err != nil { + return err + } + if err := buf.UnpackU64((*uint64)(&v.CreationTime)); err != nil { + return err + } + return nil +} + diff --git a/go/runeggs/runeggs.go b/go/runeggs/runeggs.go index 13b4a5c8..ac072e73 100644 --- a/go/runeggs/runeggs.go +++ b/go/runeggs/runeggs.go @@ -12,6 +12,13 @@ import ( "xtx/eggsfs/msgs" ) +func noRunawayArgs() { + if flag.NArg() > 0 { + fmt.Fprintf(os.Stderr, "Unexpected extra arguments %v\n", flag.Args()) + os.Exit(2) + } +} + func main() { dataDir := flag.String("dir", "", "Directory where to store all the databases. If not present a tmp dir will be used.") valgrind := flag.Bool("valgrind", false, "Whether to build/run with valgrind.") @@ -21,6 +28,7 @@ func main() { hddBlockServices := flag.Uint("hdd-block-services", 10, "Number of HDD block services (default 10).") flashBlockServices := flag.Uint("flash-block-services", 5, "Number of HDD block services (default 5).") flag.Parse() + noRunawayArgs() if *verbose && !*debug { panic("You asked me to build without -debug, and with -verbose. This is almost certainly wrong.") diff --git a/go/shuckle/shuckle.go b/go/shuckle/shuckle.go index 61602670..09898adf 100644 --- a/go/shuckle/shuckle.go +++ b/go/shuckle/shuckle.go @@ -78,11 +78,19 @@ func handleRegisterBlockService(ll eggs.LogLevels, bss *blockServices, w http.Re bss.services[bs.Id] = &bs } +func noRunawayArgs() { + if flag.NArg() > 0 { + fmt.Fprintf(os.Stderr, "Unexpected extra arguments %v\n", flag.Args()) + os.Exit(2) + } +} + func main() { port := flag.Uint("port", 5000, "Port on which to run on.") logFile := flag.String("log-file", "", "File in which to write logs (or stdout)") verbose := flag.Bool("verbose", false, "") flag.Parse() + noRunawayArgs() logOut := os.Stdout if *logFile != "" { diff --git a/python/error.py b/python/error.py index a888844a..cd1d5ec8 100644 --- a/python/error.py +++ b/python/error.py @@ -44,8 +44,6 @@ ERR_CODE_TO_ERRNO: Dict[ErrCode, int] = { ErrCode.BAD_BLOCK_PROOF: errno.EINVAL, ErrCode.CANNOT_OVERRIDE_NAME: errno.EEXIST, ErrCode.NAME_IS_LOCKED: errno.EEXIST, - ErrCode.OLD_NAME_IS_LOCKED: errno.EBUSY, - ErrCode.NEW_NAME_IS_LOCKED: errno.EBUSY, ErrCode.MTIME_IS_TOO_RECENT: errno.EBUSY, # reasonable? ErrCode.MISMATCHING_TARGET: errno.EINVAL, ErrCode.MISMATCHING_OWNER: errno.EINVAL, diff --git a/python/msgs.py b/python/msgs.py index b7f7fb23..9a3d0e6d 100644 --- a/python/msgs.py +++ b/python/msgs.py @@ -18,31 +18,31 @@ class ErrCode(enum.IntEnum): FILE_NOT_FOUND = 17 DIRECTORY_NOT_FOUND = 18 NAME_NOT_FOUND = 19 - TYPE_IS_DIRECTORY = 20 - TYPE_IS_NOT_DIRECTORY = 21 - BAD_COOKIE = 22 - INCONSISTENT_STORAGE_CLASS_PARITY = 23 - LAST_SPAN_STATE_NOT_CLEAN = 24 - COULD_NOT_PICK_BLOCK_SERVICES = 25 - BAD_SPAN_BODY = 26 - SPAN_NOT_FOUND = 27 - BLOCK_SERVICE_NOT_FOUND = 28 - CANNOT_CERTIFY_BLOCKLESS_SPAN = 29 - BAD_NUMBER_OF_BLOCKS_PROOFS = 30 - BAD_BLOCK_PROOF = 31 - CANNOT_OVERRIDE_NAME = 32 - NAME_IS_LOCKED = 33 - OLD_NAME_IS_LOCKED = 34 - NEW_NAME_IS_LOCKED = 35 + EDGE_NOT_FOUND = 20 + EDGE_IS_LOCKED = 21 + TYPE_IS_DIRECTORY = 22 + TYPE_IS_NOT_DIRECTORY = 23 + BAD_COOKIE = 24 + INCONSISTENT_STORAGE_CLASS_PARITY = 25 + LAST_SPAN_STATE_NOT_CLEAN = 26 + COULD_NOT_PICK_BLOCK_SERVICES = 27 + BAD_SPAN_BODY = 28 + SPAN_NOT_FOUND = 29 + BLOCK_SERVICE_NOT_FOUND = 30 + CANNOT_CERTIFY_BLOCKLESS_SPAN = 31 + BAD_NUMBER_OF_BLOCKS_PROOFS = 32 + BAD_BLOCK_PROOF = 33 + CANNOT_OVERRIDE_NAME = 34 + NAME_IS_LOCKED = 35 MTIME_IS_TOO_RECENT = 36 MISMATCHING_TARGET = 37 MISMATCHING_OWNER = 38 - DIRECTORY_NOT_EMPTY = 39 - FILE_IS_TRANSIENT = 40 - OLD_DIRECTORY_NOT_FOUND = 41 - NEW_DIRECTORY_NOT_FOUND = 42 - LOOP_IN_DIRECTORY_RENAME = 43 - EDGE_NOT_FOUND = 44 + MISMATCHING_CREATION_TIME = 39 + DIRECTORY_NOT_EMPTY = 40 + FILE_IS_TRANSIENT = 41 + OLD_DIRECTORY_NOT_FOUND = 42 + NEW_DIRECTORY_NOT_FOUND = 43 + LOOP_IN_DIRECTORY_RENAME = 44 DIRECTORY_HAS_OWNER = 45 FILE_IS_NOT_TRANSIENT = 46 FILE_NOT_EMPTY = 47 @@ -54,10 +54,10 @@ class ErrCode(enum.IntEnum): MORE_RECENT_SNAPSHOT_EDGE = 53 MORE_RECENT_CURRENT_EDGE = 54 BAD_DIRECTORY_INFO = 55 - CREATION_TIME_TOO_RECENT = 56 - DEADLINE_NOT_PASSED = 57 - SAME_SOURCE_AND_DESTINATION = 58 - SAME_DIRECTORIES = 59 + DEADLINE_NOT_PASSED = 56 + SAME_SOURCE_AND_DESTINATION = 57 + SAME_DIRECTORIES = 58 + SAME_SHARD = 59 class ShardMessageKind(enum.IntEnum): LOOKUP = 0x1 @@ -73,12 +73,13 @@ class ShardMessageKind(enum.IntEnum): FILE_SPANS = 0xD SAME_DIRECTORY_RENAME = 0xE SET_DIRECTORY_INFO = 0xF + SNAPSHOT_LOOKUP = 0x9 VISIT_DIRECTORIES = 0x15 VISIT_FILES = 0x20 VISIT_TRANSIENT_FILES = 0x16 FULL_READ_DIR = 0x21 REMOVE_NON_OWNED_EDGE = 0x17 - INTRA_SHARD_HARD_FILE_UNLINK = 0x18 + SAME_SHARD_HARD_FILE_UNLINK = 0x18 REMOVE_SPAN_INITIATE = 0x19 REMOVE_SPAN_CERTIFY = 0x1A SWAP_BLOCKS = 0x22 @@ -99,7 +100,7 @@ class CDCMessageKind(enum.IntEnum): SOFT_UNLINK_DIRECTORY = 0x3 RENAME_DIRECTORY = 0x4 HARD_UNLINK_DIRECTORY = 0x5 - HARD_UNLINK_FILE = 0x6 + CROSS_SHARD_HARD_UNLINK_FILE = 0x6 @dataclass class TransientFile(bincode.Packable): @@ -594,6 +595,29 @@ class EntryNewBlockInfo(bincode.Packable): _size += 4 # crc32 return _size +@dataclass +class SnapshotLookupEdge(bincode.Packable): + STATIC_SIZE: ClassVar[int] = 8 + 8 # target_id + creation_time + target_id: InodeIdWithExtra + creation_time: int + + def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.target_id, b) + bincode.pack_u64_into(self.creation_time, b) + return None + + @staticmethod + def unpack(u: bincode.UnpackWrapper) -> 'SnapshotLookupEdge': + target_id = InodeIdWithExtra(bincode.unpack_u64(u)) + creation_time = bincode.unpack_u64(u) + return SnapshotLookupEdge(target_id, creation_time) + + def calc_packed_size(self) -> int: + _size = 0 + _size += 8 # target_id + _size += 8 # creation_time + return _size + @dataclass class LookupReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.LOOKUP @@ -1077,31 +1101,37 @@ class LinkFileReq(bincode.Packable): @dataclass class LinkFileResp(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.LINK_FILE - STATIC_SIZE: ClassVar[int] = 0 # + STATIC_SIZE: ClassVar[int] = 8 # creation_time + creation_time: int def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.creation_time, b) return None @staticmethod def unpack(u: bincode.UnpackWrapper) -> 'LinkFileResp': - return LinkFileResp() + creation_time = bincode.unpack_u64(u) + return LinkFileResp(creation_time) def calc_packed_size(self) -> int: _size = 0 + _size += 8 # creation_time return _size @dataclass class SoftUnlinkFileReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SOFT_UNLINK_FILE - STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 # owner_id + file_id + len(name) + STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 # owner_id + file_id + len(name) + creation_time owner_id: int file_id: int name: bytes + creation_time: int def pack_into(self, b: bytearray) -> None: bincode.pack_u64_into(self.owner_id, b) bincode.pack_u64_into(self.file_id, b) bincode.pack_bytes_into(self.name, b) + bincode.pack_u64_into(self.creation_time, b) return None @staticmethod @@ -1109,7 +1139,8 @@ class SoftUnlinkFileReq(bincode.Packable): owner_id = bincode.unpack_u64(u) file_id = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) - return SoftUnlinkFileReq(owner_id, file_id, name) + creation_time = bincode.unpack_u64(u) + return SoftUnlinkFileReq(owner_id, file_id, name, creation_time) def calc_packed_size(self) -> int: _size = 0 @@ -1117,6 +1148,7 @@ class SoftUnlinkFileReq(bincode.Packable): _size += 8 # file_id _size += 1 # len(name) _size += len(self.name) # name contents + _size += 8 # creation_time return _size @dataclass @@ -1202,16 +1234,18 @@ class FileSpansResp(bincode.Packable): @dataclass class SameDirectoryRenameReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SAME_DIRECTORY_RENAME - STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 1 # target_id + dir_id + len(old_name) + len(new_name) + STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 + 1 # target_id + dir_id + len(old_name) + old_creation_time + len(new_name) target_id: int dir_id: int old_name: bytes + old_creation_time: int new_name: bytes def pack_into(self, b: bytearray) -> None: bincode.pack_u64_into(self.target_id, b) bincode.pack_u64_into(self.dir_id, b) bincode.pack_bytes_into(self.old_name, b) + bincode.pack_u64_into(self.old_creation_time, b) bincode.pack_bytes_into(self.new_name, b) return None @@ -1220,8 +1254,9 @@ class SameDirectoryRenameReq(bincode.Packable): target_id = bincode.unpack_u64(u) dir_id = bincode.unpack_u64(u) old_name = bincode.unpack_bytes(u) + old_creation_time = bincode.unpack_u64(u) new_name = bincode.unpack_bytes(u) - return SameDirectoryRenameReq(target_id, dir_id, old_name, new_name) + return SameDirectoryRenameReq(target_id, dir_id, old_name, old_creation_time, new_name) def calc_packed_size(self) -> int: _size = 0 @@ -1229,6 +1264,7 @@ class SameDirectoryRenameReq(bincode.Packable): _size += 8 # dir_id _size += 1 # len(old_name) _size += len(self.old_name) # old_name contents + _size += 8 # old_creation_time _size += 1 # len(new_name) _size += len(self.new_name) # new_name contents return _size @@ -1236,17 +1272,21 @@ class SameDirectoryRenameReq(bincode.Packable): @dataclass class SameDirectoryRenameResp(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SAME_DIRECTORY_RENAME - STATIC_SIZE: ClassVar[int] = 0 # + STATIC_SIZE: ClassVar[int] = 8 # new_creation_time + new_creation_time: int def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.new_creation_time, b) return None @staticmethod def unpack(u: bincode.UnpackWrapper) -> 'SameDirectoryRenameResp': - return SameDirectoryRenameResp() + new_creation_time = bincode.unpack_u64(u) + return SameDirectoryRenameResp(new_creation_time) def calc_packed_size(self) -> int: _size = 0 + _size += 8 # new_creation_time return _size @dataclass @@ -1289,6 +1329,65 @@ class SetDirectoryInfoResp(bincode.Packable): _size = 0 return _size +@dataclass +class SnapshotLookupReq(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SNAPSHOT_LOOKUP + STATIC_SIZE: ClassVar[int] = 8 + 1 + 8 # dir_id + len(name) + start_from + dir_id: int + name: bytes + start_from: int + + def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.dir_id, b) + bincode.pack_bytes_into(self.name, b) + bincode.pack_u64_into(self.start_from, b) + return None + + @staticmethod + def unpack(u: bincode.UnpackWrapper) -> 'SnapshotLookupReq': + dir_id = bincode.unpack_u64(u) + name = bincode.unpack_bytes(u) + start_from = bincode.unpack_u64(u) + return SnapshotLookupReq(dir_id, name, start_from) + + def calc_packed_size(self) -> int: + _size = 0 + _size += 8 # dir_id + _size += 1 # len(name) + _size += len(self.name) # name contents + _size += 8 # start_from + return _size + +@dataclass +class SnapshotLookupResp(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SNAPSHOT_LOOKUP + STATIC_SIZE: ClassVar[int] = 8 + 2 # next_time + len(edges) + next_time: int + edges: List[SnapshotLookupEdge] + + def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.next_time, b) + bincode.pack_u16_into(len(self.edges), b) + for i in range(len(self.edges)): + self.edges[i].pack_into(b) + return None + + @staticmethod + def unpack(u: bincode.UnpackWrapper) -> 'SnapshotLookupResp': + next_time = bincode.unpack_u64(u) + edges: List[Any] = [None]*bincode.unpack_u16(u) + for i in range(len(edges)): + edges[i] = SnapshotLookupEdge.unpack(u) + return SnapshotLookupResp(next_time, edges) + + def calc_packed_size(self) -> int: + _size = 0 + _size += 8 # next_time + _size += 2 # len(edges) + for i in range(len(self.edges)): + _size += self.edges[i].calc_packed_size() # edges[i] + return _size + @dataclass class VisitDirectoriesReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.VISIT_DIRECTORIES @@ -1543,8 +1642,8 @@ class RemoveNonOwnedEdgeResp(bincode.Packable): return _size @dataclass -class IntraShardHardFileUnlinkReq(bincode.Packable): - KIND: ClassVar[ShardMessageKind] = ShardMessageKind.INTRA_SHARD_HARD_FILE_UNLINK +class SameShardHardFileUnlinkReq(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SAME_SHARD_HARD_FILE_UNLINK STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 # owner_id + target_id + len(name) + creation_time owner_id: int target_id: int @@ -1559,12 +1658,12 @@ class IntraShardHardFileUnlinkReq(bincode.Packable): return None @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'IntraShardHardFileUnlinkReq': + def unpack(u: bincode.UnpackWrapper) -> 'SameShardHardFileUnlinkReq': owner_id = bincode.unpack_u64(u) target_id = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) creation_time = bincode.unpack_u64(u) - return IntraShardHardFileUnlinkReq(owner_id, target_id, name, creation_time) + return SameShardHardFileUnlinkReq(owner_id, target_id, name, creation_time) def calc_packed_size(self) -> int: _size = 0 @@ -1576,16 +1675,16 @@ class IntraShardHardFileUnlinkReq(bincode.Packable): return _size @dataclass -class IntraShardHardFileUnlinkResp(bincode.Packable): - KIND: ClassVar[ShardMessageKind] = ShardMessageKind.INTRA_SHARD_HARD_FILE_UNLINK +class SameShardHardFileUnlinkResp(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.SAME_SHARD_HARD_FILE_UNLINK STATIC_SIZE: ClassVar[int] = 0 # def pack_into(self, b: bytearray) -> None: return None @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'IntraShardHardFileUnlinkResp': - return IntraShardHardFileUnlinkResp() + def unpack(u: bincode.UnpackWrapper) -> 'SameShardHardFileUnlinkResp': + return SameShardHardFileUnlinkResp() def calc_packed_size(self) -> int: _size = 0 @@ -1973,55 +2072,6 @@ class RemoveDirectoryOwnerResp(bincode.Packable): @dataclass class CreateLockedCurrentEdgeReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.CREATE_LOCKED_CURRENT_EDGE - STATIC_SIZE: ClassVar[int] = 8 + 1 + 8 + 8 # dir_id + len(name) + target_id + creation_time - dir_id: int - name: bytes - target_id: int - creation_time: int - - def pack_into(self, b: bytearray) -> None: - bincode.pack_u64_into(self.dir_id, b) - bincode.pack_bytes_into(self.name, b) - bincode.pack_u64_into(self.target_id, b) - bincode.pack_u64_into(self.creation_time, b) - return None - - @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'CreateLockedCurrentEdgeReq': - dir_id = bincode.unpack_u64(u) - name = bincode.unpack_bytes(u) - target_id = bincode.unpack_u64(u) - creation_time = bincode.unpack_u64(u) - return CreateLockedCurrentEdgeReq(dir_id, name, target_id, creation_time) - - def calc_packed_size(self) -> int: - _size = 0 - _size += 8 # dir_id - _size += 1 # len(name) - _size += len(self.name) # name contents - _size += 8 # target_id - _size += 8 # creation_time - return _size - -@dataclass -class CreateLockedCurrentEdgeResp(bincode.Packable): - KIND: ClassVar[ShardMessageKind] = ShardMessageKind.CREATE_LOCKED_CURRENT_EDGE - STATIC_SIZE: ClassVar[int] = 0 # - - def pack_into(self, b: bytearray) -> None: - return None - - @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'CreateLockedCurrentEdgeResp': - return CreateLockedCurrentEdgeResp() - - def calc_packed_size(self) -> int: - _size = 0 - return _size - -@dataclass -class LockCurrentEdgeReq(bincode.Packable): - KIND: ClassVar[ShardMessageKind] = ShardMessageKind.LOCK_CURRENT_EDGE STATIC_SIZE: ClassVar[int] = 8 + 1 + 8 # dir_id + len(name) + target_id dir_id: int name: bytes @@ -2034,11 +2084,11 @@ class LockCurrentEdgeReq(bincode.Packable): return None @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'LockCurrentEdgeReq': + def unpack(u: bincode.UnpackWrapper) -> 'CreateLockedCurrentEdgeReq': dir_id = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) target_id = bincode.unpack_u64(u) - return LockCurrentEdgeReq(dir_id, name, target_id) + return CreateLockedCurrentEdgeReq(dir_id, name, target_id) def calc_packed_size(self) -> int: _size = 0 @@ -2048,6 +2098,59 @@ class LockCurrentEdgeReq(bincode.Packable): _size += 8 # target_id return _size +@dataclass +class CreateLockedCurrentEdgeResp(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.CREATE_LOCKED_CURRENT_EDGE + STATIC_SIZE: ClassVar[int] = 8 # creation_time + creation_time: int + + def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.creation_time, b) + return None + + @staticmethod + def unpack(u: bincode.UnpackWrapper) -> 'CreateLockedCurrentEdgeResp': + creation_time = bincode.unpack_u64(u) + return CreateLockedCurrentEdgeResp(creation_time) + + def calc_packed_size(self) -> int: + _size = 0 + _size += 8 # creation_time + return _size + +@dataclass +class LockCurrentEdgeReq(bincode.Packable): + KIND: ClassVar[ShardMessageKind] = ShardMessageKind.LOCK_CURRENT_EDGE + STATIC_SIZE: ClassVar[int] = 8 + 8 + 8 + 1 # dir_id + target_id + creation_time + len(name) + dir_id: int + target_id: int + creation_time: int + name: bytes + + def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.dir_id, b) + bincode.pack_u64_into(self.target_id, b) + bincode.pack_u64_into(self.creation_time, b) + bincode.pack_bytes_into(self.name, b) + return None + + @staticmethod + def unpack(u: bincode.UnpackWrapper) -> 'LockCurrentEdgeReq': + dir_id = bincode.unpack_u64(u) + target_id = bincode.unpack_u64(u) + creation_time = bincode.unpack_u64(u) + name = bincode.unpack_bytes(u) + return LockCurrentEdgeReq(dir_id, target_id, creation_time, name) + + def calc_packed_size(self) -> int: + _size = 0 + _size += 8 # dir_id + _size += 8 # target_id + _size += 8 # creation_time + _size += 1 # len(name) + _size += len(self.name) # name contents + return _size + @dataclass class LockCurrentEdgeResp(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.LOCK_CURRENT_EDGE @@ -2067,15 +2170,17 @@ class LockCurrentEdgeResp(bincode.Packable): @dataclass class UnlockCurrentEdgeReq(bincode.Packable): KIND: ClassVar[ShardMessageKind] = ShardMessageKind.UNLOCK_CURRENT_EDGE - STATIC_SIZE: ClassVar[int] = 8 + 1 + 8 + 1 # dir_id + len(name) + target_id + was_moved + STATIC_SIZE: ClassVar[int] = 8 + 1 + 8 + 8 + 1 # dir_id + len(name) + creation_time + target_id + was_moved dir_id: int name: bytes + creation_time: int target_id: int was_moved: bool def pack_into(self, b: bytearray) -> None: bincode.pack_u64_into(self.dir_id, b) bincode.pack_bytes_into(self.name, b) + bincode.pack_u64_into(self.creation_time, b) bincode.pack_u64_into(self.target_id, b) bincode.pack_u8_into(self.was_moved, b) return None @@ -2084,15 +2189,17 @@ class UnlockCurrentEdgeReq(bincode.Packable): def unpack(u: bincode.UnpackWrapper) -> 'UnlockCurrentEdgeReq': dir_id = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) + creation_time = bincode.unpack_u64(u) target_id = bincode.unpack_u64(u) was_moved = bool(bincode.unpack_u8(u)) - return UnlockCurrentEdgeReq(dir_id, name, target_id, was_moved) + return UnlockCurrentEdgeReq(dir_id, name, creation_time, target_id, was_moved) def calc_packed_size(self) -> int: _size = 0 _size += 8 # dir_id _size += 1 # len(name) _size += len(self.name) # name contents + _size += 8 # creation_time _size += 8 # target_id _size += 1 # was_moved return _size @@ -2235,30 +2342,35 @@ class MakeDirectoryReq(bincode.Packable): @dataclass class MakeDirectoryResp(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.MAKE_DIRECTORY - STATIC_SIZE: ClassVar[int] = 8 # id + STATIC_SIZE: ClassVar[int] = 8 + 8 # id + creation_time id: int + creation_time: int def pack_into(self, b: bytearray) -> None: bincode.pack_u64_into(self.id, b) + bincode.pack_u64_into(self.creation_time, b) return None @staticmethod def unpack(u: bincode.UnpackWrapper) -> 'MakeDirectoryResp': id = bincode.unpack_u64(u) - return MakeDirectoryResp(id) + creation_time = bincode.unpack_u64(u) + return MakeDirectoryResp(id, creation_time) def calc_packed_size(self) -> int: _size = 0 _size += 8 # id + _size += 8 # creation_time return _size @dataclass class RenameFileReq(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.RENAME_FILE - STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 + 1 # target_id + old_owner_id + len(old_name) + new_owner_id + len(new_name) + STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 + 8 + 1 # target_id + old_owner_id + len(old_name) + old_creation_time + new_owner_id + len(new_name) target_id: int old_owner_id: int old_name: bytes + old_creation_time: int new_owner_id: int new_name: bytes @@ -2266,6 +2378,7 @@ class RenameFileReq(bincode.Packable): bincode.pack_u64_into(self.target_id, b) bincode.pack_u64_into(self.old_owner_id, b) bincode.pack_bytes_into(self.old_name, b) + bincode.pack_u64_into(self.old_creation_time, b) bincode.pack_u64_into(self.new_owner_id, b) bincode.pack_bytes_into(self.new_name, b) return None @@ -2275,9 +2388,10 @@ class RenameFileReq(bincode.Packable): target_id = bincode.unpack_u64(u) old_owner_id = bincode.unpack_u64(u) old_name = bincode.unpack_bytes(u) + old_creation_time = bincode.unpack_u64(u) new_owner_id = bincode.unpack_u64(u) new_name = bincode.unpack_bytes(u) - return RenameFileReq(target_id, old_owner_id, old_name, new_owner_id, new_name) + return RenameFileReq(target_id, old_owner_id, old_name, old_creation_time, new_owner_id, new_name) def calc_packed_size(self) -> int: _size = 0 @@ -2285,6 +2399,7 @@ class RenameFileReq(bincode.Packable): _size += 8 # old_owner_id _size += 1 # len(old_name) _size += len(self.old_name) # old_name contents + _size += 8 # old_creation_time _size += 8 # new_owner_id _size += 1 # len(new_name) _size += len(self.new_name) # new_name contents @@ -2293,30 +2408,36 @@ class RenameFileReq(bincode.Packable): @dataclass class RenameFileResp(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.RENAME_FILE - STATIC_SIZE: ClassVar[int] = 0 # + STATIC_SIZE: ClassVar[int] = 8 # creation_time + creation_time: int def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.creation_time, b) return None @staticmethod def unpack(u: bincode.UnpackWrapper) -> 'RenameFileResp': - return RenameFileResp() + creation_time = bincode.unpack_u64(u) + return RenameFileResp(creation_time) def calc_packed_size(self) -> int: _size = 0 + _size += 8 # creation_time return _size @dataclass class SoftUnlinkDirectoryReq(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.SOFT_UNLINK_DIRECTORY - STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 # owner_id + target_id + len(name) + STATIC_SIZE: ClassVar[int] = 8 + 8 + 8 + 1 # owner_id + target_id + creation_time + len(name) owner_id: int target_id: int + creation_time: int name: bytes def pack_into(self, b: bytearray) -> None: bincode.pack_u64_into(self.owner_id, b) bincode.pack_u64_into(self.target_id, b) + bincode.pack_u64_into(self.creation_time, b) bincode.pack_bytes_into(self.name, b) return None @@ -2324,13 +2445,15 @@ class SoftUnlinkDirectoryReq(bincode.Packable): def unpack(u: bincode.UnpackWrapper) -> 'SoftUnlinkDirectoryReq': owner_id = bincode.unpack_u64(u) target_id = bincode.unpack_u64(u) + creation_time = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) - return SoftUnlinkDirectoryReq(owner_id, target_id, name) + return SoftUnlinkDirectoryReq(owner_id, target_id, creation_time, name) def calc_packed_size(self) -> int: _size = 0 _size += 8 # owner_id _size += 8 # target_id + _size += 8 # creation_time _size += 1 # len(name) _size += len(self.name) # name contents return _size @@ -2354,10 +2477,11 @@ class SoftUnlinkDirectoryResp(bincode.Packable): @dataclass class RenameDirectoryReq(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.RENAME_DIRECTORY - STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 + 1 # target_id + old_owner_id + len(old_name) + new_owner_id + len(new_name) + STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 + 8 + 1 # target_id + old_owner_id + len(old_name) + old_creation_time + new_owner_id + len(new_name) target_id: int old_owner_id: int old_name: bytes + old_creation_time: int new_owner_id: int new_name: bytes @@ -2365,6 +2489,7 @@ class RenameDirectoryReq(bincode.Packable): bincode.pack_u64_into(self.target_id, b) bincode.pack_u64_into(self.old_owner_id, b) bincode.pack_bytes_into(self.old_name, b) + bincode.pack_u64_into(self.old_creation_time, b) bincode.pack_u64_into(self.new_owner_id, b) bincode.pack_bytes_into(self.new_name, b) return None @@ -2374,9 +2499,10 @@ class RenameDirectoryReq(bincode.Packable): target_id = bincode.unpack_u64(u) old_owner_id = bincode.unpack_u64(u) old_name = bincode.unpack_bytes(u) + old_creation_time = bincode.unpack_u64(u) new_owner_id = bincode.unpack_u64(u) new_name = bincode.unpack_bytes(u) - return RenameDirectoryReq(target_id, old_owner_id, old_name, new_owner_id, new_name) + return RenameDirectoryReq(target_id, old_owner_id, old_name, old_creation_time, new_owner_id, new_name) def calc_packed_size(self) -> int: _size = 0 @@ -2384,6 +2510,7 @@ class RenameDirectoryReq(bincode.Packable): _size += 8 # old_owner_id _size += 1 # len(old_name) _size += len(self.old_name) # old_name contents + _size += 8 # old_creation_time _size += 8 # new_owner_id _size += 1 # len(new_name) _size += len(self.new_name) # new_name contents @@ -2392,17 +2519,21 @@ class RenameDirectoryReq(bincode.Packable): @dataclass class RenameDirectoryResp(bincode.Packable): KIND: ClassVar[CDCMessageKind] = CDCMessageKind.RENAME_DIRECTORY - STATIC_SIZE: ClassVar[int] = 0 # + STATIC_SIZE: ClassVar[int] = 8 # creation_time + creation_time: int def pack_into(self, b: bytearray) -> None: + bincode.pack_u64_into(self.creation_time, b) return None @staticmethod def unpack(u: bincode.UnpackWrapper) -> 'RenameDirectoryResp': - return RenameDirectoryResp() + creation_time = bincode.unpack_u64(u) + return RenameDirectoryResp(creation_time) def calc_packed_size(self) -> int: _size = 0 + _size += 8 # creation_time return _size @dataclass @@ -2442,8 +2573,8 @@ class HardUnlinkDirectoryResp(bincode.Packable): return _size @dataclass -class HardUnlinkFileReq(bincode.Packable): - KIND: ClassVar[CDCMessageKind] = CDCMessageKind.HARD_UNLINK_FILE +class CrossShardHardUnlinkFileReq(bincode.Packable): + KIND: ClassVar[CDCMessageKind] = CDCMessageKind.CROSS_SHARD_HARD_UNLINK_FILE STATIC_SIZE: ClassVar[int] = 8 + 8 + 1 + 8 # owner_id + target_id + len(name) + creation_time owner_id: int target_id: int @@ -2458,12 +2589,12 @@ class HardUnlinkFileReq(bincode.Packable): return None @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'HardUnlinkFileReq': + def unpack(u: bincode.UnpackWrapper) -> 'CrossShardHardUnlinkFileReq': owner_id = bincode.unpack_u64(u) target_id = bincode.unpack_u64(u) name = bincode.unpack_bytes(u) creation_time = bincode.unpack_u64(u) - return HardUnlinkFileReq(owner_id, target_id, name, creation_time) + return CrossShardHardUnlinkFileReq(owner_id, target_id, name, creation_time) def calc_packed_size(self) -> int: _size = 0 @@ -2475,23 +2606,23 @@ class HardUnlinkFileReq(bincode.Packable): return _size @dataclass -class HardUnlinkFileResp(bincode.Packable): - KIND: ClassVar[CDCMessageKind] = CDCMessageKind.HARD_UNLINK_FILE +class CrossShardHardUnlinkFileResp(bincode.Packable): + KIND: ClassVar[CDCMessageKind] = CDCMessageKind.CROSS_SHARD_HARD_UNLINK_FILE STATIC_SIZE: ClassVar[int] = 0 # def pack_into(self, b: bytearray) -> None: return None @staticmethod - def unpack(u: bincode.UnpackWrapper) -> 'HardUnlinkFileResp': - return HardUnlinkFileResp() + def unpack(u: bincode.UnpackWrapper) -> 'CrossShardHardUnlinkFileResp': + return CrossShardHardUnlinkFileResp() def calc_packed_size(self) -> int: _size = 0 return _size -ShardRequestBody = Union[LookupReq, StatFileReq, StatTransientFileReq, StatDirectoryReq, ReadDirReq, ConstructFileReq, AddSpanInitiateReq, AddSpanCertifyReq, LinkFileReq, SoftUnlinkFileReq, FileSpansReq, SameDirectoryRenameReq, SetDirectoryInfoReq, VisitDirectoriesReq, VisitFilesReq, VisitTransientFilesReq, FullReadDirReq, RemoveNonOwnedEdgeReq, IntraShardHardFileUnlinkReq, RemoveSpanInitiateReq, RemoveSpanCertifyReq, SwapBlocksReq, BlockServiceFilesReq, RemoveInodeReq, CreateDirectoryInodeReq, SetDirectoryOwnerReq, RemoveDirectoryOwnerReq, CreateLockedCurrentEdgeReq, LockCurrentEdgeReq, UnlockCurrentEdgeReq, RemoveOwnedSnapshotFileEdgeReq, MakeFileTransientReq] -ShardResponseBody = Union[LookupResp, StatFileResp, StatTransientFileResp, StatDirectoryResp, ReadDirResp, ConstructFileResp, AddSpanInitiateResp, AddSpanCertifyResp, LinkFileResp, SoftUnlinkFileResp, FileSpansResp, SameDirectoryRenameResp, SetDirectoryInfoResp, VisitDirectoriesResp, VisitFilesResp, VisitTransientFilesResp, FullReadDirResp, RemoveNonOwnedEdgeResp, IntraShardHardFileUnlinkResp, RemoveSpanInitiateResp, RemoveSpanCertifyResp, SwapBlocksResp, BlockServiceFilesResp, RemoveInodeResp, CreateDirectoryInodeResp, SetDirectoryOwnerResp, RemoveDirectoryOwnerResp, CreateLockedCurrentEdgeResp, LockCurrentEdgeResp, UnlockCurrentEdgeResp, RemoveOwnedSnapshotFileEdgeResp, MakeFileTransientResp] +ShardRequestBody = Union[LookupReq, StatFileReq, StatTransientFileReq, StatDirectoryReq, ReadDirReq, ConstructFileReq, AddSpanInitiateReq, AddSpanCertifyReq, LinkFileReq, SoftUnlinkFileReq, FileSpansReq, SameDirectoryRenameReq, SetDirectoryInfoReq, SnapshotLookupReq, VisitDirectoriesReq, VisitFilesReq, VisitTransientFilesReq, FullReadDirReq, RemoveNonOwnedEdgeReq, SameShardHardFileUnlinkReq, RemoveSpanInitiateReq, RemoveSpanCertifyReq, SwapBlocksReq, BlockServiceFilesReq, RemoveInodeReq, CreateDirectoryInodeReq, SetDirectoryOwnerReq, RemoveDirectoryOwnerReq, CreateLockedCurrentEdgeReq, LockCurrentEdgeReq, UnlockCurrentEdgeReq, RemoveOwnedSnapshotFileEdgeReq, MakeFileTransientReq] +ShardResponseBody = Union[LookupResp, StatFileResp, StatTransientFileResp, StatDirectoryResp, ReadDirResp, ConstructFileResp, AddSpanInitiateResp, AddSpanCertifyResp, LinkFileResp, SoftUnlinkFileResp, FileSpansResp, SameDirectoryRenameResp, SetDirectoryInfoResp, SnapshotLookupResp, VisitDirectoriesResp, VisitFilesResp, VisitTransientFilesResp, FullReadDirResp, RemoveNonOwnedEdgeResp, SameShardHardFileUnlinkResp, RemoveSpanInitiateResp, RemoveSpanCertifyResp, SwapBlocksResp, BlockServiceFilesResp, RemoveInodeResp, CreateDirectoryInodeResp, SetDirectoryOwnerResp, RemoveDirectoryOwnerResp, CreateLockedCurrentEdgeResp, LockCurrentEdgeResp, UnlockCurrentEdgeResp, RemoveOwnedSnapshotFileEdgeResp, MakeFileTransientResp] SHARD_REQUESTS: Dict[ShardMessageKind, Tuple[Type[ShardRequestBody], Type[ShardResponseBody]]] = { ShardMessageKind.LOOKUP: (LookupReq, LookupResp), @@ -2507,12 +2638,13 @@ SHARD_REQUESTS: Dict[ShardMessageKind, Tuple[Type[ShardRequestBody], Type[ShardR ShardMessageKind.FILE_SPANS: (FileSpansReq, FileSpansResp), ShardMessageKind.SAME_DIRECTORY_RENAME: (SameDirectoryRenameReq, SameDirectoryRenameResp), ShardMessageKind.SET_DIRECTORY_INFO: (SetDirectoryInfoReq, SetDirectoryInfoResp), + ShardMessageKind.SNAPSHOT_LOOKUP: (SnapshotLookupReq, SnapshotLookupResp), ShardMessageKind.VISIT_DIRECTORIES: (VisitDirectoriesReq, VisitDirectoriesResp), ShardMessageKind.VISIT_FILES: (VisitFilesReq, VisitFilesResp), ShardMessageKind.VISIT_TRANSIENT_FILES: (VisitTransientFilesReq, VisitTransientFilesResp), ShardMessageKind.FULL_READ_DIR: (FullReadDirReq, FullReadDirResp), ShardMessageKind.REMOVE_NON_OWNED_EDGE: (RemoveNonOwnedEdgeReq, RemoveNonOwnedEdgeResp), - ShardMessageKind.INTRA_SHARD_HARD_FILE_UNLINK: (IntraShardHardFileUnlinkReq, IntraShardHardFileUnlinkResp), + ShardMessageKind.SAME_SHARD_HARD_FILE_UNLINK: (SameShardHardFileUnlinkReq, SameShardHardFileUnlinkResp), ShardMessageKind.REMOVE_SPAN_INITIATE: (RemoveSpanInitiateReq, RemoveSpanInitiateResp), ShardMessageKind.REMOVE_SPAN_CERTIFY: (RemoveSpanCertifyReq, RemoveSpanCertifyResp), ShardMessageKind.SWAP_BLOCKS: (SwapBlocksReq, SwapBlocksResp), @@ -2528,8 +2660,8 @@ SHARD_REQUESTS: Dict[ShardMessageKind, Tuple[Type[ShardRequestBody], Type[ShardR ShardMessageKind.MAKE_FILE_TRANSIENT: (MakeFileTransientReq, MakeFileTransientResp), } -CDCRequestBody = Union[MakeDirectoryReq, RenameFileReq, SoftUnlinkDirectoryReq, RenameDirectoryReq, HardUnlinkDirectoryReq, HardUnlinkFileReq] -CDCResponseBody = Union[MakeDirectoryResp, RenameFileResp, SoftUnlinkDirectoryResp, RenameDirectoryResp, HardUnlinkDirectoryResp, HardUnlinkFileResp] +CDCRequestBody = Union[MakeDirectoryReq, RenameFileReq, SoftUnlinkDirectoryReq, RenameDirectoryReq, HardUnlinkDirectoryReq, CrossShardHardUnlinkFileReq] +CDCResponseBody = Union[MakeDirectoryResp, RenameFileResp, SoftUnlinkDirectoryResp, RenameDirectoryResp, HardUnlinkDirectoryResp, CrossShardHardUnlinkFileResp] CDC_REQUESTS: Dict[CDCMessageKind, Tuple[Type[CDCRequestBody], Type[CDCResponseBody]]] = { CDCMessageKind.MAKE_DIRECTORY: (MakeDirectoryReq, MakeDirectoryResp), @@ -2537,6 +2669,6 @@ CDC_REQUESTS: Dict[CDCMessageKind, Tuple[Type[CDCRequestBody], Type[CDCResponseB CDCMessageKind.SOFT_UNLINK_DIRECTORY: (SoftUnlinkDirectoryReq, SoftUnlinkDirectoryResp), CDCMessageKind.RENAME_DIRECTORY: (RenameDirectoryReq, RenameDirectoryResp), CDCMessageKind.HARD_UNLINK_DIRECTORY: (HardUnlinkDirectoryReq, HardUnlinkDirectoryResp), - CDCMessageKind.HARD_UNLINK_FILE: (HardUnlinkFileReq, HardUnlinkFileResp), + CDCMessageKind.CROSS_SHARD_HARD_UNLINK_FILE: (CrossShardHardUnlinkFileReq, CrossShardHardUnlinkFileResp), } diff --git a/tests.sh b/tests.sh index 42432518..248dc155 100755 --- a/tests.sh +++ b/tests.sh @@ -12,6 +12,9 @@ echo "$(tput bold)integration tests$(tput sgr0)" echo "$(tput bold)integration tests, sanitized$(tput sgr0)" (cd go/integrationtest && go run . -sanitize) +echo "$(tput bold)integration tests, packet drop$(tput sgr0)" +(cd go/integrationtest && go run . -sanitize -outgoing-packet-drop 0.1 -short) + # # Both ./cpp/run-tests.sh and the integration test will write coverage info # # rm -f ./cpp/build/debug-cov-sanitize/*.gcda ./cpp/build/debug-cov-sanitize/*.gcov