From 40f229b6f5ea7098668d88e00693c679bfb78c6b Mon Sep 17 00:00:00 2001 From: Francesco Mazzoli Date: Mon, 14 Aug 2023 19:40:02 +0000 Subject: [PATCH] Add endpoint to specify which file to get the "reference" block services from See comments for more details. --- cpp/core/MsgsGen.cpp | 255 +++++++++++++++++++++++++----------- cpp/core/MsgsGen.hpp | 50 ++++++- cpp/shard/Shard.cpp | 12 +- cpp/shard/ShardDB.cpp | 123 ++++++++++------- cpp/shard/ShardDB.hpp | 2 +- cpp/tests/tests.cpp | 10 +- go/bincodegen/bincodegen.go | 5 + go/eggstests/cleanup.go | 2 +- go/lib/metadatareq.go | 2 +- go/lib/migrate.go | 148 +++++---------------- go/lib/scratch.go | 89 +++++++++++++ go/msgs/msgs.go | 14 ++ go/msgs/msgs_bincode.go | 48 +++++++ 13 files changed, 505 insertions(+), 255 deletions(-) create mode 100644 go/lib/scratch.go diff --git a/cpp/core/MsgsGen.cpp b/cpp/core/MsgsGen.cpp index 2bcf70fd..a6bd7329 100644 --- a/cpp/core/MsgsGen.cpp +++ b/cpp/core/MsgsGen.cpp @@ -274,6 +274,9 @@ std::ostream& operator<<(std::ostream& out, ShardMessageKind kind) { case ShardMessageKind::REMOVE_INODE: out << "REMOVE_INODE"; break; + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + out << "ADD_SPAN_INITIATE_WITH_REFERENCE"; + break; case ShardMessageKind::CREATE_DIRECTORY_INODE: out << "CREATE_DIRECTORY_INODE"; break; @@ -2382,6 +2385,46 @@ std::ostream& operator<<(std::ostream& out, const RemoveInodeResp& x) { return out; } +void AddSpanInitiateWithReferenceReq::pack(BincodeBuf& buf) const { + req.pack(buf); + reference.pack(buf); +} +void AddSpanInitiateWithReferenceReq::unpack(BincodeBuf& buf) { + req.unpack(buf); + reference.unpack(buf); +} +void AddSpanInitiateWithReferenceReq::clear() { + req.clear(); + reference = InodeId(); +} +bool AddSpanInitiateWithReferenceReq::operator==(const AddSpanInitiateWithReferenceReq& rhs) const { + if (req != rhs.req) { return false; }; + if ((InodeId)this->reference != (InodeId)rhs.reference) { return false; }; + return true; +} +std::ostream& operator<<(std::ostream& out, const AddSpanInitiateWithReferenceReq& x) { + out << "AddSpanInitiateWithReferenceReq(" << "Req=" << x.req << ", " << "Reference=" << x.reference << ")"; + return out; +} + +void AddSpanInitiateWithReferenceResp::pack(BincodeBuf& buf) const { + resp.pack(buf); +} +void AddSpanInitiateWithReferenceResp::unpack(BincodeBuf& buf) { + resp.unpack(buf); +} +void AddSpanInitiateWithReferenceResp::clear() { + resp.clear(); +} +bool AddSpanInitiateWithReferenceResp::operator==(const AddSpanInitiateWithReferenceResp& rhs) const { + if (resp != rhs.resp) { return false; }; + return true; +} +std::ostream& operator<<(std::ostream& out, const AddSpanInitiateWithReferenceResp& x) { + out << "AddSpanInitiateWithReferenceResp(" << "Resp=" << x.resp << ")"; + return out; +} + void CreateDirectoryInodeReq::pack(BincodeBuf& buf) const { id.pack(buf); ownerId.pack(buf); @@ -3892,83 +3935,93 @@ RemoveInodeReq& ShardReqContainer::setRemoveInode() { x.clear(); return x; } +const AddSpanInitiateWithReferenceReq& ShardReqContainer::getAddSpanInitiateWithReference() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE, "%s != %s", _kind, ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE); + return std::get<27>(_data); +} +AddSpanInitiateWithReferenceReq& ShardReqContainer::setAddSpanInitiateWithReference() { + _kind = ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE; + auto& x = std::get<27>(_data); + x.clear(); + return x; +} const CreateDirectoryInodeReq& 
ShardReqContainer::getCreateDirectoryInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_DIRECTORY_INODE, "%s != %s", _kind, ShardMessageKind::CREATE_DIRECTORY_INODE); - return std::get<27>(_data); + return std::get<28>(_data); } CreateDirectoryInodeReq& ShardReqContainer::setCreateDirectoryInode() { _kind = ShardMessageKind::CREATE_DIRECTORY_INODE; - auto& x = std::get<27>(_data); + auto& x = std::get<28>(_data); x.clear(); return x; } const SetDirectoryOwnerReq& ShardReqContainer::getSetDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SET_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::SET_DIRECTORY_OWNER); - return std::get<28>(_data); + return std::get<29>(_data); } SetDirectoryOwnerReq& ShardReqContainer::setSetDirectoryOwner() { _kind = ShardMessageKind::SET_DIRECTORY_OWNER; - auto& x = std::get<28>(_data); + auto& x = std::get<29>(_data); x.clear(); return x; } const RemoveDirectoryOwnerReq& ShardReqContainer::getRemoveDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::REMOVE_DIRECTORY_OWNER); - return std::get<29>(_data); + return std::get<30>(_data); } RemoveDirectoryOwnerReq& ShardReqContainer::setRemoveDirectoryOwner() { _kind = ShardMessageKind::REMOVE_DIRECTORY_OWNER; - auto& x = std::get<29>(_data); + auto& x = std::get<30>(_data); x.clear(); return x; } const CreateLockedCurrentEdgeReq& ShardReqContainer::getCreateLockedCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE); - return std::get<30>(_data); + return std::get<31>(_data); } CreateLockedCurrentEdgeReq& ShardReqContainer::setCreateLockedCurrentEdge() { _kind = ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE; - auto& x = std::get<30>(_data); + auto& x = std::get<31>(_data); x.clear(); return x; } const LockCurrentEdgeReq& ShardReqContainer::getLockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::LOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::LOCK_CURRENT_EDGE); - return std::get<31>(_data); + return std::get<32>(_data); } LockCurrentEdgeReq& ShardReqContainer::setLockCurrentEdge() { _kind = ShardMessageKind::LOCK_CURRENT_EDGE; - auto& x = std::get<31>(_data); + auto& x = std::get<32>(_data); x.clear(); return x; } const UnlockCurrentEdgeReq& ShardReqContainer::getUnlockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::UNLOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::UNLOCK_CURRENT_EDGE); - return std::get<32>(_data); + return std::get<33>(_data); } UnlockCurrentEdgeReq& ShardReqContainer::setUnlockCurrentEdge() { _kind = ShardMessageKind::UNLOCK_CURRENT_EDGE; - auto& x = std::get<32>(_data); + auto& x = std::get<33>(_data); x.clear(); return x; } const RemoveOwnedSnapshotFileEdgeReq& ShardReqContainer::getRemoveOwnedSnapshotFileEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE); - return std::get<33>(_data); + return std::get<34>(_data); } RemoveOwnedSnapshotFileEdgeReq& ShardReqContainer::setRemoveOwnedSnapshotFileEdge() { _kind = ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE; - auto& x = std::get<33>(_data); + auto& x = std::get<34>(_data); x.clear(); return x; } const MakeFileTransientReq& ShardReqContainer::getMakeFileTransient() const { ALWAYS_ASSERT(_kind == ShardMessageKind::MAKE_FILE_TRANSIENT, "%s != %s", _kind, 
ShardMessageKind::MAKE_FILE_TRANSIENT); - return std::get<34>(_data); + return std::get<35>(_data); } MakeFileTransientReq& ShardReqContainer::setMakeFileTransient() { _kind = ShardMessageKind::MAKE_FILE_TRANSIENT; - auto& x = std::get<34>(_data); + auto& x = std::get<35>(_data); x.clear(); return x; } @@ -4064,6 +4117,9 @@ void ShardReqContainer::operator=(const ShardReqContainer& other) { case ShardMessageKind::REMOVE_INODE: setRemoveInode() = other.getRemoveInode(); break; + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + setAddSpanInitiateWithReference() = other.getAddSpanInitiateWithReference(); + break; case ShardMessageKind::CREATE_DIRECTORY_INODE: setCreateDirectoryInode() = other.getCreateDirectoryInode(); break; @@ -4149,22 +4205,24 @@ size_t ShardReqContainer::packedSize() const { return std::get<25>(_data).packedSize(); case ShardMessageKind::REMOVE_INODE: return std::get<26>(_data).packedSize(); - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: return std::get<27>(_data).packedSize(); - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: return std::get<28>(_data).packedSize(); - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: return std::get<29>(_data).packedSize(); - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: return std::get<30>(_data).packedSize(); - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: return std::get<31>(_data).packedSize(); - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: return std::get<32>(_data).packedSize(); - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: return std::get<33>(_data).packedSize(); - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: return std::get<34>(_data).packedSize(); + case ShardMessageKind::MAKE_FILE_TRANSIENT: + return std::get<35>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -4253,30 +4311,33 @@ void ShardReqContainer::pack(BincodeBuf& buf) const { case ShardMessageKind::REMOVE_INODE: std::get<26>(_data).pack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: std::get<27>(_data).pack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<28>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<29>(_data).pack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<30>(_data).pack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<31>(_data).pack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<32>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<33>(_data).pack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<34>(_data).pack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<35>(_data).pack(buf); + 
break; default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -4366,30 +4427,33 @@ void ShardReqContainer::unpack(BincodeBuf& buf, ShardMessageKind kind) { case ShardMessageKind::REMOVE_INODE: std::get<26>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: std::get<27>(_data).unpack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<28>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<29>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<30>(_data).unpack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<31>(_data).unpack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<32>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<33>(_data).unpack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<34>(_data).unpack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<35>(_data).unpack(buf); + break; default: throw BINCODE_EXCEPTION("bad ShardMessageKind kind %s", kind); } @@ -4453,6 +4517,8 @@ bool ShardReqContainer::operator==(const ShardReqContainer& other) const { return getBlockServiceFiles() == other.getBlockServiceFiles(); case ShardMessageKind::REMOVE_INODE: return getRemoveInode() == other.getRemoveInode(); + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + return getAddSpanInitiateWithReference() == other.getAddSpanInitiateWithReference(); case ShardMessageKind::CREATE_DIRECTORY_INODE: return getCreateDirectoryInode() == other.getCreateDirectoryInode(); case ShardMessageKind::SET_DIRECTORY_OWNER: @@ -4557,6 +4623,9 @@ std::ostream& operator<<(std::ostream& out, const ShardReqContainer& x) { case ShardMessageKind::REMOVE_INODE: out << x.getRemoveInode(); break; + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + out << x.getAddSpanInitiateWithReference(); + break; case ShardMessageKind::CREATE_DIRECTORY_INODE: out << x.getCreateDirectoryInode(); break; @@ -4857,83 +4926,93 @@ RemoveInodeResp& ShardRespContainer::setRemoveInode() { x.clear(); return x; } +const AddSpanInitiateWithReferenceResp& ShardRespContainer::getAddSpanInitiateWithReference() const { + ALWAYS_ASSERT(_kind == ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE, "%s != %s", _kind, ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE); + return std::get<27>(_data); +} +AddSpanInitiateWithReferenceResp& ShardRespContainer::setAddSpanInitiateWithReference() { + _kind = ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE; + auto& x = std::get<27>(_data); + x.clear(); + return x; +} const CreateDirectoryInodeResp& ShardRespContainer::getCreateDirectoryInode() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_DIRECTORY_INODE, "%s != %s", _kind, ShardMessageKind::CREATE_DIRECTORY_INODE); - return std::get<27>(_data); + return std::get<28>(_data); } CreateDirectoryInodeResp& ShardRespContainer::setCreateDirectoryInode() { _kind = ShardMessageKind::CREATE_DIRECTORY_INODE; - auto& x = std::get<27>(_data); + auto& x = std::get<28>(_data); x.clear(); return x; } const 
SetDirectoryOwnerResp& ShardRespContainer::getSetDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::SET_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::SET_DIRECTORY_OWNER); - return std::get<28>(_data); + return std::get<29>(_data); } SetDirectoryOwnerResp& ShardRespContainer::setSetDirectoryOwner() { _kind = ShardMessageKind::SET_DIRECTORY_OWNER; - auto& x = std::get<28>(_data); + auto& x = std::get<29>(_data); x.clear(); return x; } const RemoveDirectoryOwnerResp& ShardRespContainer::getRemoveDirectoryOwner() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_DIRECTORY_OWNER, "%s != %s", _kind, ShardMessageKind::REMOVE_DIRECTORY_OWNER); - return std::get<29>(_data); + return std::get<30>(_data); } RemoveDirectoryOwnerResp& ShardRespContainer::setRemoveDirectoryOwner() { _kind = ShardMessageKind::REMOVE_DIRECTORY_OWNER; - auto& x = std::get<29>(_data); + auto& x = std::get<30>(_data); x.clear(); return x; } const CreateLockedCurrentEdgeResp& ShardRespContainer::getCreateLockedCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE); - return std::get<30>(_data); + return std::get<31>(_data); } CreateLockedCurrentEdgeResp& ShardRespContainer::setCreateLockedCurrentEdge() { _kind = ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE; - auto& x = std::get<30>(_data); + auto& x = std::get<31>(_data); x.clear(); return x; } const LockCurrentEdgeResp& ShardRespContainer::getLockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::LOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::LOCK_CURRENT_EDGE); - return std::get<31>(_data); + return std::get<32>(_data); } LockCurrentEdgeResp& ShardRespContainer::setLockCurrentEdge() { _kind = ShardMessageKind::LOCK_CURRENT_EDGE; - auto& x = std::get<31>(_data); + auto& x = std::get<32>(_data); x.clear(); return x; } const UnlockCurrentEdgeResp& ShardRespContainer::getUnlockCurrentEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::UNLOCK_CURRENT_EDGE, "%s != %s", _kind, ShardMessageKind::UNLOCK_CURRENT_EDGE); - return std::get<32>(_data); + return std::get<33>(_data); } UnlockCurrentEdgeResp& ShardRespContainer::setUnlockCurrentEdge() { _kind = ShardMessageKind::UNLOCK_CURRENT_EDGE; - auto& x = std::get<32>(_data); + auto& x = std::get<33>(_data); x.clear(); return x; } const RemoveOwnedSnapshotFileEdgeResp& ShardRespContainer::getRemoveOwnedSnapshotFileEdge() const { ALWAYS_ASSERT(_kind == ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE, "%s != %s", _kind, ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE); - return std::get<33>(_data); + return std::get<34>(_data); } RemoveOwnedSnapshotFileEdgeResp& ShardRespContainer::setRemoveOwnedSnapshotFileEdge() { _kind = ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE; - auto& x = std::get<33>(_data); + auto& x = std::get<34>(_data); x.clear(); return x; } const MakeFileTransientResp& ShardRespContainer::getMakeFileTransient() const { ALWAYS_ASSERT(_kind == ShardMessageKind::MAKE_FILE_TRANSIENT, "%s != %s", _kind, ShardMessageKind::MAKE_FILE_TRANSIENT); - return std::get<34>(_data); + return std::get<35>(_data); } MakeFileTransientResp& ShardRespContainer::setMakeFileTransient() { _kind = ShardMessageKind::MAKE_FILE_TRANSIENT; - auto& x = std::get<34>(_data); + auto& x = std::get<35>(_data); x.clear(); return x; } @@ -5029,6 +5108,9 @@ void ShardRespContainer::operator=(const ShardRespContainer& other) { case ShardMessageKind::REMOVE_INODE: setRemoveInode() = 
other.getRemoveInode(); break; + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + setAddSpanInitiateWithReference() = other.getAddSpanInitiateWithReference(); + break; case ShardMessageKind::CREATE_DIRECTORY_INODE: setCreateDirectoryInode() = other.getCreateDirectoryInode(); break; @@ -5114,22 +5196,24 @@ size_t ShardRespContainer::packedSize() const { return std::get<25>(_data).packedSize(); case ShardMessageKind::REMOVE_INODE: return std::get<26>(_data).packedSize(); - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: return std::get<27>(_data).packedSize(); - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: return std::get<28>(_data).packedSize(); - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: return std::get<29>(_data).packedSize(); - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: return std::get<30>(_data).packedSize(); - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: return std::get<31>(_data).packedSize(); - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: return std::get<32>(_data).packedSize(); - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: return std::get<33>(_data).packedSize(); - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: return std::get<34>(_data).packedSize(); + case ShardMessageKind::MAKE_FILE_TRANSIENT: + return std::get<35>(_data).packedSize(); default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -5218,30 +5302,33 @@ void ShardRespContainer::pack(BincodeBuf& buf) const { case ShardMessageKind::REMOVE_INODE: std::get<26>(_data).pack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: std::get<27>(_data).pack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<28>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<29>(_data).pack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<30>(_data).pack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<31>(_data).pack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<32>(_data).pack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<33>(_data).pack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<34>(_data).pack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<35>(_data).pack(buf); + break; default: throw EGGS_EXCEPTION("bad ShardMessageKind kind %s", _kind); } @@ -5331,30 +5418,33 @@ void ShardRespContainer::unpack(BincodeBuf& buf, ShardMessageKind kind) { case ShardMessageKind::REMOVE_INODE: std::get<26>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_DIRECTORY_INODE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: std::get<27>(_data).unpack(buf); break; - case ShardMessageKind::SET_DIRECTORY_OWNER: + 
case ShardMessageKind::CREATE_DIRECTORY_INODE: std::get<28>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_DIRECTORY_OWNER: + case ShardMessageKind::SET_DIRECTORY_OWNER: std::get<29>(_data).unpack(buf); break; - case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: + case ShardMessageKind::REMOVE_DIRECTORY_OWNER: std::get<30>(_data).unpack(buf); break; - case ShardMessageKind::LOCK_CURRENT_EDGE: + case ShardMessageKind::CREATE_LOCKED_CURRENT_EDGE: std::get<31>(_data).unpack(buf); break; - case ShardMessageKind::UNLOCK_CURRENT_EDGE: + case ShardMessageKind::LOCK_CURRENT_EDGE: std::get<32>(_data).unpack(buf); break; - case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: + case ShardMessageKind::UNLOCK_CURRENT_EDGE: std::get<33>(_data).unpack(buf); break; - case ShardMessageKind::MAKE_FILE_TRANSIENT: + case ShardMessageKind::REMOVE_OWNED_SNAPSHOT_FILE_EDGE: std::get<34>(_data).unpack(buf); break; + case ShardMessageKind::MAKE_FILE_TRANSIENT: + std::get<35>(_data).unpack(buf); + break; default: throw BINCODE_EXCEPTION("bad ShardMessageKind kind %s", kind); } @@ -5418,6 +5508,8 @@ bool ShardRespContainer::operator==(const ShardRespContainer& other) const { return getBlockServiceFiles() == other.getBlockServiceFiles(); case ShardMessageKind::REMOVE_INODE: return getRemoveInode() == other.getRemoveInode(); + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + return getAddSpanInitiateWithReference() == other.getAddSpanInitiateWithReference(); case ShardMessageKind::CREATE_DIRECTORY_INODE: return getCreateDirectoryInode() == other.getCreateDirectoryInode(); case ShardMessageKind::SET_DIRECTORY_OWNER: @@ -5522,6 +5614,9 @@ std::ostream& operator<<(std::ostream& out, const ShardRespContainer& x) { case ShardMessageKind::REMOVE_INODE: out << x.getRemoveInode(); break; + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: + out << x.getAddSpanInitiateWithReference(); + break; case ShardMessageKind::CREATE_DIRECTORY_INODE: out << x.getCreateDirectoryInode(); break; diff --git a/cpp/core/MsgsGen.hpp b/cpp/core/MsgsGen.hpp index 5bc524b9..ecec4d50 100644 --- a/cpp/core/MsgsGen.hpp +++ b/cpp/core/MsgsGen.hpp @@ -162,6 +162,7 @@ enum class ShardMessageKind : uint8_t { SWAP_BLOCKS = 120, BLOCK_SERVICE_FILES = 121, REMOVE_INODE = 122, + ADD_SPAN_INITIATE_WITH_REFERENCE = 124, CREATE_DIRECTORY_INODE = 128, SET_DIRECTORY_OWNER = 129, REMOVE_DIRECTORY_OWNER = 137, @@ -200,6 +201,7 @@ const std::vector allShardMessageKind { ShardMessageKind::SWAP_BLOCKS, ShardMessageKind::BLOCK_SERVICE_FILES, ShardMessageKind::REMOVE_INODE, + ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE, ShardMessageKind::CREATE_DIRECTORY_INODE, ShardMessageKind::SET_DIRECTORY_OWNER, ShardMessageKind::REMOVE_DIRECTORY_OWNER, @@ -2191,6 +2193,46 @@ struct RemoveInodeResp { std::ostream& operator<<(std::ostream& out, const RemoveInodeResp& x); +struct AddSpanInitiateWithReferenceReq { + AddSpanInitiateReq req; + InodeId reference; + + static constexpr uint16_t STATIC_SIZE = AddSpanInitiateReq::STATIC_SIZE + 8; // req + reference + + AddSpanInitiateWithReferenceReq() { clear(); } + size_t packedSize() const { + size_t _size = 0; + _size += req.packedSize(); // req + _size += 8; // reference + return _size; + } + void pack(BincodeBuf& buf) const; + void unpack(BincodeBuf& buf); + void clear(); + bool operator==(const AddSpanInitiateWithReferenceReq&rhs) const; +}; + +std::ostream& operator<<(std::ostream& out, const AddSpanInitiateWithReferenceReq& x); + +struct AddSpanInitiateWithReferenceResp { + AddSpanInitiateResp 
resp; + + static constexpr uint16_t STATIC_SIZE = AddSpanInitiateResp::STATIC_SIZE; // resp + + AddSpanInitiateWithReferenceResp() { clear(); } + size_t packedSize() const { + size_t _size = 0; + _size += resp.packedSize(); // resp + return _size; + } + void pack(BincodeBuf& buf) const; + void unpack(BincodeBuf& buf); + void clear(); + bool operator==(const AddSpanInitiateWithReferenceResp&rhs) const; +}; + +std::ostream& operator<<(std::ostream& out, const AddSpanInitiateWithReferenceResp& x); + struct CreateDirectoryInodeReq { InodeId id; InodeId ownerId; @@ -3414,7 +3456,7 @@ std::ostream& operator<<(std::ostream& out, const TestWriteResp& x); struct ShardReqContainer { private: ShardMessageKind _kind = (ShardMessageKind)0; - std::tuple _data; + std::tuple _data; public: ShardReqContainer(); ShardReqContainer(const ShardReqContainer& other); @@ -3476,6 +3518,8 @@ public: BlockServiceFilesReq& setBlockServiceFiles(); const RemoveInodeReq& getRemoveInode() const; RemoveInodeReq& setRemoveInode(); + const AddSpanInitiateWithReferenceReq& getAddSpanInitiateWithReference() const; + AddSpanInitiateWithReferenceReq& setAddSpanInitiateWithReference(); const CreateDirectoryInodeReq& getCreateDirectoryInode() const; CreateDirectoryInodeReq& setCreateDirectoryInode(); const SetDirectoryOwnerReq& getSetDirectoryOwner() const; @@ -3506,7 +3550,7 @@ std::ostream& operator<<(std::ostream& out, const ShardReqContainer& x); struct ShardRespContainer { private: ShardMessageKind _kind = (ShardMessageKind)0; - std::tuple _data; + std::tuple _data; public: ShardRespContainer(); ShardRespContainer(const ShardRespContainer& other); @@ -3568,6 +3612,8 @@ public: BlockServiceFilesResp& setBlockServiceFiles(); const RemoveInodeResp& getRemoveInode() const; RemoveInodeResp& setRemoveInode(); + const AddSpanInitiateWithReferenceResp& getAddSpanInitiateWithReference() const; + AddSpanInitiateWithReferenceResp& setAddSpanInitiateWithReference(); const CreateDirectoryInodeResp& getCreateDirectoryInode() const; CreateDirectoryInodeResp& setCreateDirectoryInode(); const SetDirectoryOwnerResp& getSetDirectoryOwner() const; diff --git a/cpp/shard/Shard.cpp b/cpp/shard/Shard.cpp index 595ed9bb..3a97fb3f 100644 --- a/cpp/shard/Shard.cpp +++ b/cpp/shard/Shard.cpp @@ -52,16 +52,16 @@ public: } private: - EggsError _applyLogEntryLocked(const ShardLogEntry& logEntry, ShardRespContainer& resp) { - EggsError err = db.applyLogEntry(true, _currentLogIndex+1, logEntry, resp); + EggsError _applyLogEntryLocked(ShardMessageKind reqKind, const ShardLogEntry& logEntry, ShardRespContainer& resp) { + EggsError err = db.applyLogEntry(true, reqKind, _currentLogIndex+1, logEntry, resp); _currentLogIndex++; return err; } public: - EggsError applyLogEntry(const ShardLogEntry& logEntry, ShardRespContainer& resp) { + EggsError applyLogEntry(ShardMessageKind reqKind, const ShardLogEntry& logEntry, ShardRespContainer& resp) { std::lock_guard lock(_applyLock); - return _applyLogEntryLocked(logEntry, resp); + return _applyLogEntryLocked(reqKind, logEntry, resp); } EggsError prepareAndApplyLogEntry(ShardReqContainer& req, ShardLogEntry& logEntry, ShardRespContainer& resp) { @@ -72,7 +72,7 @@ public: std::lock_guard lock(_applyLock); EggsError err = db.prepareLogEntry(req, logEntry); if (err == NO_ERROR) { - err = _applyLogEntryLocked(logEntry, resp); + err = _applyLogEntryLocked(req.kind(), logEntry, resp); } return err; } @@ -406,7 +406,7 @@ public: _env.clearAlert(_alert); { - EggsError err = _shared.applyLogEntry(_logEntry, _respContainer); + 
EggsError err = _shared.applyLogEntry((ShardMessageKind)0 /* reqKind is only consulted for ADD_SPAN_INITIATE entries, never for block service updates */, _logEntry, _respContainer); if (err != NO_ERROR) { RAISE_ALERT(_env, "unexpected failure when trying to update block services: %s", err); return false; diff --git a/cpp/shard/ShardDB.cpp b/cpp/shard/ShardDB.cpp index bc0025d3..40860958 100644 --- a/cpp/shard/ShardDB.cpp +++ b/cpp/shard/ShardDB.cpp @@ -1435,10 +1435,13 @@ struct ShardDBImpl { return NO_ERROR; } - EggsError _prepareAddSpanInitiate(EggsTime time, const AddSpanInitiateReq& req, AddSpanInitiateEntry& entry) { + EggsError _prepareAddSpanInitiate(EggsTime time, const AddSpanInitiateReq& req, InodeId reference, AddSpanInitiateEntry& entry) { if (req.fileId.type() != InodeType::FILE && req.fileId.type() != InodeType::SYMLINK) { return EggsError::TYPE_IS_DIRECTORY; } + if (reference.type() != InodeType::FILE && reference.type() != InodeType::SYMLINK) { + return EggsError::TYPE_IS_DIRECTORY; + } if (req.fileId.shard() != _shid) { return EggsError::BAD_SHARD; } @@ -1505,43 +1508,59 @@ struct ShardDBImpl { LOG_DEBUG(_env, "Starting out with %s block service candidates, parity %s", candidateBlockServices.size(), entry.parity); std::vector<BlockServiceId> pickedBlockServices; pickedBlockServices.reserve(req.parity.blocks()); - // Try to get the first span to copy its block services -- this should be the - // very common case past the first span. - { - StaticValue k; - k().setFileId(req.fileId); - k().setOffset(0); - std::string v; - auto status = _db->Get({}, _spansCf, k.toSlice(), &v); - if (status.IsNotFound()) { - // no-op -- we'll just generate them all at random - } else { - ROCKS_DB_CHECKED(status); - ExternalValue span(v); - // TODO this means that if the first span is inline or smaller, all the other - // spans will get block services generated at random, which is not ideal. We - // should probably look further. - if (span().storageClass() != INLINE_STORAGE) { - const auto blocks = span().blocksBody(); - for ( - int i = 0; - i < blocks.parity().blocks() && pickedBlockServices.size() < req.parity.blocks() && candidateBlockServices.size() > 0; - i++ - ) { - const BlockBody spanBlock = blocks.block(i); - auto isCandidate = std::find(candidateBlockServices.begin(), candidateBlockServices.end(), spanBlock.blockService()); - if (isCandidate == candidateBlockServices.end()) { - continue; - } - LOG_DEBUG(_env, "(1) Picking block service candidate %s, failure domain %s", spanBlock.blockService(), GoLangQuotedStringFmt((const char*)_blockServicesCache.at(spanBlock.blockService().u64).failureDomain.data(), 16)); - BlockServiceId blockServiceId = spanBlock.blockService(); - pickedBlockServices.emplace_back(blockServiceId); - std::iter_swap(isCandidate, candidateBlockServices.end()-1); - candidateBlockServices.pop_back(); - } - } + // We try to copy the block services from the first and the last span. The first + // span is generally considered the "reference" one, and should work in the common + // case. The last span is useful only in the case where we start using different + // block services mid-file, probably because a block service went down. Why not + // always use the last span? When we migrate or defrag or generally reorganize + // the spans we work from left-to-right, and in that case if we always + // looked at the last one we'd pick a random block service every time. The "last + // span" fallback is free in the common case anyhow.
+ const auto fillInBlockServicesFromSpan = [&](bool first) { + // empty file, bail out early and avoid pointless double lookup + if (entry.fileId == reference && entry.byteOffset == 0) { + return; } - } + // we're already done (avoid double seek in the common case) + if (pickedBlockServices.size() >= req.parity.blocks() || candidateBlockServices.size() == 0) { + return; + } + StaticValue startK; + startK().setFileId(reference); + // We should never have many tombstones here (spans aren't really deleted and + // re-added apart from rare cases), so the offset upper bound is fine. + startK().setOffset(first ? 0 : ~(uint64_t)0); + std::unique_ptr<rocksdb::Iterator> it(_db->NewIterator({}, _spansCf)); + it->SeekForPrev(startK.toSlice()); + if (!it->Valid()) { // nothing to do if we can't find a span + if (!it->status().IsNotFound()) { + ROCKS_DB_CHECKED(it->status()); + } + return; + } + auto k = ExternalValue::FromSlice(it->key()); + if (k().fileId() != reference) { return; } // the reference file has no spans, we landed on some other file's span + auto span = ExternalValue::FromSlice(it->value()); + if (span().storageClass() == INLINE_STORAGE) { return; } + const auto blocks = span().blocksBody(); + for ( + int i = 0; + i < blocks.parity().blocks() && pickedBlockServices.size() < req.parity.blocks() && candidateBlockServices.size() > 0; + i++ + ) { + const BlockBody spanBlock = blocks.block(i); + auto isCandidate = std::find(candidateBlockServices.begin(), candidateBlockServices.end(), spanBlock.blockService()); + if (isCandidate == candidateBlockServices.end()) { + continue; + } + LOG_DEBUG(_env, "(1) Picking block service candidate %s, failure domain %s", spanBlock.blockService(), GoLangQuotedStringFmt((const char*)_blockServicesCache.at(spanBlock.blockService().u64).failureDomain.data(), 16)); + BlockServiceId blockServiceId = spanBlock.blockService(); + pickedBlockServices.emplace_back(blockServiceId); + std::iter_swap(isCandidate, candidateBlockServices.end()-1); + candidateBlockServices.pop_back(); + } + }; + fillInBlockServicesFromSpan(true); + fillInBlockServicesFromSpan(false); // Fill in whatever remains. We don't need to be deterministic here (we would have to // if we were in log application), but we might as well. { @@ -1754,9 +1773,14 @@ struct ShardDBImpl { case ShardMessageKind::ADD_INLINE_SPAN: err = _prepareAddInlineSpan(time, req.getAddInlineSpan(), logEntryBody.setAddInlineSpan()); break; - case ShardMessageKind::ADD_SPAN_INITIATE: - err = _prepareAddSpanInitiate(time, req.getAddSpanInitiate(), logEntryBody.setAddSpanInitiate()); - break; + case ShardMessageKind::ADD_SPAN_INITIATE: { + const auto& addSpanReq = req.getAddSpanInitiate(); + err = _prepareAddSpanInitiate(time, addSpanReq, addSpanReq.fileId, logEntryBody.setAddSpanInitiate()); + break; } + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: { + const auto& addSpanReq = req.getAddSpanInitiateWithReference(); + err = _prepareAddSpanInitiate(time, addSpanReq.req, addSpanReq.reference, logEntryBody.setAddSpanInitiate()); + break; } case ShardMessageKind::ADD_SPAN_CERTIFY: err = _prepareAddSpanCertify(time, req.getAddSpanCertify(), logEntryBody.setAddSpanCertify()); break; @@ -3431,7 +3455,7 @@ struct ShardDBImpl { return NO_ERROR; } - EggsError applyLogEntry(bool sync, uint64_t logIndex, const ShardLogEntry& logEntry, ShardRespContainer& resp) { + EggsError applyLogEntry(bool sync, ShardMessageKind reqKind, uint64_t logIndex, const ShardLogEntry& logEntry, ShardRespContainer& resp) { // TODO figure out the story with what regards time monotonicity (possibly drop non-monotonic log // updates?)
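[Editor's note: the picking logic in the new fillInBlockServicesFromSpan lambda above reduces to an intersection with swap-and-pop removal: walk the reference span's blocks, and whenever a block's service is still in the candidate pool, move it to the picked list. A standalone Go sketch of that inner loop, with raw uint64 ids standing in for the C++ BlockServiceId values; the helper name and signature are illustrative, not part of the patch:]

    // pickFromReferenceSpan mirrors the inner loop of fillInBlockServicesFromSpan:
    // block services appearing both in the reference span and in the candidate pool
    // are moved to picked, until `wanted` services have been chosen. Removal from
    // candidates is swap-with-last plus truncate, like the C++ iter_swap/pop_back
    // pair: O(1) per pick, order not preserved.
    func pickFromReferenceSpan(spanBlockServices, candidates, picked []uint64, wanted int) ([]uint64, []uint64) {
        for _, bs := range spanBlockServices {
            if len(picked) >= wanted || len(candidates) == 0 {
                break // already have enough, or nothing left to pick from
            }
            for i, c := range candidates {
                if c == bs {
                    picked = append(picked, c)
                    candidates[i] = candidates[len(candidates)-1] // swap with last...
                    candidates = candidates[:len(candidates)-1]   // ...and pop
                    break
                }
            }
        }
        return picked, candidates
    }

[Calling this once for the first span and once for the last span, then topping up from the remaining candidates, reproduces the overall selection.]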
@@ -3509,9 +3533,15 @@ struct ShardDBImpl { case ShardLogEntryKind::ADD_INLINE_SPAN: err = _applyAddInlineSpan(time, batch, logEntryBody.getAddInlineSpan(), resp.setAddInlineSpan()); break; - case ShardLogEntryKind::ADD_SPAN_INITIATE: - err = _applyAddSpanInitiate(time, batch, logEntryBody.getAddSpanInitiate(), resp.setAddSpanInitiate()); - break; + case ShardLogEntryKind::ADD_SPAN_INITIATE: { + if (reqKind == ShardMessageKind::ADD_SPAN_INITIATE) { + err = _applyAddSpanInitiate(time, batch, logEntryBody.getAddSpanInitiate(), resp.setAddSpanInitiate()); + } else { + ALWAYS_ASSERT(reqKind == ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE); + auto& refResp = resp.setAddSpanInitiateWithReference(); + err = _applyAddSpanInitiate(time, batch, logEntryBody.getAddSpanInitiate(), refResp.resp); + } + break; } case ShardLogEntryKind::ADD_SPAN_CERTIFY: err = _applyAddSpanCertify(time, batch, logEntryBody.getAddSpanCertify(), resp.setAddSpanCertify()); break; @@ -3732,6 +3762,7 @@ bool readOnlyShardReq(const ShardMessageKind kind) { return true; case ShardMessageKind::CONSTRUCT_FILE: case ShardMessageKind::ADD_SPAN_INITIATE: + case ShardMessageKind::ADD_SPAN_INITIATE_WITH_REFERENCE: case ShardMessageKind::ADD_SPAN_CERTIFY: case ShardMessageKind::ADD_INLINE_SPAN: case ShardMessageKind::LINK_FILE: @@ -3782,8 +3813,8 @@ EggsError ShardDB::prepareLogEntry(const ShardReqContainer& req, ShardLogEntry& return ((ShardDBImpl*)_impl)->prepareLogEntry(req, logEntry); } -EggsError ShardDB::applyLogEntry(bool sync, uint64_t logEntryIx, const ShardLogEntry& logEntry, ShardRespContainer& resp) { - return ((ShardDBImpl*)_impl)->applyLogEntry(sync, logEntryIx, logEntry, resp); +EggsError ShardDB::applyLogEntry(bool sync, ShardMessageKind reqKind, uint64_t logEntryIx, const ShardLogEntry& logEntry, ShardRespContainer& resp) { + return ((ShardDBImpl*)_impl)->applyLogEntry(sync, reqKind, logEntryIx, logEntry, resp); } uint64_t ShardDB::lastAppliedLogEntry() { diff --git a/cpp/shard/ShardDB.hpp b/cpp/shard/ShardDB.hpp index b0847f61..ec2cbe31 100644 --- a/cpp/shard/ShardDB.hpp +++ b/cpp/shard/ShardDB.hpp @@ -91,7 +91,7 @@ public: // not be necessary. If it is not, then it is certainly necessary. // // As usual, if an error is returned, the contents of `resp` should be ignored. 
- EggsError applyLogEntry(bool sync, uint64_t logEntryIx, const ShardLogEntry& logEntry, ShardRespContainer& resp); + EggsError applyLogEntry(bool sync, ShardMessageKind reqKind, uint64_t logEntryIx, const ShardLogEntry& logEntry, ShardRespContainer& resp); // For internal testing const std::array& secretKey() const; diff --git a/cpp/tests/tests.cpp b/cpp/tests/tests.cpp index 16d4839a..b8321ccd 100644 --- a/cpp/tests/tests.cpp +++ b/cpp/tests/tests.cpp @@ -446,7 +446,7 @@ TEST_CASE("touch file") { req.note = "test note"; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); constructTime = logEntry->time; - NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + NO_EGGS_ERROR(db->applyLogEntry(true, ShardMessageKind::CONSTRUCT_FILE, ++logEntryIndex, *logEntry, *respContainer)); auto& resp = respContainer->getConstructFile(); id = resp.id; cookie = resp.cookie; @@ -468,7 +468,7 @@ TEST_CASE("touch file") { req.name = name; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); linkTime = logEntry->time; - NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + NO_EGGS_ERROR(db->applyLogEntry(true, ShardMessageKind::LINK_FILE, ++logEntryIndex, *logEntry, *respContainer)); } { auto& req = reqContainer->setReadDir(); @@ -517,7 +517,7 @@ TEST_CASE("override") { req.type = (uint8_t)InodeType::FILE; req.note = "test note"; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); - NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + NO_EGGS_ERROR(db->applyLogEntry(true, ShardMessageKind::CONSTRUCT_FILE, ++logEntryIndex, *logEntry, *respContainer)); auto& resp = respContainer->getConstructFile(); id = resp.id; cookie = resp.cookie; @@ -534,7 +534,7 @@ TEST_CASE("override") { req.ownerId = ROOT_DIR_INODE_ID; req.name = name; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); - NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + NO_EGGS_ERROR(db->applyLogEntry(true, ShardMessageKind::LINK_FILE, ++logEntryIndex, *logEntry, *respContainer)); creationTime = respContainer->getLinkFile().creationTime; } return {id, creationTime}; @@ -551,7 +551,7 @@ TEST_CASE("override") { req.oldCreationTime = fooCreationTime; req.newName = "bar"; NO_EGGS_ERROR(db->prepareLogEntry(*reqContainer, *logEntry)); - NO_EGGS_ERROR(db->applyLogEntry(true, ++logEntryIndex, *logEntry, *respContainer)); + NO_EGGS_ERROR(db->applyLogEntry(true, ShardMessageKind::SAME_DIRECTORY_RENAME, ++logEntryIndex, *logEntry, *respContainer)); } { auto& req = reqContainer->setFullReadDir(); diff --git a/go/bincodegen/bincodegen.go b/go/bincodegen/bincodegen.go index 1fc0b4fb..b37248a8 100644 --- a/go/bincodegen/bincodegen.go +++ b/go/bincodegen/bincodegen.go @@ -1519,6 +1519,11 @@ func main() { reflect.TypeOf(msgs.RemoveInodeReq{}), reflect.TypeOf(msgs.RemoveInodeResp{}), }, + { + 0x7C, + reflect.TypeOf(msgs.AddSpanInitiateWithReferenceReq{}), + reflect.TypeOf(msgs.AddSpanInitiateWithReferenceResp{}), + }, // UNSAFE OPERATIONS -- these can break invariants. 
{ 0x80, diff --git a/go/eggstests/cleanup.go b/go/eggstests/cleanup.go index 56943141..02788814 100644 --- a/go/eggstests/cleanup.go +++ b/go/eggstests/cleanup.go @@ -68,7 +68,7 @@ func cleanupAfterTest( } log.Info("waiting for transient deadlines to have passed") time.Sleep(testTransientDeadlineInterval - time.Since(cleanupStartedAt)) - log.Info("all done, destructing files") + log.Info("deadlines passed, collecting") if err := lib.DestructFilesInAllShards(log, &lib.GCOptions{ShuckleAddress: shuckleAddress, Counters: counters}); err != nil { panic(err) } diff --git a/go/lib/metadatareq.go b/go/lib/metadatareq.go index 301c907c..9e24d25d 100644 --- a/go/lib/metadatareq.go +++ b/go/lib/metadatareq.go @@ -287,7 +287,7 @@ func (c *Client) metadataRequest( // Check if it's an error or not. We only use debug here because some errors are legitimate // responses (e.g. FILE_EMPTY) if eggsError != nil { - log.Debug("got error %v for req %T id %v from shard %v (took %v)", *eggsError, req.body, req.requestId, shid, elapsed) + log.DebugStack(1, "got error %v for req %T id %v from shard %v (took %v)", *eggsError, req.body, req.requestId, shid, elapsed) return *eggsError } log.Debug("got response %T from shard %v (took %v)", respBody, shid, elapsed) diff --git a/go/lib/migrate.go b/go/lib/migrate.go index 41015ffa..ba810a54 100644 --- a/go/lib/migrate.go +++ b/go/lib/migrate.go @@ -25,35 +25,6 @@ import ( "xtx/eggsfs/rs" ) -type scratchFile struct { - id msgs.InodeId - cookie [8]byte - size uint64 -} - -func ensureScratchFile(log *Logger, client *Client, shard msgs.ShardId, file *scratchFile) error { - if file.id != msgs.NULL_INODE_ID { - return nil - } - resp := msgs.ConstructFileResp{} - err := client.ShardRequest( - log, - shard, - &msgs.ConstructFileReq{ - Type: msgs.FILE, - Note: "migrate", - }, - &resp, - ) - if err != nil { - return err - } - file.id = resp.Id - file.cookie = resp.Cookie - file.size = 0 - return nil -} - func fetchBlock( log *Logger, client *Client, @@ -96,7 +67,8 @@ func fetchBlock( func writeBlock( log *Logger, client *Client, - file *scratchFile, + scratch *scratchFile, + file msgs.InodeId, blacklist []msgs.BlockServiceId, blockSize uint32, storageClass msgs.StorageClass, @@ -108,28 +80,31 @@ func writeBlock( blacklistEntries[i].BlockService = blacklist[i] } - initiateSpanReq := msgs.AddSpanInitiateReq{ - FileId: file.id, - Cookie: file.cookie, - ByteOffset: file.size, - Size: blockSize, - Crc: block.Crc, - StorageClass: storageClass, - Blacklist: blacklistEntries, - Parity: rs.MkParity(1, 0), - Stripes: 1, - CellSize: blockSize, - Crcs: []msgs.Crc{block.Crc}, + initiateSpanReq := msgs.AddSpanInitiateWithReferenceReq{ + Req: msgs.AddSpanInitiateReq{ + FileId: scratch.id, + Cookie: scratch.cookie, + ByteOffset: scratch.size, + Size: blockSize, + Crc: block.Crc, + StorageClass: storageClass, + Blacklist: blacklistEntries, + Parity: rs.MkParity(1, 0), + Stripes: 1, + CellSize: blockSize, + Crcs: []msgs.Crc{block.Crc}, + }, + Reference: file, } maxAttempts := 4 // 4 = block services we currently kill in testing for attempt := 0; ; attempt++ { var err error - initiateSpanResp := msgs.AddSpanInitiateResp{} - if err := client.ShardRequest(log, file.id.Shard(), &initiateSpanReq, &initiateSpanResp); err != nil { + initiateSpanResp := msgs.AddSpanInitiateWithReferenceResp{} + if err := client.ShardRequest(log, scratch.id.Shard(), &initiateSpanReq, &initiateSpanResp); err != nil { return 0, err } - dstBlock := &initiateSpanResp.Blocks[0] + dstBlock := &initiateSpanResp.Resp.Blocks[0] var 
dstConn BlocksConn certifySpanResp := msgs.AddSpanCertifyResp{} var writeProof [8]byte @@ -148,16 +123,16 @@ func writeBlock( } err = client.ShardRequest( log, - file.id.Shard(), + scratch.id.Shard(), &msgs.AddSpanCertifyReq{ - FileId: file.id, - Cookie: file.cookie, - ByteOffset: file.size, + FileId: scratch.id, + Cookie: scratch.cookie, + ByteOffset: scratch.size, Proofs: []msgs.BlockProof{{BlockId: dstBlock.BlockId, Proof: writeProof}}, }, &certifySpanResp, ) - file.size += uint64(blockSize) + scratch.size += uint64(blockSize) if err != nil { return 0, err } @@ -170,19 +145,19 @@ func writeBlock( err = nil // create temp file, move the bad span there, then we can restart constructResp := &msgs.ConstructFileResp{} - if err := client.ShardRequest(log, file.id.Shard(), &msgs.ConstructFileReq{Type: msgs.FILE, Note: "bad_write_block_attempt"}, constructResp); err != nil { + if err := client.ShardRequest(log, scratch.id.Shard(), &msgs.ConstructFileReq{Type: msgs.FILE, Note: "bad_write_block_attempt"}, constructResp); err != nil { return 0, err } moveSpanReq := &msgs.MoveSpanReq{ - FileId1: file.id, - ByteOffset1: initiateSpanReq.ByteOffset, - Cookie1: file.cookie, + FileId1: scratch.id, + ByteOffset1: initiateSpanReq.Req.ByteOffset, + Cookie1: scratch.cookie, FileId2: constructResp.Id, ByteOffset2: 0, Cookie2: constructResp.Cookie, SpanSize: blockSize, } - if err := client.ShardRequest(log, file.id.Shard(), moveSpanReq, &msgs.MoveSpanResp{}); err != nil { + if err := client.ShardRequest(log, scratch.id.Shard(), moveSpanReq, &msgs.MoveSpanResp{}); err != nil { return 0, err } @@ -195,7 +170,8 @@ func copyBlock( log *Logger, client *Client, bufPool *sync.Pool, - file *scratchFile, + scratch *scratchFile, + file msgs.InodeId, blockServices []msgs.BlockService, blacklist []msgs.BlockServiceId, blockSize uint32, @@ -208,7 +184,7 @@ func copyBlock( if err := fetchBlock(log, client, blockServices, blockSize, block, buf); err != nil { return 0, true, nil // might find other block services } - blockId, err := writeBlock(log, client, file, blacklist, blockSize, storageClass, block, buf) + blockId, err := writeBlock(log, client, scratch, file, blacklist, blockSize, storageClass, block, buf) return blockId, false, err } @@ -266,61 +242,7 @@ func reconstructBlock( wantBuf.Grow(int(blockSize)) wantBytes := wantBuf.Bytes()[:blockSize] rs.RecoverInto(haveBlocksIxs, haveBlocks, uint8(blockToMigrateIx), wantBytes) - return writeBlock(log, client, scratchFile, blacklist, blockSize, storageClass, &blocks[blockToMigrateIx], bytes.NewReader(wantBytes)) -} - -type keepScratchFileAlive struct { - stopHeartbeat chan struct{} - heartbeatStopped chan struct{} -} - -func startToKeepScratchFileAlive( - log *Logger, - client *Client, - scratchFile *scratchFile, -) keepScratchFileAlive { - stopHeartbeat := make(chan struct{}) - heartbeatStopped := make(chan struct{}) - timerExpired := make(chan struct{}, 1) - go func() { - for { - if scratchFile.id != msgs.NULL_INODE_ID { - // bump the deadline, makes sure the file stays alive for - // the duration of this function - log.Debug("bumping deadline for scratch file %v", scratchFile.id) - req := msgs.AddInlineSpanReq{ - FileId: scratchFile.id, - Cookie: scratchFile.cookie, - StorageClass: msgs.EMPTY_STORAGE, - } - if err := client.ShardRequest(log, scratchFile.id.Shard(), &req, &msgs.AddInlineSpanResp{}); err != nil { - log.RaiseAlert("could not bump scratch file deadline when migrating blocks: %w", err) - } - } - go func() { - time.Sleep(time.Minute) - select { - case 
timerExpired <- struct{}{}: - default: - } - }() - select { - case <-stopHeartbeat: - heartbeatStopped <- struct{}{} - return - case <-timerExpired: - } - } - }() - return keepScratchFileAlive{ - stopHeartbeat: stopHeartbeat, - heartbeatStopped: heartbeatStopped, - } -} - -func (k *keepScratchFileAlive) stop() { - k.stopHeartbeat <- struct{}{} - <-k.heartbeatStopped + return writeBlock(log, client, scratchFile, fileId, blacklist, blockSize, storageClass, &blocks[blockToMigrateIx], bytes.NewReader(wantBytes)) } type timeStats struct { @@ -435,7 +357,7 @@ func migrateBlocksInFileInternal( } var err error var canRetry bool - newBlock, canRetry, err = copyBlock(log, client, bufPool, scratchFile, fileSpansResp.BlockServices, blacklist, body.CellSize*uint32(body.Stripes), span.Header.StorageClass, block) + newBlock, canRetry, err = copyBlock(log, client, bufPool, scratchFile, fileId, fileSpansResp.BlockServices, blacklist, body.CellSize*uint32(body.Stripes), span.Header.StorageClass, block) if err != nil && !canRetry { return err } diff --git a/go/lib/scratch.go b/go/lib/scratch.go new file mode 100644 index 00000000..e24ca1d0 --- /dev/null +++ b/go/lib/scratch.go @@ -0,0 +1,89 @@ +package lib + +import ( + "time" + "xtx/eggsfs/msgs" +) + +type scratchFile struct { + id msgs.InodeId + cookie [8]byte + size uint64 +} + +func ensureScratchFile(log *Logger, client *Client, shard msgs.ShardId, file *scratchFile) error { + if file.id != msgs.NULL_INODE_ID { + return nil + } + resp := msgs.ConstructFileResp{} + err := client.ShardRequest( + log, + shard, + &msgs.ConstructFileReq{ + Type: msgs.FILE, + Note: "migrate", + }, + &resp, + ) + if err != nil { + return err + } + file.id = resp.Id + file.cookie = resp.Cookie + file.size = 0 + return nil +} + +type keepScratchFileAlive struct { + stopHeartbeat chan struct{} + heartbeatStopped chan struct{} +} + +func startToKeepScratchFileAlive( + log *Logger, + client *Client, + scratchFile *scratchFile, +) keepScratchFileAlive { + stopHeartbeat := make(chan struct{}) + heartbeatStopped := make(chan struct{}) + timerExpired := make(chan struct{}, 1) + go func() { + for { + if scratchFile.id != msgs.NULL_INODE_ID { + // bump the deadline, makes sure the file stays alive for + // the duration of this function + log.Debug("bumping deadline for scratch file %v", scratchFile.id) + req := msgs.AddInlineSpanReq{ + FileId: scratchFile.id, + Cookie: scratchFile.cookie, + StorageClass: msgs.EMPTY_STORAGE, + } + if err := client.ShardRequest(log, scratchFile.id.Shard(), &req, &msgs.AddInlineSpanResp{}); err != nil { + log.RaiseAlert("could not bump scratch file deadline when migrating blocks: %w", err) + } + } + go func() { + time.Sleep(time.Minute) + select { + case timerExpired <- struct{}{}: + default: + } + }() + select { + case <-stopHeartbeat: + heartbeatStopped <- struct{}{} + return + case <-timerExpired: + } + } + }() + return keepScratchFileAlive{ + stopHeartbeat: stopHeartbeat, + heartbeatStopped: heartbeatStopped, + } +} + +func (k *keepScratchFileAlive) stop() { + k.stopHeartbeat <- struct{}{} + <-k.heartbeatStopped +} diff --git a/go/msgs/msgs.go b/go/msgs/msgs.go index a5c44bf2..0823feb4 100644 --- a/go/msgs/msgs.go +++ b/go/msgs/msgs.go @@ -551,6 +551,20 @@ type AddSpanInitiateResp struct { Blocks []BlockInfo } +// The only reason why this isn't the same as AddSpanInitiateReq is +// that we added it after we had deployed many clients. 
+type AddSpanInitiateWithReferenceReq struct { + Req AddSpanInitiateReq + // What inode to use to compute what block services to pick. If NULL, FileId + // will be used. This is useful when migrating or defragmenting, where we're + // adding a span to a file but we plan to merge it into another file. + Reference InodeId +} + +type AddSpanInitiateWithReferenceResp struct { + Resp AddSpanInitiateResp +} + type BlockProof struct { BlockId BlockId Proof [8]byte diff --git a/go/msgs/msgs_bincode.go b/go/msgs/msgs_bincode.go index b99a3a45..4c4b666b 100644 --- a/go/msgs/msgs_bincode.go +++ b/go/msgs/msgs_bincode.go @@ -360,6 +360,8 @@ func (k ShardMessageKind) String() string { return "BLOCK_SERVICE_FILES" case 122: return "REMOVE_INODE" + case 124: + return "ADD_SPAN_INITIATE_WITH_REFERENCE" case 128: return "CREATE_DIRECTORY_INODE" case 129: @@ -410,6 +412,7 @@ const ( SWAP_BLOCKS ShardMessageKind = 0x78 BLOCK_SERVICE_FILES ShardMessageKind = 0x79 REMOVE_INODE ShardMessageKind = 0x7A + ADD_SPAN_INITIATE_WITH_REFERENCE ShardMessageKind = 0x7C CREATE_DIRECTORY_INODE ShardMessageKind = 0x80 SET_DIRECTORY_OWNER ShardMessageKind = 0x81 REMOVE_DIRECTORY_OWNER ShardMessageKind = 0x89 @@ -448,6 +451,7 @@ var AllShardMessageKind = [...]ShardMessageKind{ SWAP_BLOCKS, BLOCK_SERVICE_FILES, REMOVE_INODE, + ADD_SPAN_INITIATE_WITH_REFERENCE, CREATE_DIRECTORY_INODE, SET_DIRECTORY_OWNER, REMOVE_DIRECTORY_OWNER, @@ -516,6 +520,8 @@ func MkShardMessage(k string) (ShardRequest, ShardResponse, error) { return &BlockServiceFilesReq{}, &BlockServiceFilesResp{}, nil case k == "REMOVE_INODE": return &RemoveInodeReq{}, &RemoveInodeResp{}, nil + case k == "ADD_SPAN_INITIATE_WITH_REFERENCE": + return &AddSpanInitiateWithReferenceReq{}, &AddSpanInitiateWithReferenceResp{}, nil case k == "CREATE_DIRECTORY_INODE": return &CreateDirectoryInodeReq{}, &CreateDirectoryInodeResp{}, nil case k == "SET_DIRECTORY_OWNER": @@ -2342,6 +2348,48 @@ func (v *RemoveInodeResp) Unpack(r io.Reader) error { return nil } +func (v *AddSpanInitiateWithReferenceReq) ShardRequestKind() ShardMessageKind { + return ADD_SPAN_INITIATE_WITH_REFERENCE +} + +func (v *AddSpanInitiateWithReferenceReq) Pack(w io.Writer) error { + if err := v.Req.Pack(w); err != nil { + return err + } + if err := bincode.PackScalar(w, uint64(v.Reference)); err != nil { + return err + } + return nil +} + +func (v *AddSpanInitiateWithReferenceReq) Unpack(r io.Reader) error { + if err := v.Req.Unpack(r); err != nil { + return err + } + if err := bincode.UnpackScalar(r, (*uint64)(&v.Reference)); err != nil { + return err + } + return nil +} + +func (v *AddSpanInitiateWithReferenceResp) ShardResponseKind() ShardMessageKind { + return ADD_SPAN_INITIATE_WITH_REFERENCE +} + +func (v *AddSpanInitiateWithReferenceResp) Pack(w io.Writer) error { + if err := v.Resp.Pack(w); err != nil { + return err + } + return nil +} + +func (v *AddSpanInitiateWithReferenceResp) Unpack(r io.Reader) error { + if err := v.Resp.Unpack(r); err != nil { + return err + } + return nil +} + func (v *CreateDirectoryInodeReq) ShardRequestKind() ShardMessageKind { return CREATE_DIRECTORY_INODE }
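[Editor's note: to show how the two new messages fit together end to end, here is a minimal client-side sketch in the style of writeBlock in go/lib/migrate.go, assuming it lives in package lib (so it sees scratchFile, Logger, Client and the msgs/rs imports migrate.go already uses). The helper name, the 1+0 parity choice, and the omitted blacklist are illustrative, not part of the patch:]

    // addSpanWithReference appends one mirrored (1+0) span to a scratch file,
    // asking the shard to pick block services by looking at `reference` -- the
    // file the span will later be moved into -- rather than at the scratch file.
    func addSpanWithReference(
        log *Logger,
        client *Client,
        scratch *scratchFile,
        reference msgs.InodeId,
        storageClass msgs.StorageClass,
        size uint32,
        crc msgs.Crc,
    ) (*msgs.AddSpanInitiateResp, error) {
        req := msgs.AddSpanInitiateWithReferenceReq{
            Req: msgs.AddSpanInitiateReq{
                FileId:       scratch.id,
                Cookie:       scratch.cookie,
                ByteOffset:   scratch.size,
                Size:         size,
                Crc:          crc,
                StorageClass: storageClass,
                Parity:       rs.MkParity(1, 0),
                Stripes:      1,
                CellSize:     size,
                Crcs:         []msgs.Crc{crc},
            },
            // Block services are chosen from this file's existing spans, so the
            // new span can later be merged into it without mixing failure domains.
            Reference: reference,
        }
        resp := msgs.AddSpanInitiateWithReferenceResp{}
        if err := client.ShardRequest(log, scratch.id.Shard(), &req, &resp); err != nil {
            return nil, err
        }
        // The inner response is a plain AddSpanInitiateResp: as in writeBlock, the
        // caller still writes the returned blocks and then sends AddSpanCertifyReq.
        return &resp.Resp, nil
    }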