Remove unnecessary template specification for lock_guard

This commit is contained in:
Alexander Bock
2021-07-30 14:48:08 +02:00
parent f6d74870bb
commit 5b3a66156c
11 changed files with 24 additions and 24 deletions
@@ -36,7 +36,7 @@ template<typename P>
void ConcurrentJobManager<P>::enqueueJob(std::shared_ptr<Job<P>> job) {
threadPool.enqueue([this, job]() {
job->execute();
- std::lock_guard<std::mutex> lock(_finishedJobsMutex);
+ std::lock_guard lock(_finishedJobsMutex);
_finishedJobs.push(job);
});
}
@@ -50,7 +50,7 @@ template<typename P>
std::shared_ptr<Job<P>> ConcurrentJobManager<P>::popFinishedJob() {
ghoul_assert(!_finishedJobs.empty(), "There is no finished job to pop!");
- std::lock_guard<std::mutex> lock(_finishedJobsMutex);
+ std::lock_guard lock(_finishedJobsMutex);
return _finishedJobs.pop();
}
+1 -1
View File
@@ -228,7 +228,7 @@ bool HttpSynchronization::trySyncFromUrl(std::string listUrl) {
return !_shouldCancel;
}
- std::lock_guard<std::mutex> guard(sizeDataMutex);
+ std::lock_guard guard(sizeDataMutex);
sizeData[line] = { p.totalBytesKnown, p.totalBytes, p.downloadedBytes };
+1 -1
View File
@@ -192,7 +192,7 @@ void UrlSynchronization::start() {
&startedAllDownloads, &nDownloads](HttpRequest::Progress p)
{
if (p.totalBytesKnown) {
- std::lock_guard<std::mutex> guard(fileSizeMutex);
+ std::lock_guard guard(fileSizeMutex);
fileSizes[url] = p.totalBytes;
if (!_nTotalBytesKnown && startedAllDownloads &&
+4 -4
View File
@@ -199,7 +199,7 @@ void ParallelPeer::sendAuthentication() {
}
void ParallelPeer::queueInMessage(const ParallelConnection::Message& message) {
- std::lock_guard<std::mutex> unqlock(_receiveBufferMutex);
+ std::lock_guard unqlock(_receiveBufferMutex);
_receiveBuffer.push_back(message);
}
@@ -221,7 +221,7 @@ void ParallelPeer::handleMessage(const ParallelConnection::Message& message) {
}
void ParallelPeer::analyzeTimeDifference(double messageTimestamp) {
- std::lock_guard<std::mutex> latencyLock(_latencyMutex);
+ std::lock_guard latencyLock(_latencyMutex);
const double timeDiff = global::windowDelegate->applicationTime() - messageTimestamp;
if (_latencyDiffs.empty()) {
@@ -235,7 +235,7 @@ void ParallelPeer::analyzeTimeDifference(double messageTimestamp) {
}
double ParallelPeer::convertTimestamp(double messageTimestamp) {
- std::lock_guard<std::mutex> latencyLock(_latencyMutex);
+ std::lock_guard latencyLock(_latencyMutex);
return messageTimestamp + _initialTimeDiff + _bufferTime;
}
@@ -495,7 +495,7 @@ void ParallelPeer::sendScript(std::string script) {
void ParallelPeer::resetTimeOffset() {
global::navigationHandler->keyframeNavigator().clearKeyframes();
global::timeManager->clearKeyframes();
- std::lock_guard<std::mutex> latencyLock(_latencyMutex);
+ std::lock_guard latencyLock(_latencyMutex);
_latencyDiffs.clear();
}
+4 -4
View File
@@ -283,7 +283,7 @@ void LoadingScreen::render() {
glm::vec2 messageLl = glm::vec2(0.f);
glm::vec2 messageUr = glm::vec2(0.f);
if (_showMessage) {
- std::lock_guard<std::mutex> guard(_messageMutex);
+ std::lock_guard guard(_messageMutex);
const glm::vec2 bboxMessage = _messageFont->boundingBox(_message);
@@ -298,7 +298,7 @@ void LoadingScreen::render() {
}
if (_showNodeNames) {
- std::lock_guard<std::mutex> guard(_itemsMutex);
+ std::lock_guard guard(_itemsMutex);
std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
@@ -485,7 +485,7 @@ void LoadingScreen::render() {
}
void LoadingScreen::postMessage(std::string message) {
- std::lock_guard<std::mutex> guard(_messageMutex);
+ std::lock_guard guard(_messageMutex);
_message = std::move(message);
}
@@ -535,7 +535,7 @@ void LoadingScreen::updateItem(const std::string& itemIdentifier,
// also would create any of the text information
return;
}
- std::lock_guard<std::mutex> guard(_itemsMutex);
+ std::lock_guard guard(_itemsMutex);
auto it = std::find_if(
_items.begin(),
+4 -4
View File
@@ -66,7 +66,7 @@ void MultiThreadedSceneInitializer::initializeNode(SceneGraphNode* node) {
}
node->initialize();
- std::lock_guard<std::mutex> g(_mutex);
+ std::lock_guard g(_mutex);
_initializedNodes.push_back(node);
_initializingNodes.erase(node);
@@ -94,19 +94,19 @@ void MultiThreadedSceneInitializer::initializeNode(SceneGraphNode* node) {
);
}
- std::lock_guard<std::mutex> g(_mutex);
+ std::lock_guard g(_mutex);
_initializingNodes.insert(node);
_threadPool.enqueue(initFunction);
}
std::vector<SceneGraphNode*> MultiThreadedSceneInitializer::takeInitializedNodes() {
- std::lock_guard<std::mutex> g(_mutex);
+ std::lock_guard g(_mutex);
std::vector<SceneGraphNode*> nodes = std::move(_initializedNodes);
return nodes;
}
bool MultiThreadedSceneInitializer::isInitializing() const {
- std::lock_guard<std::mutex> g(_mutex);
+ std::lock_guard g(_mutex);
return !_initializingNodes.empty();
}
+3 -3
View File
@@ -694,7 +694,7 @@ void ScriptEngine::preSync(bool isMaster) {
return;
}
- std::lock_guard<std::mutex> guard(_slaveScriptsMutex);
+ std::lock_guard guard(_slaveScriptsMutex);
while (!_incomingScripts.empty()) {
QueueItem item = std::move(_incomingScripts.front());
_incomingScripts.pop();
@@ -728,7 +728,7 @@ void ScriptEngine::encode(SyncBuffer* syncBuffer) {
void ScriptEngine::decode(SyncBuffer* syncBuffer) {
ZoneScoped
- std::lock_guard<std::mutex> guard(_slaveScriptsMutex);
+ std::lock_guard guard(_slaveScriptsMutex);
size_t nScripts;
syncBuffer->decode(nScripts);
@@ -757,7 +757,7 @@ void ScriptEngine::postSync(bool isMaster) {
}
}
else {
- std::lock_guard<std::mutex> guard(_slaveScriptsMutex);
+ std::lock_guard guard(_slaveScriptsMutex);
while (!_slaveScriptQueue.empty()) {
try {
runScript(_slaveScriptQueue.front());
+1 -1
View File
@@ -258,7 +258,7 @@ const glm::mat4& Camera::SgctInternal::projectionMatrix() const {
const glm::mat4& Camera::SgctInternal::viewProjectionMatrix() const {
//if (_cachedViewProjectionMatrix.isDirty) {
- std::lock_guard<std::mutex> _lock(_mutex);
+ std::lock_guard _lock(_mutex);
_cachedViewProjectionMatrix.datum = _projectionMatrix * _viewMatrix;
_cachedViewProjectionMatrix.isDirty = false;
//}
+2 -2
View File
@@ -245,7 +245,7 @@ AsyncHttpDownload::AsyncHttpDownload(AsyncHttpDownload&& d)
{}
void AsyncHttpDownload::start(HttpRequest::RequestOptions opt) {
- std::lock_guard<std::mutex> guard(_stateChangeMutex);
+ std::lock_guard guard(_stateChangeMutex);
if (hasStarted()) {
return;
}
@@ -286,7 +286,7 @@ void AsyncHttpDownload::download(HttpRequest::RequestOptions opt) {
_httpRequest.onProgress([this](HttpRequest::Progress p) {
// Return a non-zero value to cancel download
// if onProgress returns false.
- //std::lock_guard<std::mutex> guard(_mutex);
+ //std::lock_guard guard(_mutex);
const bool shouldContinue = callOnProgress(p);
if (!shouldContinue) {
return 1;
+1 -1
View File
@@ -41,7 +41,7 @@ SynchronizationWatcher::WatchHandle SynchronizationWatcher::watchSynchronization
[this, synchronization, watchHandle, cb = std::move(callback)]
(ResourceSynchronization::State state)
{
- std::lock_guard<std::mutex> g(_mutex);
+ std::lock_guard g(_mutex);
_pendingNotifications.push_back({ synchronization, state, watchHandle, cb });
}
);