From 43296979d8b5e4dca88d1301df083ce5e822d82d Mon Sep 17 00:00:00 2001
From: Mikael Pettersson
Date: Tue, 10 Dec 2019 15:15:52 +0100
Subject: [PATCH] Fixed rotation matrices in touch

Fixed the rotation matrices in the touch module. While at it, I also moved the
lev-marq solver out of touchinteraction.cpp into its own file, in an effort to
make the code easier to read. Also changed some of the logic for how touch
intersections with the scene graph nodes are computed.
---
 modules/touch/CMakeLists.txt              |   2 +
 modules/touch/include/directinputsolver.h |  71 +++++
 modules/touch/include/touchinteraction.h  |  28 +-
 modules/touch/src/directinputsolver.cpp   | 271 ++++++++++++++++++
 modules/touch/src/touchinteraction.cpp    | 333 ++++------------------
 modules/touch/src/win32_touch.cpp         |   4 +-
 modules/touch/touchmodule.cpp             |   2 +-
 7 files changed, 402 insertions(+), 309 deletions(-)
 create mode 100644 modules/touch/include/directinputsolver.h
 create mode 100644 modules/touch/src/directinputsolver.cpp

diff --git a/modules/touch/CMakeLists.txt b/modules/touch/CMakeLists.txt
index 07190880d5..d0b7ec5289 100644
--- a/modules/touch/CMakeLists.txt
+++ b/modules/touch/CMakeLists.txt
@@ -26,6 +26,7 @@ include(${OPENSPACE_CMAKE_EXT_DIR}/module_definition.cmake)
 set(HEADER_FILES
   ${CMAKE_CURRENT_SOURCE_DIR}/ext/levmarq.h
+  ${CMAKE_CURRENT_SOURCE_DIR}/include/directinputsolver.h
   ${CMAKE_CURRENT_SOURCE_DIR}/include/tuioear.h
   ${CMAKE_CURRENT_SOURCE_DIR}/include/touchinteraction.h
   ${CMAKE_CURRENT_SOURCE_DIR}/include/touchmarker.h
@@ -35,6 +36,7 @@ source_group("Header Files" FILES ${HEADER_FILES})
 set(SOURCE_FILES
   ${CMAKE_CURRENT_SOURCE_DIR}/ext/levmarq.cpp
+  ${CMAKE_CURRENT_SOURCE_DIR}/src/directinputsolver.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/src/tuioear.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/src/touchinteraction.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/src/touchmarker.cpp
diff --git a/modules/touch/include/directinputsolver.h b/modules/touch/include/directinputsolver.h
new file mode 100644
index 0000000000..418f01a535
--- /dev/null
+++ b/modules/touch/include/directinputsolver.h
@@ -0,0 +1,71 @@
+/*****************************************************************************************
+ *                                                                                       *
+ * OpenSpace                                                                             *
+ *                                                                                       *
+ * Copyright (c) 2014-2019                                                               *
+ *                                                                                       *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this  *
+ * software and associated documentation files (the "Software"), to deal in the Software *
+ * without restriction, including without limitation the rights to use, copy, modify,    *
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to    *
+ * permit persons to whom the Software is furnished to do so, subject to the following   *
+ * conditions:                                                                           *
+ *                                                                                       *
+ * The above copyright notice and this permission notice shall be included in all copies *
+ * or substantial portions of the Software.                                              *
+ *                                                                                       *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,   *
+ * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A         *
+ * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT    *
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF  *
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE  *
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* + ****************************************************************************************/ + +#ifndef __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__ +#define __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__ + + +#include + +#include + +#include + + +namespace openspace { + +class Camera; +class SceneGraphNode; + +// Stores the selected node, the cursor ID as well as the surface coordinates the +// cursor touched +struct SelectedBody { + long id; + SceneGraphNode* node; + glm::dvec3 coordinates; +}; + +class DirectInputSolver { +public: + DirectInputSolver(); + bool solve(const std::vector& list, + const std::vector& selectedBodies, + std::vector* calculatedValues, + const Camera& camera); + int getNDof() { return _nDof; } + + const LMstat& getLevMarqStat() { return _lmstat; } + void setLevMarqVerbosity(bool verbose) { _lmstat.verbose = verbose; } + +private: + int _nDof; + LMstat _lmstat; +}; + + + +} // openspace namespace + +#endif // __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__ + diff --git a/modules/touch/include/touchinteraction.h b/modules/touch/include/touchinteraction.h index f8de436b4b..5cfddec46e 100644 --- a/modules/touch/include/touchinteraction.h +++ b/modules/touch/include/touchinteraction.h @@ -27,7 +27,7 @@ #include -#include +#include #include #include @@ -38,6 +38,8 @@ #include #include +#include + //#define TOUCH_DEBUG_PROPERTIES //#define TOUCH_DEBUG_NODE_PICK_MESSAGES @@ -79,27 +81,6 @@ public: glm::dvec2 pan; }; - // Stores the selected node, the cursor ID as well as the surface coordinates the - // cursor touched - struct SelectedBody { - long id; - SceneGraphNode* node; - glm::dvec3 coordinates; - }; - - // Used in the LM algorithm - struct FunctionData { - std::vector selectedPoints; - std::vector screenPoints; - int nDOF; - glm::dvec2(*castToNDC)(const glm::dvec3&, Camera&, SceneGraphNode*); - double(*distToMinimize)(double* par, int x, void* fdata, LMstat* lmstat); - Camera* camera; - SceneGraphNode* node; - LMstat stats; - double objectScreenRadius; - }; - /* Main function call * 1 Checks if doubleTap occured * 2 Goes through the guiMode() function @@ -258,7 +239,8 @@ private: bool _guiON; std::vector _selected; SceneGraphNode* _pickingSelected = nullptr; - LMstat _lmstat; + std::unique_ptr _solver; + glm::dquat _toSlerp; glm::dvec3 _centroid; diff --git a/modules/touch/src/directinputsolver.cpp b/modules/touch/src/directinputsolver.cpp new file mode 100644 index 0000000000..43624e6af6 --- /dev/null +++ b/modules/touch/src/directinputsolver.cpp @@ -0,0 +1,271 @@ +/***************************************************************************************** + * * + * OpenSpace * + * * + * Copyright (c) 2014-2019 * + * * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this * + * software and associated documentation files (the "Software"), to deal in the Software * + * without restriction, including without limitation the rights to use, copy, modify, * + * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * + * permit persons to whom the Software is furnished to do so, subject to the following * + * conditions: * + * * + * The above copyright notice and this permission notice shall be included in all copies * + * or substantial portions of the Software. * + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, * + * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * + * PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * + ****************************************************************************************/ + +#include + + +#include +#include + + +namespace openspace { + +// Used in the LM algorithm +struct FunctionData { + std::vector selectedPoints; + std::vector screenPoints; + int nDOF; + const Camera* camera; + SceneGraphNode* node; + LMstat stats; +}; + + +DirectInputSolver::DirectInputSolver() + : _nDof(0) +{ + levmarq_init(&_lmstat); +} + +// project back a 3D point in model view to clip space [-1,1] coordinates on the view plane +glm::dvec2 castToNDC(const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) { + glm::dvec3 posInCamSpace = glm::inverse(camera.rotationQuaternion()) * + (node->worldRotationMatrix() * vec + + (node->worldPosition() - camera.positionVec3())); + + glm::dvec4 clipspace = camera.projectionMatrix() * glm::dvec4(posInCamSpace, 1.0); + return (glm::dvec2(clipspace) / clipspace.w); +} + +// Returns the screen point s(xi,par) dependent the transform M(par) and object point xi +double distToMinimize(double* par, int x, void* fdata, LMstat* lmstat) { + FunctionData* ptr = reinterpret_cast(fdata); + + // Apply transform to camera and find the new screen point of the updated camera state + + // { vec2 globalRot, zoom, roll, vec2 localRot } + double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; + for (int i = 0; i < ptr->nDOF; ++i) { + q[i] = par[i]; + } + + using namespace glm; + // Create variables from current state + dvec3 camPos = ptr->camera->positionVec3(); + dvec3 centerPos = ptr->node->worldPosition(); + + dvec3 directionToCenter = normalize(centerPos - camPos); + dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace(); + dvec3 camDirection = ptr->camera->viewDirectionWorldSpace(); + + // Make a representation of the rotation quaternion with local and global + // rotations + dmat4 lookAtMat = lookAt( + dvec3(0, 0, 0), + directionToCenter, + // To avoid problem with lookup in up direction + normalize(camDirection + lookUp)); + dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat))); + dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion(); + + { // Roll + dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0)); + localCamRot = localCamRot * rollRot; + } + { // Panning (local rotation) + dvec3 eulerAngles(q[5], q[4], 0); + dquat panRot = dquat(eulerAngles); + localCamRot = localCamRot * panRot; + } + { // Orbit (global rotation) + dvec3 eulerAngles(q[1], q[0], 0); + dquat rotationDiffCamSpace = dquat(eulerAngles); + + dvec3 centerToCamera = camPos - centerPos; + + dquat rotationDiffWorldSpace = + globalCamRot * rotationDiffCamSpace * inverse(globalCamRot); + dvec3 rotationDiffVec3 = + centerToCamera * rotationDiffWorldSpace - centerToCamera; + camPos += rotationDiffVec3; + + centerToCamera = camPos - centerPos; + directionToCenter = normalize(-centerToCamera); + dvec3 lookUpWhenFacingCenter = + globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace()); + lookAtMat = lookAt( + dvec3(0, 0, 0), + directionToCenter, + lookUpWhenFacingCenter); + globalCamRot = normalize(quat_cast(inverse(lookAtMat))); + } + { // Zooming + camPos += directionToCenter * q[2]; + } + // Update the camera state + Camera cam = *(ptr->camera); + cam.setPositionVec3(camPos); + 
cam.setRotation(globalCamRot * localCamRot); + + // we now have a new position and orientation of camera, project surfacePoint to + // the new screen to get distance to minimize + glm::dvec2 newScreenPoint = castToNDC( + ptr->selectedPoints.at(x), + cam, + ptr->node + ); + lmstat->pos.push_back(newScreenPoint); + return glm::length(ptr->screenPoints.at(x) - newScreenPoint); +} + +// Gradient of distToMinimize w.r.t par (using forward difference) +void gradient(double* g, double* par, int x, void* fdata, LMstat* lmstat) { + FunctionData* ptr = reinterpret_cast(fdata); + double f0 = distToMinimize(par, x, fdata, lmstat); + // scale value to find minimum step size h, dependant on planet size + double scale = log10(ptr->node->boundingSphere()); + std::vector dPar(ptr->nDOF, 0.0); + dPar.assign(par, par + ptr->nDOF); + + for (int i = 0; i < ptr->nDOF; ++i) { + // Initial values + double h = 1e-8; + double lastG = 1; + dPar.at(i) += h; + double f1 = distToMinimize(dPar.data(), x, fdata, lmstat); + dPar.at(i) = par[i]; + // Iterative process to find the minimum step h that gives a good gradient + for (int j = 0; j < 100; ++j) { + if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h + // scale up to get a good initial guess value + h *= scale * scale * scale; + + // clamp min step size to a fraction of the incoming parameter + if (i == 2) { + double epsilon = 1e-3; + // make sure incoming parameter is larger than 0 + h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h); + } + else if (ptr->nDOF == 2) { + h = std::max(std::abs(dPar.at(i)) * 0.001, h); + } + + // calculate f1 with good h for finite difference + dPar.at(i) += h; + f1 = distToMinimize(dPar.data(), x, fdata, lmstat); + dPar.at(i) = par[i]; + break; + } + else if ((f1 - f0) != 0 && lastG != 0) { // h too big + h /= scale; + } + else if ((f1 - f0) == 0) { // h too small + h *= scale; + } + lastG = f1 - f0; + dPar.at(i) += h; + f1 = distToMinimize(dPar.data(), x, fdata, lmstat); + dPar.at(i) = par[i]; + } + g[i] = (f1 - f0) / h; + } + if (ptr->nDOF == 2) { + // normalize on 1 finger case to allow for horizontal/vertical movement + for (int i = 0; i < 2; ++i) { + g[i] = g[i] / std::abs(g[i]); + } + } + else if (ptr->nDOF == 6) { + for (int i = 0; i < ptr->nDOF; ++i) { + // lock to only pan and zoom on 3 finger case, no roll/orbit + g[i] = (i == 2) ? g[i] : g[i] / std::abs(g[i]); + } + } +} + +bool DirectInputSolver::solve(const std::vector& list, + const std::vector& selectedBodies, + std::vector *parameters, + const Camera &camera) +{ + + int nFingers = std::min(static_cast(list.size()), 3); + _nDof = std::min(nFingers * 2, 6); + + // Parse input data to be used in the LM algorithm + std::vector selectedPoints; + std::vector screenPoints; + + for (int i = 0; i < nFingers; ++i) { + const SelectedBody& sb = selectedBodies.at(i); + selectedPoints.push_back(sb.coordinates); + screenPoints.emplace_back(2 * (list[i].getX() - 0.5), -2 * (list[i].getY() - 0.5)); + + // This might be needed when we're directing the touchtable from another screen? 
+ // std::vector::const_iterator c = std::find_if( + // list.begin(), + // list.end(), + // [&sb](const TuioCursor& c) { return c.getSessionID() == sb.id; } + // ); + // if (c != list.end()) { + // // normalized -1 to 1 coordinates on screen + // screenPoints.emplace_back(2 * (c->getX() - 0.5), -2 * (c->getY() - 0.5)); + // } + // else { + // global::moduleEngine.module()->touchInput = { + // true, + // glm::dvec2(0.0, 0.0), + // 1 + // }; + // resetAfterInput(); + // return; + // } + } + + FunctionData fData = { + selectedPoints, + screenPoints, + _nDof, + &camera, + selectedBodies.at(0).node, + _lmstat + }; + void* dataPtr = reinterpret_cast(&fData); + + bool result = levmarq( + _nDof, + parameters->data(), + static_cast(screenPoints.size()), + nullptr, + distToMinimize, + gradient, + dataPtr, + &_lmstat + ); + + return result; +} + +} // openspace namespace + diff --git a/modules/touch/src/touchinteraction.cpp b/modules/touch/src/touchinteraction.cpp index 1653b13248..f9600b9e6f 100644 --- a/modules/touch/src/touchinteraction.cpp +++ b/modules/touch/src/touchinteraction.cpp @@ -23,7 +23,9 @@ ****************************************************************************************/ #include + #include +#include #include #include @@ -320,6 +322,7 @@ TouchInteraction::TouchInteraction() , _zoomOutTap(false) , _lmSuccess(true) , _guiON(false) + , _solver(new DirectInputSolver()) #ifdef TOUCH_DEBUG_PROPERTIES , _debugProperties() #endif @@ -369,11 +372,8 @@ TouchInteraction::TouchInteraction() } }); - levmarq_init(&_lmstat); - _time.initSession(); } - // Called each frame if there is any input void TouchInteraction::updateStateFromInput(const std::vector& list, std::vector& lastProcessed) @@ -513,231 +513,21 @@ void TouchInteraction::directControl(const std::vector& list) { #ifdef TOUCH_DEBUG_PROPERTIES LINFO("DirectControl"); #endif - // Returns the screen point s(xi,par) dependent the transform M(par) and object - // point xi - auto distToMinimize = [](double* par, int x, void* fdata, LMstat* lmstat) { - FunctionData* ptr = reinterpret_cast(fdata); - - // Apply transform to camera and find the new screen point of the updated camera - // state - - // { vec2 globalRot, zoom, roll, vec2 localRot } - double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; - for (int i = 0; i < ptr->nDOF; ++i) { - q[i] = par[i]; - } - - using namespace glm; - // Create variables from current state - dvec3 camPos = ptr->camera->positionVec3(); - dvec3 centerPos = ptr->node->worldPosition(); - - dvec3 directionToCenter = normalize(centerPos - camPos); - dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace(); - dvec3 camDirection = ptr->camera->viewDirectionWorldSpace(); - - // Make a representation of the rotation quaternion with local and global - // rotations - dmat4 lookAtMat = lookAt( - dvec3(0, 0, 0), - directionToCenter, - // To avoid problem with lookup in up direction - normalize(camDirection + lookUp)); - dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat))); - dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion(); - - { // Roll - dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0)); - localCamRot = localCamRot * rollRot; - } - { // Panning (local rotation) - dvec3 eulerAngles(q[5], q[4], 0); - dquat panRot = dquat(eulerAngles); - localCamRot = localCamRot * panRot; - } - { // Orbit (global rotation) - dvec3 eulerAngles(q[1], q[0], 0); - dquat rotationDiffCamSpace = dquat(eulerAngles); - - dvec3 centerToCamera = camPos - centerPos; - - dquat rotationDiffWorldSpace = - globalCamRot * 
rotationDiffCamSpace * inverse(globalCamRot); - dvec3 rotationDiffVec3 = - centerToCamera * rotationDiffWorldSpace - centerToCamera; - camPos += rotationDiffVec3; - - centerToCamera = camPos - centerPos; - directionToCenter = normalize(-centerToCamera); - dvec3 lookUpWhenFacingCenter = - globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace()); - lookAtMat = lookAt( - dvec3(0, 0, 0), - directionToCenter, - lookUpWhenFacingCenter); - globalCamRot = normalize(quat_cast(inverse(lookAtMat))); - } - { // Zooming - camPos += directionToCenter * q[2]; - } - // Update the camera state - Camera cam = *(ptr->camera); - cam.setPositionVec3(camPos); - cam.setRotation(globalCamRot * localCamRot); - - // we now have a new position and orientation of camera, project surfacePoint to - // the new screen to get distance to minimize - glm::dvec2 newScreenPoint = ptr->castToNDC( - ptr->selectedPoints.at(x), - cam, - ptr->node - ); - lmstat->pos.push_back(newScreenPoint); - return glm::length(ptr->screenPoints.at(x) - newScreenPoint); - }; - // Gradient of distToMinimize w.r.t par (using forward difference) - auto gradient = [](double* g, double* par, int x, void* fdata, LMstat* lmstat) { - FunctionData* ptr = reinterpret_cast(fdata); - double h, lastG, f1, f0 = ptr->distToMinimize(par, x, fdata, lmstat); - // scale value to find minimum step size h, dependant on planet size - double scale = log10(ptr->node->boundingSphere()); - std::vector dPar(ptr->nDOF, 0.0); - dPar.assign(par, par + ptr->nDOF); - - for (int i = 0; i < ptr->nDOF; ++i) { - // Initial values - h = 1e-8; - lastG = 1; - dPar.at(i) += h; - f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat); - dPar.at(i) = par[i]; - // Iterative process to find the minimum step h that gives a good gradient - for (int j = 0; j < 100; ++j) { - if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h - // scale up to get a good initial guess value - h *= scale * scale * scale; - - // clamp min step size to a fraction of the incoming parameter - if (i == 2) { - double epsilon = 1e-3; - // make sure incoming parameter is larger than 0 - h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h); - } - else if (ptr->nDOF == 2) { - h = std::max(std::abs(dPar.at(i)) * 0.001, h); - } - - // calculate f1 with good h for finite difference - dPar.at(i) += h; - f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat); - dPar.at(i) = par[i]; - break; - } - else if ((f1 - f0) != 0 && lastG != 0) { // h too big - h /= scale; - } - else if ((f1 - f0) == 0) { // h too small - h *= scale; - } - lastG = f1 - f0; - dPar.at(i) += h; - f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat); - dPar.at(i) = par[i]; - } - g[i] = (f1 - f0) / h; - } - if (ptr->nDOF == 2) { - // normalize on 1 finger case to allow for horizontal/vertical movement - for (int i = 0; i < 2; ++i) { - g[i] = g[i]/std::abs(g[i]); - } - } - else if (ptr->nDOF == 6) { - for (int i = 0; i < ptr->nDOF; ++i) { - // lock to only pan and zoom on 3 finger case, no roll/orbit - g[i] = (i == 2) ? 
g[i] : g[i] / std::abs(g[i]); - } - } - }; - - // project back a 3D point in model view to clip space [-1,1] coordinates on the view - // plane - auto castToNDC = [](const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) { - glm::dvec3 posInCamSpace = glm::inverse(camera.rotationQuaternion()) * - (node->rotationMatrix() * vec + - (node->worldPosition() - camera.positionVec3())); - - glm::dvec4 clipspace = camera.projectionMatrix() * glm::dvec4(posInCamSpace, 1.0); - return (glm::dvec2(clipspace) / clipspace.w); - }; - - // only send in first three fingers (to make it easier for LMA to converge on 3+ - // finger case with only zoom/pan) - int nFingers = std::min(static_cast(list.size()), 3); - int nDOF = std::min(nFingers * 2, 6); - std::vector par(nDOF, 0.0); - par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit - par.at(1) = _lastVel.orbit.y; - - // Parse input data to be used in the LM algorithm - std::vector selectedPoints; - std::vector screenPoints; - for (int i = 0; i < nFingers; ++i) { - const SelectedBody& sb = _selected.at(i); - selectedPoints.push_back(sb.coordinates); - - std::vector::const_iterator c = std::find_if( - list.begin(), - list.end(), - [&sb](const TuioCursor& c) { return c.getSessionID() == sb.id; } - ); - if (c != list.end()) { - // normalized -1 to 1 coordinates on screen - screenPoints.emplace_back(2 * (c->getX() - 0.5), -2 * (c->getY() - 0.5)); - } - else { - global::moduleEngine.module()->touchInput = { - true, - glm::dvec2(0.0, 0.0), - 1 - }; - resetAfterInput(); - return; - } - } - - FunctionData fData = { - selectedPoints, - screenPoints, - nDOF, - castToNDC, - distToMinimize, - _camera, - _selected.at(0).node, - _lmstat, - _currentRadius - }; - void* dataPtr = reinterpret_cast(&fData); // finds best transform values for the new camera state and stores them in par - _lmSuccess = levmarq( - nDOF, - par.data(), - static_cast(screenPoints.size()), - nullptr, - distToMinimize, - gradient, - dataPtr, - &_lmstat - ); + std::vector par(6, 0.0); + par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit + par.at(1) = _lastVel.orbit.y; + _lmSuccess = _solver->solve(list, _selected, &par, *_camera); + int nDof = _solver->getNDof(); if (_lmSuccess && !_unitTest) { // if good values were found set new camera state _vel.orbit = glm::dvec2(par.at(0), par.at(1)); - if (nDOF > 2) { + if (nDof > 2) { _vel.zoom = par.at(2); _vel.roll = par.at(3); - if (_panEnabled && nDOF > 4) { + if (_panEnabled && nDof > 4) { _vel.roll = 0.0; _vel.pan = glm::dvec2(par.at(4), par.at(5)); } @@ -785,19 +575,17 @@ void TouchInteraction::findSelectedNode(const std::vector& list) { glm::dquat camToWorldSpace = _camera->rotationQuaternion(); glm::dvec3 camPos = _camera->positionVec3(); std::vector newSelected; - - struct PickingInfo { - SceneGraphNode* node; - double pickingDistanceNDC; - double pickingDistanceWorld; + + //node & distance + std::tuple currentlyPicked = { + nullptr, + std::numeric_limits::max() }; - std::vector pickingInfo; - + for (const TuioCursor& c : list) { double xCo = 2 * (c.getX() - 0.5); double yCo = -2 * (c.getY() - 0.5); // normalized -1 to 1 coordinates on screen - // vec3(projectionmatrix * clipspace), divide with w? 
glm::dvec3 cursorInWorldSpace = camToWorldSpace * glm::dvec3(glm::inverse(_camera->projectionMatrix()) * glm::dvec4(xCo, yCo, -1.0, 1.0)); @@ -808,20 +596,12 @@ void TouchInteraction::findSelectedNode(const std::vector& list) { for (SceneGraphNode* node : selectableNodes) { double boundingSphere = node->boundingSphere(); glm::dvec3 camToSelectable = node->worldPosition() - camPos; - double dist = length(glm::cross(cursorInWorldSpace, camToSelectable)) / - glm::length(cursorInWorldSpace) - boundingSphere; - if (dist <= 0.0) { - // finds intersection closest point between boundingsphere and line in - // world coordinates, assumes line direction is normalized - double d = glm::dot(raytrace, camToSelectable); - double root = boundingSphere * boundingSphere - - glm::dot(camToSelectable, camToSelectable) + d * d; - if (root > 0) { // two intersection points (take the closest one) - d -= sqrt(root); - } - glm::dvec3 intersectionPoint = camPos + d * raytrace; - glm::dvec3 pointInModelView = glm::inverse(node->rotationMatrix()) * - (intersectionPoint - node->worldPosition()); + glm::dvec3 intersectionPos = {}; + glm::dvec3 intersectionNormal = {}; + bool intersected = glm::intersectRaySphere(camPos, raytrace, node->worldPosition(), boundingSphere, intersectionPos, intersectionNormal); + if (intersected) { + glm::dvec3 pointInModelView = glm::inverse(node->worldRotationMatrix()) * + (intersectionPos - node->worldPosition()); // Add id, node and surface coordinates to the selected list std::vector::iterator oldNode = std::find_if( @@ -830,9 +610,7 @@ void TouchInteraction::findSelectedNode(const std::vector& list) { [id](SelectedBody s) { return s.id == id; } ); if (oldNode != newSelected.end()) { - double oldNodeDist = glm::length( - oldNode->node->worldPosition() - camPos - ); + double oldNodeDist = glm::length(oldNode->node->worldPosition() - camPos); if (glm::length(camToSelectable) < oldNodeDist) { // new node is closer, remove added node and add the new one // instead @@ -859,56 +637,44 @@ void TouchInteraction::findSelectedNode(const std::vector& list) { // We either want to select the object if it's bounding sphere as been // touched (checked by the first part of this loop above) or if the touch // point is within a minimum distance of the center - if (dist <= 0.0 || (ndcDist <= _pickingRadiusMinimum)) { - // If the user touched the planet directly, this is definitely the one - // they are interested in => minimum distance - if (dist <= 0.0) { + // If the user touched the planet directly, this is definitely the one + // they are interested in => minimum distance + if (intersected) { #ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES - LINFOC( - node->identifier(), - "Picking candidate based on direct touch" - ); + LINFOC( + node->identifier(), + "Picking candidate based on direct touch" + ); #endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES - pickingInfo.push_back({ - node, - -std::numeric_limits::max(), - -std::numeric_limits::max() - }); - } - else { - // The node was considered due to minimum picking distance radius + currentlyPicked = { + node, + -std::numeric_limits::max() + }; + } + else if (ndcDist <= _pickingRadiusMinimum) { + // The node was considered due to minimum picking distance radius #ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES - LINFOC( - node->identifier(), - "Picking candidate based on proximity" - ); + LINFOC( + node->identifier(), + "Picking candidate based on proximity" + ); #endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES - pickingInfo.push_back({ + double dist = length(intersectionPos - 
camPos); + if (dist < std::get<1>(currentlyPicked)) { + currentlyPicked = { node, - ndcDist, dist - }); + }; } } } } } - - // After we are done with all of the nodes, we can sort the picking list and pick the - // one that fits best (= is closest or was touched directly) - std::sort( - pickingInfo.begin(), - pickingInfo.end(), - [](const PickingInfo& lhs, const PickingInfo& rhs) { - return lhs.pickingDistanceWorld < rhs.pickingDistanceWorld; - } - ); - // If an item has been picked, it's in the first position of the vector now - if (!pickingInfo.empty()) { - _pickingSelected = pickingInfo.begin()->node; + if (SceneGraphNode* node = std::get<0>(currentlyPicked)) { + _pickingSelected = node; #ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES LINFOC("Picking", "Picked node: " + _pickingSelected->identifier()); #endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES @@ -1447,7 +1213,7 @@ void TouchInteraction::step(double dt) { void TouchInteraction::unitTest() { if (_unitTest) { - _lmstat.verbose = true; + _solver->setLevMarqVerbosity(true); // set _selected pos and new pos (on screen) std::vector lastFrame = { @@ -1468,7 +1234,7 @@ void TouchInteraction::unitTest() { snprintf(buffer, sizeof(char) * 32, "lmdata%i.csv", _numOfTests); _numOfTests++; std::ofstream file(buffer); - file << _lmstat.data; + file << _solver->getLevMarqStat().data; // clear everything _selected.clear(); @@ -1480,6 +1246,7 @@ void TouchInteraction::unitTest() { _lastVel = _vel; _unitTest = false; + _solver->setLevMarqVerbosity(false); // could be the camera copy in func } } diff --git a/modules/touch/src/win32_touch.cpp b/modules/touch/src/win32_touch.cpp index 8d248d3a63..32331c8bb3 100644 --- a/modules/touch/src/win32_touch.cpp +++ b/modules/touch/src/win32_touch.cpp @@ -69,8 +69,8 @@ LRESULT CALLBACK HookCallback(int nCode, WPARAM wParam, LPARAM lParam) { // native touch to screen conversion ScreenToClient(pStruct->hwnd, reinterpret_cast(&p)); - float xPos = (float)p.x / (float)(rect.right - rect.left); - float yPos = (float)p.y / (float)(rect.bottom - rect.top); + float xPos = static_cast(p.x) / static_cast(rect.right - rect.left); + float yPos = static_cast(p.y) / static_cast(rect.bottom - rect.top); if (pointerInfo.pointerFlags & POINTER_FLAG_DOWN) { // Handle new touchpoint gTuioServer->initFrame(TUIO::TuioTime::getSessionTime()); diff --git a/modules/touch/touchmodule.cpp b/modules/touch/touchmodule.cpp index a47e2fe4d2..f968095a44 100644 --- a/modules/touch/touchmodule.cpp +++ b/modules/touch/touchmodule.cpp @@ -164,7 +164,7 @@ TouchModule::TouchModule() if (nativeWindowHandle) { _win32TouchHook.reset(new Win32TouchHook(nativeWindowHandle)); } -#endif //WIN32 +#endif }); global::callback::deinitializeGL.push_back([&]() {
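
The picking change in findSelectedNode() replaces the hand-written bounding-sphere
distance test with glm::intersectRaySphere and expresses the hit point in model space
through the node's worldRotationMatrix() instead of the local rotationMatrix(). The
stand-alone sketch below shows the same idea in isolation; it is not code from this
patch, and NodeState/pickNode are illustrative stand-ins.

// Minimal stand-alone sketch of the new picking approach; NodeState and pickNode are
// hypothetical stand-ins, not OpenSpace types.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/intersect.hpp>

struct NodeState {
    glm::dvec3 worldPosition;
    glm::dmat3 worldRotation;  // the node's full world rotation, not just the local one
    double boundingSphere;
};

// Returns true and writes the hit point, expressed in the node's model space, when the
// touch ray from the camera hits the node's bounding sphere.
bool pickNode(const glm::dvec3& camPos, const glm::dvec3& rayDir,
              const NodeState& node, glm::dvec3& surfaceCoordinates)
{
    glm::dvec3 intersectionPos;
    glm::dvec3 intersectionNormal;
    // glm::intersectRaySphere performs the closest-hit ray/sphere test that the old
    // code computed by hand from the cross-product distance and the quadratic root
    const bool hit = glm::intersectRaySphere(
        camPos,
        glm::normalize(rayDir),  // the direction must be normalized
        node.worldPosition,
        node.boundingSphere,
        intersectionPos,
        intersectionNormal
    );
    if (!hit) {
        return false;
    }
    // Undo the full world rotation (the rotation-matrix fix) before storing the
    // surface coordinate relative to the node's center
    surfaceCoordinates =
        glm::inverse(node.worldRotation) * (intersectionPos - node.worldPosition);
    return true;
}

For the solver extraction, directControl() now only seeds a parameter vector with the
last orbit velocity and calls _solver->solve(list, _selected, &par, *_camera); par holds
the { vec2 globalRot, zoom, roll, vec2 localRot } values and is read back according to
the solver's degree-of-freedom count. The element types written out here
(std::vector<double> for par, TUIO::TuioCursor for the cursor list) are inferred from
the call sites and should be treated as an assumption.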