Merge pull request #1024 from OpenSpace/issue/1019

Issue/1019 - Fixed rotation matrices in touch
This commit is contained in:
Alexander Bock
2019-12-16 10:12:02 +01:00
committed by GitHub
7 changed files with 454 additions and 328 deletions
+2
View File
@@ -26,6 +26,7 @@ include(${OPENSPACE_CMAKE_EXT_DIR}/module_definition.cmake)
set(HEADER_FILES
${CMAKE_CURRENT_SOURCE_DIR}/ext/levmarq.h
${CMAKE_CURRENT_SOURCE_DIR}/include/directinputsolver.h
${CMAKE_CURRENT_SOURCE_DIR}/include/tuioear.h
${CMAKE_CURRENT_SOURCE_DIR}/include/touchinteraction.h
${CMAKE_CURRENT_SOURCE_DIR}/include/touchmarker.h
@@ -35,6 +36,7 @@ source_group("Header Files" FILES ${HEADER_FILES})
set(SOURCE_FILES
${CMAKE_CURRENT_SOURCE_DIR}/ext/levmarq.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/directinputsolver.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/tuioear.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/touchinteraction.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/touchmarker.cpp
+65
View File
@@ -0,0 +1,65 @@
/*****************************************************************************************
* *
* OpenSpace *
* *
* Copyright (c) 2014-2019 *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this *
* software and associated documentation files (the "Software"), to deal in the Software *
* without restriction, including without limitation the rights to use, copy, modify, *
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to *
* permit persons to whom the Software is furnished to do so, subject to the following *
* conditions: *
* *
* The above copyright notice and this permission notice shall be included in all copies *
* or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A *
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT *
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE *
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
****************************************************************************************/
#ifndef __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__
#define __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__
#include <modules/touch/ext/levmarq.h>
#include <modules/touch/ext/libTUIO11/TUIO/TuioCursor.h>
#include <vector>
namespace openspace {
class Camera;
class SceneGraphNode;
// Solves for the camera transform parameters (orbit, zoom, roll, pan) that best map
// the touched surface points back onto their current cursor screen positions, using
// the Levenberg-Marquardt implementation in ext/levmarq
class DirectInputSolver {
public:
// Stores the selected node, the cursor ID as well as the surface coordinates the
// cursor touched
struct SelectedBody {
long id;
SceneGraphNode* node;
glm::dvec3 coordinates;
};
// Initializes the internal LMstat bookkeeping used by levmarq
DirectInputSolver();
// Runs the LM minimization. 'list' supplies the current cursor positions and
// 'selectedBodies' the touched surface points (matched by index). The best-fit
// values { orbit.x, orbit.y, zoom, roll, pan.x, pan.y } are written in place into
// 'calculatedValues' (only the active degrees of freedom are updated).
// Returns true if the minimization succeeded
bool solve(const std::vector<TUIO::TuioCursor>& list,
const std::vector<SelectedBody>& selectedBodies,
std::vector<double>* calculatedValues, const Camera& camera);
// Number of degrees of freedom used by the last solve() call (2, 4 or 6)
int getNDof() const;
// Read access to the statistics gathered by levmarq during the last solve
// NOTE(review): could be declared const, but that requires the matching change in
// the .cpp definition
const LMstat& getLevMarqStat();
// Enables/disables verbose output from the LM algorithm
void setLevMarqVerbosity(bool verbose);
private:
int _nDof = 0;
LMstat _lmstat;
};
} // openspace namespace
#endif // __OPENSPACE_MODULE_TOUCH___DIRECTINPUT_SOLVER___H__
+6 -24
View File
@@ -27,7 +27,7 @@
#include <openspace/properties/propertyowner.h>
#include <modules/touch/ext/levmarq.h>
#include <modules/touch/include/directinputsolver.h>
#include <modules/touch/include/tuioear.h>
#include <openspace/properties/scalar/boolproperty.h>
@@ -38,6 +38,8 @@
#include <openspace/properties/vector/ivec2property.h>
#include <openspace/properties/vector/vec4property.h>
#include <memory>
//#define TOUCH_DEBUG_PROPERTIES
//#define TOUCH_DEBUG_NODE_PICK_MESSAGES
@@ -79,27 +81,6 @@ public:
glm::dvec2 pan;
};
// Stores the selected node, the cursor ID as well as the surface coordinates the
// cursor touched
struct SelectedBody {
long id;
SceneGraphNode* node;
glm::dvec3 coordinates;
};
// Used in the LM algorithm
struct FunctionData {
std::vector<glm::dvec3> selectedPoints;
std::vector<glm::dvec2> screenPoints;
int nDOF;
glm::dvec2(*castToNDC)(const glm::dvec3&, Camera&, SceneGraphNode*);
double(*distToMinimize)(double* par, int x, void* fdata, LMstat* lmstat);
Camera* camera;
SceneGraphNode* node;
LMstat stats;
double objectScreenRadius;
};
/* Main function call
* 1 Checks if doubleTap occured
* 2 Goes through the guiMode() function
@@ -256,9 +237,10 @@ private:
bool _zoomOutTap;
bool _lmSuccess;
bool _guiON;
std::vector<SelectedBody> _selected;
std::vector<DirectInputSolver::SelectedBody> _selected;
SceneGraphNode* _pickingSelected = nullptr;
LMstat _lmstat;
DirectInputSolver _solver;
glm::dquat _toSlerp;
glm::dvec3 _centroid;
+281
View File
@@ -0,0 +1,281 @@
/*****************************************************************************************
* *
* OpenSpace *
* *
* Copyright (c) 2014-2019 *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this *
* software and associated documentation files (the "Software"), to deal in the Software *
* without restriction, including without limitation the rights to use, copy, modify, *
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to *
* permit persons to whom the Software is furnished to do so, subject to the following *
* conditions: *
* *
* The above copyright notice and this permission notice shall be included in all copies *
* or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A *
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT *
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE *
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
****************************************************************************************/
#include <modules/touch/include/touchinteraction.h>
#include <openspace/scene/scenegraphnode.h>
#include <openspace/util/camera.h>
namespace {
// Payload threaded through levmarq's void* fdata into the distToMinimize and
// gradient callbacks below
struct FunctionData {
std::vector<glm::dvec3> selectedPoints; // touched surface points (model space)
std::vector<glm::dvec2> screenPoints; // matching cursor positions, NDC [-1, 1]
int nDOF; // active degrees of freedom (2, 4 or 6)
const openspace::Camera* camera; // camera state at the start of the solve
openspace::SceneGraphNode* node; // node currently being manipulated
LMstat stats;
};
}
namespace openspace {
// Prepares the LMstat structure before the first call to levmarq
DirectInputSolver::DirectInputSolver() {
levmarq_init(&_lmstat);
}
// Projects a model-space surface point back into normalized device coordinates
// ([-1, 1] on the view plane) for the given camera state
glm::dvec2 castToNDC(const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) {
    // Offset of the rotated point from the camera, in world space
    const glm::dvec3 worldOffset = node->worldRotationMatrix() * vec +
        (node->worldPosition() - camera.positionVec3());
    // Rotate into camera space, then apply the projection
    const glm::dvec3 camSpacePos = glm::inverse(camera.rotationQuaternion()) * worldOffset;
    const glm::dvec4 clipSpacePos =
        camera.projectionMatrix() * glm::dvec4(camSpacePos, 1.0);
    // Perspective divide yields the on-plane NDC position
    return glm::dvec2(clipSpacePos) / clipSpacePos.w;
}
// Residual function for the LM solver: applies the candidate transform parameters
// 'par' to a copy of the camera, reprojects the surface point belonging to cursor
// index 'x', and returns the screen-space distance to that cursor's actual position
double distToMinimize(double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
// Apply transform to camera and find the new screen point of the updated camera state
// Parameter layout: { vec2 globalRot, zoom, roll, vec2 localRot }; unused
// degrees of freedom stay zero
double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
for (int i = 0; i < ptr->nDOF; ++i) {
q[i] = par[i];
}
using namespace glm;
// Create variables from current state
dvec3 camPos = ptr->camera->positionVec3();
dvec3 centerPos = ptr->node->worldPosition();
dvec3 directionToCenter = normalize(centerPos - camPos);
dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace();
dvec3 camDirection = ptr->camera->viewDirectionWorldSpace();
// Make a representation of the rotation quaternion with local and global
// rotations
dmat4 lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
// To avoid problem with lookup in up direction
normalize(camDirection + lookUp));
dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
// Local rotation is whatever remains after factoring out the global part
dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion();
{ // Roll
dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0));
localCamRot = localCamRot * rollRot;
}
{ // Panning (local rotation)
dvec3 eulerAngles(q[5], q[4], 0);
dquat panRot = dquat(eulerAngles);
localCamRot = localCamRot * panRot;
}
{ // Orbit (global rotation)
dvec3 eulerAngles(q[1], q[0], 0);
dquat rotationDiffCamSpace = dquat(eulerAngles);
dvec3 centerToCamera = camPos - centerPos;
// Conjugate the camera-space rotation into world space
dquat rotationDiffWorldSpace =
globalCamRot * rotationDiffCamSpace * inverse(globalCamRot);
// NOTE(review): glm's vec * quat applies the quaternion's inverse to the
// vector; presumably intentional here -- confirm against interaction behavior
dvec3 rotationDiffVec3 = centerToCamera * rotationDiffWorldSpace - centerToCamera;
camPos += rotationDiffVec3;
// Re-derive the global rotation so the camera keeps facing the node center
centerToCamera = camPos - centerPos;
directionToCenter = normalize(-centerToCamera);
dvec3 lookUpWhenFacingCenter =
globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace());
lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
lookUpWhenFacingCenter
);
globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
}
{ // Zooming
camPos += directionToCenter * q[2];
}
// Apply the new state to a throwaway copy of the camera; the real camera is
// only updated by the caller once the solve converges
Camera cam = *(ptr->camera);
cam.setPositionVec3(camPos);
cam.setRotation(globalCamRot * localCamRot);
// we now have a new position and orientation of camera, project surfacePoint to
// the new screen to get distance to minimize
glm::dvec2 newScreenPoint = castToNDC(
ptr->selectedPoints.at(x),
cam,
ptr->node
);
lmstat->pos.push_back(newScreenPoint);
return glm::length(ptr->screenPoints.at(x) - newScreenPoint);
}
// Gradient of distToMinimize w.r.t. par, estimated with a forward difference whose
// step size h is searched for iteratively per component
void gradient(double* g, double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
// Baseline residual at the current parameters
double f0 = distToMinimize(par, x, fdata, lmstat);
// scale value to find minimum step size h, dependent on planet size
// NOTE(review): log10(boundingSphere()) is <= 0 for bounding spheres <= 1, which
// would invert the grow/shrink logic below -- confirm nodes are always larger
double scale = log10(ptr->node->boundingSphere());
std::vector<double> dPar(ptr->nDOF, 0.0);
dPar.assign(par, par + ptr->nDOF);
for (int i = 0; i < ptr->nDOF; ++i) {
// Initial values
double h = 1e-8;
double lastG = 1;
dPar.at(i) += h;
double f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
// Restore the perturbed component before the next evaluation
dPar.at(i) = par[i];
// Iterative process to find the minimum step h that gives a good gradient
for (int j = 0; j < 100; ++j) {
if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h
// scale up to get a good initial guess value
h *= scale * scale * scale;
// clamp min step size to a fraction of the incoming parameter
if (i == 2) {
double epsilon = 1e-3;
// make sure incoming parameter is larger than 0
h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h);
}
else if (ptr->nDOF == 2) {
h = std::max(std::abs(dPar.at(i)) * 0.001, h);
}
// calculate f1 with good h for finite difference
dPar.at(i) += h;
f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
break;
}
else if ((f1 - f0) != 0 && lastG != 0) { // h too big
h /= scale;
}
else if ((f1 - f0) == 0) { // h too small
h *= scale;
}
lastG = f1 - f0;
dPar.at(i) += h;
f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
}
// Forward difference quotient for component i
g[i] = (f1 - f0) / h;
}
if (ptr->nDOF == 2) {
// normalize on 1 finger case to allow for horizontal/vertical movement
// NOTE(review): g[i] == 0 here produces 0/0 = NaN -- confirm levmarq tolerates
// NaN entries in the gradient
for (int i = 0; i < 2; ++i) {
g[i] = g[i] / std::abs(g[i]);
}
}
else if (ptr->nDOF == 6) {
for (int i = 0; i < ptr->nDOF; ++i) {
// lock to only pan and zoom on 3 finger case, no roll/orbit
g[i] = (i == 2) ? g[i] : g[i] / std::abs(g[i]);
}
}
}
// Runs the Levenberg-Marquardt minimization over the active degrees of freedom and
// writes the best-fit transform values in place into 'parameters'.
// Returns true if the minimization succeeded
bool DirectInputSolver::solve(const std::vector<TUIO::TuioCursor>& list,
                              const std::vector<SelectedBody>& selectedBodies,
                              std::vector<double>* parameters, const Camera& camera)
{
    // Only the first three fingers are considered; each contributes two degrees of
    // freedom, capped at six
    const int usedFingers = std::min(static_cast<int>(list.size()), 3);
    _nDof = std::min(usedFingers * 2, 6);

    // Collect, per finger, the touched surface point and the cursor position in
    // normalized [-1, 1] screen coordinates.
    // NOTE: cursors are paired with selected bodies by index; matching by TUIO
    // session id (SelectedBody::id) might be needed when the touch table is driven
    // from another screen
    std::vector<glm::dvec3> surfacePoints;
    std::vector<glm::dvec2> cursorsNdc;
    surfacePoints.reserve(usedFingers);
    cursorsNdc.reserve(usedFingers);
    for (int i = 0; i < usedFingers; ++i) {
        const SelectedBody& body = selectedBodies.at(i);
        surfacePoints.push_back(body.coordinates);
        cursorsNdc.emplace_back(
            2 * (list[i].getX() - 0.5),
            -2 * (list[i].getY() - 0.5)
        );
    }

    // Bundle everything the LM callbacks need; handed through levmarq as void*
    FunctionData fData = {
        surfacePoints,
        cursorsNdc,
        _nDof,
        &camera,
        selectedBodies.at(0).node,
        _lmstat
    };

    return levmarq(
        _nDof,
        parameters->data(),
        static_cast<int>(cursorsNdc.size()),
        nullptr,
        distToMinimize,
        gradient,
        reinterpret_cast<void*>(&fData),
        &_lmstat
    );
}
// Degrees of freedom used by the most recent solve() call (2, 4 or 6)
int DirectInputSolver::getNDof() const {
return _nDof;
}
// Statistics collected by levmarq during the last solve
// NOTE(review): accessor could be a const member function; the header declaration
// would need the matching change
const LMstat& DirectInputSolver::getLevMarqStat() {
return _lmstat;
}
// Enables/disables verbose output from the LM algorithm
void DirectInputSolver::setLevMarqVerbosity(bool verbose) {
_lmstat.verbose = verbose;
}
} // openspace namespace
+72 -292
View File
@@ -23,7 +23,9 @@
****************************************************************************************/
#include <openspace/engine/globals.h>
#include <modules/touch/include/touchinteraction.h>
#include <modules/touch/include/directinputsolver.h>
#include <modules/imgui/imguimodule.h>
#include <openspace/interaction/orbitalnavigator.h>
@@ -369,11 +371,8 @@ TouchInteraction::TouchInteraction()
}
});
levmarq_init(&_lmstat);
_time.initSession();
}
// Called each frame if there is any input
void TouchInteraction::updateStateFromInput(const std::vector<TuioCursor>& list,
std::vector<Point>& lastProcessed)
@@ -513,231 +512,21 @@ void TouchInteraction::directControl(const std::vector<TuioCursor>& list) {
#ifdef TOUCH_DEBUG_PROPERTIES
LINFO("DirectControl");
#endif
// Returns the screen point s(xi,par) dependent the transform M(par) and object
// point xi
auto distToMinimize = [](double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
// Apply transform to camera and find the new screen point of the updated camera
// state
// { vec2 globalRot, zoom, roll, vec2 localRot }
double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
for (int i = 0; i < ptr->nDOF; ++i) {
q[i] = par[i];
}
using namespace glm;
// Create variables from current state
dvec3 camPos = ptr->camera->positionVec3();
dvec3 centerPos = ptr->node->worldPosition();
dvec3 directionToCenter = normalize(centerPos - camPos);
dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace();
dvec3 camDirection = ptr->camera->viewDirectionWorldSpace();
// Make a representation of the rotation quaternion with local and global
// rotations
dmat4 lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
// To avoid problem with lookup in up direction
normalize(camDirection + lookUp));
dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion();
{ // Roll
dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0));
localCamRot = localCamRot * rollRot;
}
{ // Panning (local rotation)
dvec3 eulerAngles(q[5], q[4], 0);
dquat panRot = dquat(eulerAngles);
localCamRot = localCamRot * panRot;
}
{ // Orbit (global rotation)
dvec3 eulerAngles(q[1], q[0], 0);
dquat rotationDiffCamSpace = dquat(eulerAngles);
dvec3 centerToCamera = camPos - centerPos;
dquat rotationDiffWorldSpace =
globalCamRot * rotationDiffCamSpace * inverse(globalCamRot);
dvec3 rotationDiffVec3 =
centerToCamera * rotationDiffWorldSpace - centerToCamera;
camPos += rotationDiffVec3;
centerToCamera = camPos - centerPos;
directionToCenter = normalize(-centerToCamera);
dvec3 lookUpWhenFacingCenter =
globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace());
lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
lookUpWhenFacingCenter);
globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
}
{ // Zooming
camPos += directionToCenter * q[2];
}
// Update the camera state
Camera cam = *(ptr->camera);
cam.setPositionVec3(camPos);
cam.setRotation(globalCamRot * localCamRot);
// we now have a new position and orientation of camera, project surfacePoint to
// the new screen to get distance to minimize
glm::dvec2 newScreenPoint = ptr->castToNDC(
ptr->selectedPoints.at(x),
cam,
ptr->node
);
lmstat->pos.push_back(newScreenPoint);
return glm::length(ptr->screenPoints.at(x) - newScreenPoint);
};
// Gradient of distToMinimize w.r.t par (using forward difference)
auto gradient = [](double* g, double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
double h, lastG, f1, f0 = ptr->distToMinimize(par, x, fdata, lmstat);
// scale value to find minimum step size h, dependant on planet size
double scale = log10(ptr->node->boundingSphere());
std::vector<double> dPar(ptr->nDOF, 0.0);
dPar.assign(par, par + ptr->nDOF);
for (int i = 0; i < ptr->nDOF; ++i) {
// Initial values
h = 1e-8;
lastG = 1;
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
// Iterative process to find the minimum step h that gives a good gradient
for (int j = 0; j < 100; ++j) {
if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h
// scale up to get a good initial guess value
h *= scale * scale * scale;
// clamp min step size to a fraction of the incoming parameter
if (i == 2) {
double epsilon = 1e-3;
// make sure incoming parameter is larger than 0
h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h);
}
else if (ptr->nDOF == 2) {
h = std::max(std::abs(dPar.at(i)) * 0.001, h);
}
// calculate f1 with good h for finite difference
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
break;
}
else if ((f1 - f0) != 0 && lastG != 0) { // h too big
h /= scale;
}
else if ((f1 - f0) == 0) { // h too small
h *= scale;
}
lastG = f1 - f0;
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
}
g[i] = (f1 - f0) / h;
}
if (ptr->nDOF == 2) {
// normalize on 1 finger case to allow for horizontal/vertical movement
for (int i = 0; i < 2; ++i) {
g[i] = g[i]/std::abs(g[i]);
}
}
else if (ptr->nDOF == 6) {
for (int i = 0; i < ptr->nDOF; ++i) {
// lock to only pan and zoom on 3 finger case, no roll/orbit
g[i] = (i == 2) ? g[i] : g[i] / std::abs(g[i]);
}
}
};
// project back a 3D point in model view to clip space [-1,1] coordinates on the view
// plane
auto castToNDC = [](const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) {
glm::dvec3 posInCamSpace = glm::inverse(camera.rotationQuaternion()) *
(node->rotationMatrix() * vec +
(node->worldPosition() - camera.positionVec3()));
glm::dvec4 clipspace = camera.projectionMatrix() * glm::dvec4(posInCamSpace, 1.0);
return (glm::dvec2(clipspace) / clipspace.w);
};
// only send in first three fingers (to make it easier for LMA to converge on 3+
// finger case with only zoom/pan)
int nFingers = std::min(static_cast<int>(list.size()), 3);
int nDOF = std::min(nFingers * 2, 6);
std::vector<double> par(nDOF, 0.0);
par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit
par.at(1) = _lastVel.orbit.y;
// Parse input data to be used in the LM algorithm
std::vector<glm::dvec3> selectedPoints;
std::vector<glm::dvec2> screenPoints;
for (int i = 0; i < nFingers; ++i) {
const SelectedBody& sb = _selected.at(i);
selectedPoints.push_back(sb.coordinates);
std::vector<TuioCursor>::const_iterator c = std::find_if(
list.begin(),
list.end(),
[&sb](const TuioCursor& c) { return c.getSessionID() == sb.id; }
);
if (c != list.end()) {
// normalized -1 to 1 coordinates on screen
screenPoints.emplace_back(2 * (c->getX() - 0.5), -2 * (c->getY() - 0.5));
}
else {
global::moduleEngine.module<ImGUIModule>()->touchInput = {
true,
glm::dvec2(0.0, 0.0),
1
};
resetAfterInput();
return;
}
}
FunctionData fData = {
selectedPoints,
screenPoints,
nDOF,
castToNDC,
distToMinimize,
_camera,
_selected.at(0).node,
_lmstat,
_currentRadius
};
void* dataPtr = reinterpret_cast<void*>(&fData);
// finds best transform values for the new camera state and stores them in par
_lmSuccess = levmarq(
nDOF,
par.data(),
static_cast<int>(screenPoints.size()),
nullptr,
distToMinimize,
gradient,
dataPtr,
&_lmstat
);
std::vector<double> par(6, 0.0);
par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit
par.at(1) = _lastVel.orbit.y;
_lmSuccess = _solver.solve(list, _selected, &par, *_camera);
int nDof = _solver.getNDof();
if (_lmSuccess && !_unitTest) {
// if good values were found set new camera state
_vel.orbit = glm::dvec2(par.at(0), par.at(1));
if (nDOF > 2) {
if (nDof > 2) {
_vel.zoom = par.at(2);
_vel.roll = par.at(3);
if (_panEnabled && nDOF > 4) {
if (_panEnabled && nDof > 4) {
_vel.roll = 0.0;
_vel.pan = glm::dvec2(par.at(4), par.at(5));
}
@@ -784,20 +573,18 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
glm::dquat camToWorldSpace = _camera->rotationQuaternion();
glm::dvec3 camPos = _camera->positionVec3();
std::vector<SelectedBody> newSelected;
struct PickingInfo {
SceneGraphNode* node;
double pickingDistanceNDC;
double pickingDistanceWorld;
std::vector<DirectInputSolver::SelectedBody> newSelected;
//node & distance
std::tuple<SceneGraphNode*, double> currentlyPicked = {
nullptr,
std::numeric_limits<double>::max()
};
std::vector<PickingInfo> pickingInfo;
for (const TuioCursor& c : list) {
double xCo = 2 * (c.getX() - 0.5);
double yCo = -2 * (c.getY() - 0.5); // normalized -1 to 1 coordinates on screen
// vec3(projectionmatrix * clipspace), divide with w?
glm::dvec3 cursorInWorldSpace = camToWorldSpace *
glm::dvec3(glm::inverse(_camera->projectionMatrix()) *
glm::dvec4(xCo, yCo, -1.0, 1.0));
@@ -806,28 +593,27 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
long id = c.getSessionID();
for (SceneGraphNode* node : selectableNodes) {
double boundingSphere = node->boundingSphere();
double boundingSphereSquared = static_cast<double>(node->boundingSphere()) *
static_cast<double>(node->boundingSphere());
glm::dvec3 camToSelectable = node->worldPosition() - camPos;
double dist = length(glm::cross(cursorInWorldSpace, camToSelectable)) /
glm::length(cursorInWorldSpace) - boundingSphere;
if (dist <= 0.0) {
// finds intersection closest point between boundingsphere and line in
// world coordinates, assumes line direction is normalized
double d = glm::dot(raytrace, camToSelectable);
double root = boundingSphere * boundingSphere -
glm::dot(camToSelectable, camToSelectable) + d * d;
if (root > 0) { // two intersection points (take the closest one)
d -= sqrt(root);
}
glm::dvec3 intersectionPoint = camPos + d * raytrace;
glm::dvec3 pointInModelView = glm::inverse(node->rotationMatrix()) *
(intersectionPoint - node->worldPosition());
double intersectionDist = 0.0;
bool intersected = glm::intersectRaySphere(
camPos,
raytrace,
node->worldPosition(),
boundingSphereSquared,
intersectionDist
);
if (intersected) {
glm::dvec3 intersectionPos = camPos + raytrace * intersectionDist;
glm::dvec3 pointInModelView = glm::inverse(node->worldRotationMatrix()) *
(intersectionPos - node->worldPosition());
// Add id, node and surface coordinates to the selected list
std::vector<SelectedBody>::iterator oldNode = std::find_if(
auto oldNode = std::find_if(
newSelected.begin(),
newSelected.end(),
[id](SelectedBody s) { return s.id == id; }
[id](const DirectInputSolver::SelectedBody& s) { return s.id == id; }
);
if (oldNode != newSelected.end()) {
double oldNodeDist = glm::length(
@@ -859,56 +645,44 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
// We either want to select the object if it's bounding sphere as been
// touched (checked by the first part of this loop above) or if the touch
// point is within a minimum distance of the center
if (dist <= 0.0 || (ndcDist <= _pickingRadiusMinimum)) {
// If the user touched the planet directly, this is definitely the one
// they are interested in => minimum distance
if (dist <= 0.0) {
// If the user touched the planet directly, this is definitely the one
// they are interested in => minimum distance
if (intersected) {
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC(
node->identifier(),
"Picking candidate based on direct touch"
);
LINFOC(
node->identifier(),
"Picking candidate based on direct touch"
);
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
pickingInfo.push_back({
node,
-std::numeric_limits<double>::max(),
-std::numeric_limits<double>::max()
});
}
else {
// The node was considered due to minimum picking distance radius
currentlyPicked = {
node,
-std::numeric_limits<double>::max()
};
}
else if (ndcDist <= _pickingRadiusMinimum) {
// The node was considered due to minimum picking distance radius
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC(
node->identifier(),
"Picking candidate based on proximity"
);
LINFOC(
node->identifier(),
"Picking candidate based on proximity"
);
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
pickingInfo.push_back({
double dist = length(camToSelectable);
if (dist < std::get<1>(currentlyPicked)) {
currentlyPicked = {
node,
ndcDist,
dist
});
};
}
}
}
}
}
// After we are done with all of the nodes, we can sort the picking list and pick the
// one that fits best (= is closest or was touched directly)
std::sort(
pickingInfo.begin(),
pickingInfo.end(),
[](const PickingInfo& lhs, const PickingInfo& rhs) {
return lhs.pickingDistanceWorld < rhs.pickingDistanceWorld;
}
);
// If an item has been picked, it's in the first position of the vector now
if (!pickingInfo.empty()) {
_pickingSelected = pickingInfo.begin()->node;
if (SceneGraphNode* node = std::get<0>(currentlyPicked)) {
_pickingSelected = node;
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC("Picking", "Picked node: " + _pickingSelected->identifier());
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
@@ -1360,8 +1134,11 @@ void TouchInteraction::step(double dt) {
else if (_zoomInLimit.value() < zoomInBounds) {
// If zoom in limit is less than the estimated node radius we need to
// make sure we do not get too close to possible height maps
SurfacePositionHandle posHandle = anchor->calculateSurfacePositionHandle(camPos);
glm::dvec3 centerToActualSurfaceModelSpace = posHandle.centerToReferenceSurface +
SurfacePositionHandle posHandle = anchor->calculateSurfacePositionHandle(
camPos
);
glm::dvec3 centerToActualSurfaceModelSpace =
posHandle.centerToReferenceSurface +
posHandle.referenceSurfaceOutDirection * posHandle.heightToSurface;
glm::dvec3 centerToActualSurface = glm::dmat3(anchor->modelTransform()) *
centerToActualSurfaceModelSpace;
@@ -1370,9 +1147,8 @@ void TouchInteraction::step(double dt) {
// Because of heightmaps we should make sure we do not go through the surface
if (_zoomInLimit.value() < nodeRadius) {
#ifdef TOUCH_DEBUG_PROPERTIES
LINFO(fmt::format(
"{}: Zoom In Limit should be larger than anchor center to surface, setting it to {}",
_loggerCat, zoomInBounds));
LINFO(fmt::format("{}: Zoom In limit should be larger than anchor "
"center to surface, setting it to {}", _loggerCat, zoomInBounds));
#endif
_zoomInLimit.setValue(zoomInBounds);
}
@@ -1392,9 +1168,12 @@ void TouchInteraction::step(double dt) {
double currentPosDistance = length(centerToCamera);
// Possible with other navigations performed outside touch interaction
bool currentPosViolatingZoomOutLimit = (currentPosDistance >= _zoomOutLimit.value());
bool willNewPositionViolateZoomOutLimit = (newPosDistance >= _zoomOutLimit.value());
bool willNewPositionViolateZoomInLimit = (newPosDistance < _zoomInLimit.value());
bool currentPosViolatingZoomOutLimit =
(currentPosDistance >= _zoomOutLimit.value());
bool willNewPositionViolateZoomOutLimit =
(newPosDistance >= _zoomOutLimit.value());
bool willNewPositionViolateZoomInLimit =
(newPosDistance < _zoomInLimit.value());
if (!willNewPositionViolateZoomInLimit && !willNewPositionViolateZoomOutLimit){
camPos += zoomDistanceIncrement;
@@ -1447,7 +1226,7 @@ void TouchInteraction::step(double dt) {
void TouchInteraction::unitTest() {
if (_unitTest) {
_lmstat.verbose = true;
_solver.setLevMarqVerbosity(true);
// set _selected pos and new pos (on screen)
std::vector<TuioCursor> lastFrame = {
@@ -1468,7 +1247,7 @@ void TouchInteraction::unitTest() {
snprintf(buffer, sizeof(char) * 32, "lmdata%i.csv", _numOfTests);
_numOfTests++;
std::ofstream file(buffer);
file << _lmstat.data;
file << _solver.getLevMarqStat().data;
// clear everything
_selected.clear();
@@ -1480,6 +1259,7 @@ void TouchInteraction::unitTest() {
_lastVel = _vel;
_unitTest = false;
_solver.setLevMarqVerbosity(false);
// could be the camera copy in func
}
}
+27 -11
View File
@@ -28,11 +28,8 @@
#include <openspace/engine/openspaceengine.h>
#include <openspace/engine/windowdelegate.h>
#include <ghoul/logging/logmanager.h>
#include <TUIO/TuioServer.h>
#include <tchar.h>
#include <tpcshrd.h>
@@ -42,7 +39,7 @@ namespace {
bool gStarted{ false };
TUIO::TuioServer* gTuioServer{ nullptr };
std::unordered_map<UINT, TUIO::TuioCursor*> gCursorMap;
}
} // namespace
namespace openspace {
@@ -69,22 +66,32 @@ LRESULT CALLBACK HookCallback(int nCode, WPARAM wParam, LPARAM lParam) {
// native touch to screen conversion
ScreenToClient(pStruct->hwnd, reinterpret_cast<LPPOINT>(&p));
float xPos = (float)p.x / (float)(rect.right - rect.left);
float yPos = (float)p.y / (float)(rect.bottom - rect.top);
float xPos = static_cast<float>(p.x) /
static_cast<float>(rect.right - rect.left);
float yPos = static_cast<float>(p.y) /
static_cast<float>(rect.bottom - rect.top);
if (pointerInfo.pointerFlags & POINTER_FLAG_DOWN) {
// Handle new touchpoint
gTuioServer->initFrame(TUIO::TuioTime::getSessionTime());
gCursorMap[pointerInfo.pointerId] = gTuioServer->addTuioCursor(xPos, yPos);
gCursorMap[pointerInfo.pointerId] = gTuioServer->addTuioCursor(
xPos,
yPos
);
gTuioServer->commitFrame();
}
else if (pointerInfo.pointerFlags & POINTER_FLAG_UPDATE) {
// Handle update of touchpoint
TUIO::TuioTime frameTime = TUIO::TuioTime::getSessionTime();
if (gCursorMap[pointerInfo.pointerId]->getTuioTime() == frameTime) {
if (gCursorMap[pointerInfo.pointerId]->getTuioTime() == frameTime)
{
break;
}
gTuioServer->initFrame(frameTime);
gTuioServer->updateTuioCursor(gCursorMap[pointerInfo.pointerId], xPos, yPos);
gTuioServer->updateTuioCursor(
gCursorMap[pointerInfo.pointerId],
xPos,
yPos
);
gTuioServer->commitFrame();
}
else if (pointerInfo.pointerFlags & POINTER_FLAG_UP) {
@@ -139,14 +146,23 @@ Win32TouchHook::Win32TouchHook(void* nativeWindow)
const DWORD dwHwndTabletProperty = TABLET_DISABLE_PRESSANDHOLD;
ATOM atom = ::GlobalAddAtom(MICROSOFT_TABLETPENSERVICE_PROPERTY);
::SetProp(hWnd, MICROSOFT_TABLETPENSERVICE_PROPERTY, reinterpret_cast<HANDLE>(dwHwndTabletProperty));
::SetProp(
hWnd,
MICROSOFT_TABLETPENSERVICE_PROPERTY,
reinterpret_cast<HANDLE>(dwHwndTabletProperty)
);
::GlobalDeleteAtom(atom);
if (!gStarted) {
gStarted = true;
gTuioServer = new TUIO::TuioServer("localhost", 3333);
TUIO::TuioTime::initSession();
gTouchHook = SetWindowsHookExW(WH_GETMESSAGE, HookCallback, GetModuleHandleW(NULL), GetCurrentThreadId());
gTouchHook = SetWindowsHookExW(
WH_GETMESSAGE,
HookCallback,
GetModuleHandleW(NULL),
GetCurrentThreadId()
);
if (!gTouchHook) {
LINFO(fmt::format("Failed to setup WindowsHook for touch input redirection"));
delete gTuioServer;
+1 -1
View File
@@ -164,7 +164,7 @@ TouchModule::TouchModule()
if (nativeWindowHandle) {
_win32TouchHook.reset(new Win32TouchHook(nativeWindowHandle));
}
#endif //WIN32
#endif
});
global::callback::deinitializeGL.push_back([&]() {