Fixed rotation matrices in touch

Fixed rotation matrices in the touch module. While at it, I also moved the
Levenberg-Marquardt solver out of touchinteraction.cpp into its own file, in
an effort to make the code easier to read.

Also changed some of the logic for how touch intersections with the scene
graph nodes are computed.
Mikael Pettersson
2019-12-10 15:15:52 +01:00
parent 9a50b27b27
commit 43296979d8
7 changed files with 402 additions and 309 deletions
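For context, the intersection change replaces a hand-rolled bounding-sphere test with GLM's ray-sphere helper. A minimal standalone sketch of that call (assuming GLM's experimental glm/gtx/intersect.hpp extension; the function and variable names here are illustrative, not part of the commit):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/intersect.hpp>

// Returns true and fills 'hit' if the normalized ray from 'origin' along
// 'dir' intersects the sphere at 'center' with radius 'r'
bool pickSphere(const glm::dvec3& origin, const glm::dvec3& dir,
                const glm::dvec3& center, double r, glm::dvec3& hit)
{
    glm::dvec3 normal; // surface normal at the hit point (unused here)
    return glm::intersectRaySphere(origin, glm::normalize(dir), center, r, hit, normal);
}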

View File

@@ -0,0 +1,271 @@
/*****************************************************************************************
* *
* OpenSpace *
* *
* Copyright (c) 2014-2019 *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this *
* software and associated documentation files (the "Software"), to deal in the Software *
* without restriction, including without limitation the rights to use, copy, modify, *
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to *
* permit persons to whom the Software is furnished to do so, subject to the following *
* conditions: *
* *
* The above copyright notice and this permission notice shall be included in all copies *
* or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A *
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT *
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE *
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *
****************************************************************************************/
#include <modules/touch/include/directinputsolver.h>
#include <modules/touch/include/touchinteraction.h>
#include <openspace/scene/scenegraphnode.h>
#include <openspace/util/camera.h>
#include <algorithm>
#include <cmath>
namespace openspace {
// Used in the LM algorithm
struct FunctionData {
std::vector<glm::dvec3> selectedPoints;
std::vector<glm::dvec2> screenPoints;
int nDOF;
const Camera* camera;
SceneGraphNode* node;
LMstat stats;
};
DirectInputSolver::DirectInputSolver()
: _nDof(0)
{
levmarq_init(&_lmstat);
}
// Projects a 3D point in model space to normalized device coordinates [-1, 1] on the view plane
glm::dvec2 castToNDC(const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) {
glm::dvec3 posInCamSpace = glm::inverse(camera.rotationQuaternion()) *
(node->worldRotationMatrix() * vec +
(node->worldPosition() - camera.positionVec3()));
glm::dvec4 clipspace = camera.projectionMatrix() * glm::dvec4(posInCamSpace, 1.0);
return (glm::dvec2(clipspace) / clipspace.w);
}
// Computes the screen point s(xi, par) from the transform M(par) and object point xi,
// and returns its distance to the corresponding target screen point
double distToMinimize(double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
// Apply transform to camera and find the new screen point of the updated camera state
// { vec2 globalRot, zoom, roll, vec2 localRot }
double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
for (int i = 0; i < ptr->nDOF; ++i) {
q[i] = par[i];
}
using namespace glm;
// Create variables from current state
dvec3 camPos = ptr->camera->positionVec3();
dvec3 centerPos = ptr->node->worldPosition();
dvec3 directionToCenter = normalize(centerPos - camPos);
dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace();
dvec3 camDirection = ptr->camera->viewDirectionWorldSpace();
// Make a representation of the rotation quaternion with local and global
// rotations
dmat4 lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
// To avoid problems with lookAt when looking along the up direction
normalize(camDirection + lookUp));
dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion();
{ // Roll
dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0));
localCamRot = localCamRot * rollRot;
}
{ // Panning (local rotation)
dvec3 eulerAngles(q[5], q[4], 0);
dquat panRot = dquat(eulerAngles);
localCamRot = localCamRot * panRot;
}
{ // Orbit (global rotation)
dvec3 eulerAngles(q[1], q[0], 0);
dquat rotationDiffCamSpace = dquat(eulerAngles);
dvec3 centerToCamera = camPos - centerPos;
dquat rotationDiffWorldSpace =
globalCamRot * rotationDiffCamSpace * inverse(globalCamRot);
dvec3 rotationDiffVec3 =
centerToCamera * rotationDiffWorldSpace - centerToCamera;
camPos += rotationDiffVec3;
centerToCamera = camPos - centerPos;
directionToCenter = normalize(-centerToCamera);
dvec3 lookUpWhenFacingCenter =
globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace());
lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
lookUpWhenFacingCenter);
globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
}
{ // Zooming
camPos += directionToCenter * q[2];
}
// Update the camera state
Camera cam = *(ptr->camera);
cam.setPositionVec3(camPos);
cam.setRotation(globalCamRot * localCamRot);
// we now have a new position and orientation of camera, project surfacePoint to
// the new screen to get distance to minimize
glm::dvec2 newScreenPoint = castToNDC(
ptr->selectedPoints.at(x),
cam,
ptr->node
);
lmstat->pos.push_back(newScreenPoint);
return glm::length(ptr->screenPoints.at(x) - newScreenPoint);
}
// Gradient of distToMinimize w.r.t par (using forward difference)
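// i.e. g[i] ~ (f(par + h * e_i) - f(par)) / h, where the step size h is tuned
// per component in the loop below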
void gradient(double* g, double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
double f0 = distToMinimize(par, x, fdata, lmstat);
// scale value to find minimum step size h, dependent on planet size
double scale = log10(ptr->node->boundingSphere());
std::vector<double> dPar(ptr->nDOF, 0.0);
dPar.assign(par, par + ptr->nDOF);
for (int i = 0; i < ptr->nDOF; ++i) {
// Initial values
double h = 1e-8;
double lastG = 1;
dPar.at(i) += h;
double f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
// Iterative process to find the minimum step h that gives a good gradient
for (int j = 0; j < 100; ++j) {
if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h
// scale up to get a good initial guess value
h *= scale * scale * scale;
// clamp min step size to a fraction of the incoming parameter
if (i == 2) {
double epsilon = 1e-3;
// make sure incoming parameter is larger than 0
h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h);
}
else if (ptr->nDOF == 2) {
h = std::max(std::abs(dPar.at(i)) * 0.001, h);
}
// calculate f1 with good h for finite difference
dPar.at(i) += h;
f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
break;
}
else if ((f1 - f0) != 0 && lastG != 0) { // h too big
h /= scale;
}
else if ((f1 - f0) == 0) { // h too small
h *= scale;
}
lastG = f1 - f0;
dPar.at(i) += h;
f1 = distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
}
g[i] = (f1 - f0) / h;
}
if (ptr->nDOF == 2) {
// normalize on 1 finger case to allow for horizontal/vertical movement
for (int i = 0; i < 2; ++i) {
g[i] = g[i] / std::abs(g[i]);
}
}
else if (ptr->nDOF == 6) {
for (int i = 0; i < ptr->nDOF; ++i) {
// lock to only pan and zoom on 3 finger case, no roll/orbit
g[i] = (i == 2) ? g[i] : g[i] / std::abs(g[i]);
}
}
}
bool DirectInputSolver::solve(const std::vector<TUIO::TuioCursor>& list,
const std::vector<SelectedBody>& selectedBodies,
std::vector<double>* parameters,
const Camera& camera)
{
int nFingers = std::min(static_cast<int>(list.size()), 3);
_nDof = std::min(nFingers * 2, 6);
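// Each finger contributes two degrees of freedom, capped at six:
// 1 finger -> orbit (q[0], q[1]), 2 fingers add zoom + roll (q[2], q[3]),
// 3 fingers add pan (q[4], q[5])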
// Parse input data to be used in the LM algorithm
std::vector<glm::dvec3> selectedPoints;
std::vector<glm::dvec2> screenPoints;
for (int i = 0; i < nFingers; ++i) {
const SelectedBody& sb = selectedBodies.at(i);
selectedPoints.push_back(sb.coordinates);
screenPoints.emplace_back(2 * (list[i].getX() - 0.5), -2 * (list[i].getY() - 0.5));
// This might be needed when we're driving the touch table from another screen?
// std::vector<TuioCursor>::const_iterator c = std::find_if(
// list.begin(),
// list.end(),
// [&sb](const TuioCursor& c) { return c.getSessionID() == sb.id; }
// );
// if (c != list.end()) {
// // normalized -1 to 1 coordinates on screen
// screenPoints.emplace_back(2 * (c->getX() - 0.5), -2 * (c->getY() - 0.5));
// }
// else {
// global::moduleEngine.module<ImGUIModule>()->touchInput = {
// true,
// glm::dvec2(0.0, 0.0),
// 1
// };
// resetAfterInput();
// return;
// }
}
FunctionData fData = {
selectedPoints,
screenPoints,
_nDof,
&camera,
selectedBodies.at(0).node,
_lmstat
};
void* dataPtr = reinterpret_cast<void*>(&fData);
bool result = levmarq(
_nDof,
parameters->data(),
static_cast<int>(screenPoints.size()),
nullptr,
distToMinimize,
gradient,
dataPtr,
&_lmstat
);
return result;
}
} // namespace openspace
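The call site in TouchInteraction::directControl (next file) reduces to a few lines. A hedged sketch of the intended usage, where cursors, selected and camera stand in for the live touch input, picked bodies and camera owned by TouchInteraction:

DirectInputSolver solver;
// { orbit.xy, zoom, roll, pan.xy }; the solver only writes the first
// getNDof() entries, seeded here with zeros
std::vector<double> par(6, 0.0);
bool success = solver.solve(cursors, selected, &par, camera);
if (success) {
    int nDof = solver.getNDof();
    // apply par[0 .. nDof-1] to the camera velocities
}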

View File

@@ -23,7 +23,9 @@
****************************************************************************************/
#include <openspace/engine/globals.h>
#include <modules/touch/include/touchinteraction.h>
#include <modules/touch/include/directinputsolver.h>
#include <modules/imgui/imguimodule.h>
#include <openspace/interaction/orbitalnavigator.h>
@@ -320,6 +322,7 @@ TouchInteraction::TouchInteraction()
, _zoomOutTap(false)
, _lmSuccess(true)
, _guiON(false)
, _solver(new DirectInputSolver())
#ifdef TOUCH_DEBUG_PROPERTIES
, _debugProperties()
#endif
@@ -369,11 +372,8 @@ TouchInteraction::TouchInteraction()
}
});
levmarq_init(&_lmstat);
_time.initSession();
}
// Called each frame if there is any input
void TouchInteraction::updateStateFromInput(const std::vector<TuioCursor>& list,
std::vector<Point>& lastProcessed)
@@ -513,231 +513,21 @@ void TouchInteraction::directControl(const std::vector<TuioCursor>& list) {
#ifdef TOUCH_DEBUG_PROPERTIES
LINFO("DirectControl");
#endif
// Returns the screen point s(xi,par) dependent the transform M(par) and object
// point xi
auto distToMinimize = [](double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
// Apply transform to camera and find the new screen point of the updated camera
// state
// { vec2 globalRot, zoom, roll, vec2 localRot }
double q[6] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
for (int i = 0; i < ptr->nDOF; ++i) {
q[i] = par[i];
}
using namespace glm;
// Create variables from current state
dvec3 camPos = ptr->camera->positionVec3();
dvec3 centerPos = ptr->node->worldPosition();
dvec3 directionToCenter = normalize(centerPos - camPos);
dvec3 lookUp = ptr->camera->lookUpVectorWorldSpace();
dvec3 camDirection = ptr->camera->viewDirectionWorldSpace();
// Make a representation of the rotation quaternion with local and global
// rotations
dmat4 lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
// To avoid problem with lookup in up direction
normalize(camDirection + lookUp));
dquat globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
dquat localCamRot = inverse(globalCamRot) * ptr->camera->rotationQuaternion();
{ // Roll
dquat rollRot = angleAxis(q[3], dvec3(0.0, 0.0, 1.0));
localCamRot = localCamRot * rollRot;
}
{ // Panning (local rotation)
dvec3 eulerAngles(q[5], q[4], 0);
dquat panRot = dquat(eulerAngles);
localCamRot = localCamRot * panRot;
}
{ // Orbit (global rotation)
dvec3 eulerAngles(q[1], q[0], 0);
dquat rotationDiffCamSpace = dquat(eulerAngles);
dvec3 centerToCamera = camPos - centerPos;
dquat rotationDiffWorldSpace =
globalCamRot * rotationDiffCamSpace * inverse(globalCamRot);
dvec3 rotationDiffVec3 =
centerToCamera * rotationDiffWorldSpace - centerToCamera;
camPos += rotationDiffVec3;
centerToCamera = camPos - centerPos;
directionToCenter = normalize(-centerToCamera);
dvec3 lookUpWhenFacingCenter =
globalCamRot * dvec3(ptr->camera->lookUpVectorCameraSpace());
lookAtMat = lookAt(
dvec3(0, 0, 0),
directionToCenter,
lookUpWhenFacingCenter);
globalCamRot = normalize(quat_cast(inverse(lookAtMat)));
}
{ // Zooming
camPos += directionToCenter * q[2];
}
// Update the camera state
Camera cam = *(ptr->camera);
cam.setPositionVec3(camPos);
cam.setRotation(globalCamRot * localCamRot);
// we now have a new position and orientation of camera, project surfacePoint to
// the new screen to get distance to minimize
glm::dvec2 newScreenPoint = ptr->castToNDC(
ptr->selectedPoints.at(x),
cam,
ptr->node
);
lmstat->pos.push_back(newScreenPoint);
return glm::length(ptr->screenPoints.at(x) - newScreenPoint);
};
// Gradient of distToMinimize w.r.t par (using forward difference)
auto gradient = [](double* g, double* par, int x, void* fdata, LMstat* lmstat) {
FunctionData* ptr = reinterpret_cast<FunctionData*>(fdata);
double h, lastG, f1, f0 = ptr->distToMinimize(par, x, fdata, lmstat);
// scale value to find minimum step size h, dependant on planet size
double scale = log10(ptr->node->boundingSphere());
std::vector<double> dPar(ptr->nDOF, 0.0);
dPar.assign(par, par + ptr->nDOF);
for (int i = 0; i < ptr->nDOF; ++i) {
// Initial values
h = 1e-8;
lastG = 1;
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
// Iterative process to find the minimum step h that gives a good gradient
for (int j = 0; j < 100; ++j) {
if ((f1 - f0) != 0 && lastG == 0) { // found minimum step size h
// scale up to get a good initial guess value
h *= scale * scale * scale;
// clamp min step size to a fraction of the incoming parameter
if (i == 2) {
double epsilon = 1e-3;
// make sure incoming parameter is larger than 0
h = std::max(std::max(std::abs(dPar.at(i)), epsilon) * 0.001, h);
}
else if (ptr->nDOF == 2) {
h = std::max(std::abs(dPar.at(i)) * 0.001, h);
}
// calculate f1 with good h for finite difference
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
break;
}
else if ((f1 - f0) != 0 && lastG != 0) { // h too big
h /= scale;
}
else if ((f1 - f0) == 0) { // h too small
h *= scale;
}
lastG = f1 - f0;
dPar.at(i) += h;
f1 = ptr->distToMinimize(dPar.data(), x, fdata, lmstat);
dPar.at(i) = par[i];
}
g[i] = (f1 - f0) / h;
}
if (ptr->nDOF == 2) {
// normalize on 1 finger case to allow for horizontal/vertical movement
for (int i = 0; i < 2; ++i) {
g[i] = g[i]/std::abs(g[i]);
}
}
else if (ptr->nDOF == 6) {
for (int i = 0; i < ptr->nDOF; ++i) {
// lock to only pan and zoom on 3 finger case, no roll/orbit
g[i] = (i == 2) ? g[i] : g[i] / std::abs(g[i]);
}
}
};
// project back a 3D point in model view to clip space [-1,1] coordinates on the view
// plane
auto castToNDC = [](const glm::dvec3& vec, Camera& camera, SceneGraphNode* node) {
glm::dvec3 posInCamSpace = glm::inverse(camera.rotationQuaternion()) *
(node->rotationMatrix() * vec +
(node->worldPosition() - camera.positionVec3()));
glm::dvec4 clipspace = camera.projectionMatrix() * glm::dvec4(posInCamSpace, 1.0);
return (glm::dvec2(clipspace) / clipspace.w);
};
// only send in first three fingers (to make it easier for LMA to converge on 3+
// finger case with only zoom/pan)
int nFingers = std::min(static_cast<int>(list.size()), 3);
int nDOF = std::min(nFingers * 2, 6);
std::vector<double> par(nDOF, 0.0);
par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit
par.at(1) = _lastVel.orbit.y;
// Parse input data to be used in the LM algorithm
std::vector<glm::dvec3> selectedPoints;
std::vector<glm::dvec2> screenPoints;
for (int i = 0; i < nFingers; ++i) {
const SelectedBody& sb = _selected.at(i);
selectedPoints.push_back(sb.coordinates);
std::vector<TuioCursor>::const_iterator c = std::find_if(
list.begin(),
list.end(),
[&sb](const TuioCursor& c) { return c.getSessionID() == sb.id; }
);
if (c != list.end()) {
// normalized -1 to 1 coordinates on screen
screenPoints.emplace_back(2 * (c->getX() - 0.5), -2 * (c->getY() - 0.5));
}
else {
global::moduleEngine.module<ImGUIModule>()->touchInput = {
true,
glm::dvec2(0.0, 0.0),
1
};
resetAfterInput();
return;
}
}
FunctionData fData = {
selectedPoints,
screenPoints,
nDOF,
castToNDC,
distToMinimize,
_camera,
_selected.at(0).node,
_lmstat,
_currentRadius
};
void* dataPtr = reinterpret_cast<void*>(&fData);
// finds best transform values for the new camera state and stores them in par
_lmSuccess = levmarq(
nDOF,
par.data(),
static_cast<int>(screenPoints.size()),
nullptr,
distToMinimize,
gradient,
dataPtr,
&_lmstat
);
std::vector<double> par(6, 0.0);
par.at(0) = _lastVel.orbit.x; // use _lastVel for orbit
par.at(1) = _lastVel.orbit.y;
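// par layout matches the solver's q vector: { orbit.xy, zoom, roll, pan.xy };
// only the first nDof entries are written by the solver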
_lmSuccess = _solver->solve(list, _selected, &par, *_camera);
int nDof = _solver->getNDof();
if (_lmSuccess && !_unitTest) {
// if good values were found set new camera state
_vel.orbit = glm::dvec2(par.at(0), par.at(1));
if (nDOF > 2) {
if (nDof > 2) {
_vel.zoom = par.at(2);
_vel.roll = par.at(3);
if (_panEnabled && nDOF > 4) {
if (_panEnabled && nDof > 4) {
_vel.roll = 0.0;
_vel.pan = glm::dvec2(par.at(4), par.at(5));
}
@@ -785,19 +575,17 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
glm::dquat camToWorldSpace = _camera->rotationQuaternion();
glm::dvec3 camPos = _camera->positionVec3();
std::vector<SelectedBody> newSelected;
struct PickingInfo {
SceneGraphNode* node;
double pickingDistanceNDC;
double pickingDistanceWorld;
// node & distance
std::tuple<SceneGraphNode*, double> currentlyPicked = {
nullptr,
std::numeric_limits<double>::max()
};
std::vector<PickingInfo> pickingInfo;
for (const TuioCursor& c : list) {
double xCo = 2 * (c.getX() - 0.5);
double yCo = -2 * (c.getY() - 0.5); // normalized -1 to 1 coordinates on screen
// vec3(projectionmatrix * clipspace), divide with w?
glm::dvec3 cursorInWorldSpace = camToWorldSpace *
glm::dvec3(glm::inverse(_camera->projectionMatrix()) *
glm::dvec4(xCo, yCo, -1.0, 1.0));
@@ -808,20 +596,12 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
for (SceneGraphNode* node : selectableNodes) {
double boundingSphere = node->boundingSphere();
glm::dvec3 camToSelectable = node->worldPosition() - camPos;
double dist = length(glm::cross(cursorInWorldSpace, camToSelectable)) /
glm::length(cursorInWorldSpace) - boundingSphere;
if (dist <= 0.0) {
// finds intersection closest point between boundingsphere and line in
// world coordinates, assumes line direction is normalized
double d = glm::dot(raytrace, camToSelectable);
double root = boundingSphere * boundingSphere -
glm::dot(camToSelectable, camToSelectable) + d * d;
if (root > 0) { // two intersection points (take the closest one)
d -= sqrt(root);
}
glm::dvec3 intersectionPoint = camPos + d * raytrace;
glm::dvec3 pointInModelView = glm::inverse(node->rotationMatrix()) *
(intersectionPoint - node->worldPosition());
glm::dvec3 intersectionPos = {};
glm::dvec3 intersectionNormal = {};
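// intersectRaySphere expects a normalized ray direction and reports the
// closest hit point and surface normal in front of the ray origin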
bool intersected = glm::intersectRaySphere(
    camPos,
    raytrace,
    node->worldPosition(),
    boundingSphere,
    intersectionPos,
    intersectionNormal
);
if (intersected) {
glm::dvec3 pointInModelView = glm::inverse(node->worldRotationMatrix()) *
(intersectionPos - node->worldPosition());
// Add id, node and surface coordinates to the selected list
std::vector<SelectedBody>::iterator oldNode = std::find_if(
@@ -830,9 +610,7 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
[id](SelectedBody s) { return s.id == id; }
);
if (oldNode != newSelected.end()) {
double oldNodeDist = glm::length(
oldNode->node->worldPosition() - camPos
);
double oldNodeDist = glm::length(oldNode->node->worldPosition() - camPos);
if (glm::length(camToSelectable) < oldNodeDist) {
// new node is closer, remove added node and add the new one
// instead
@@ -859,56 +637,44 @@ void TouchInteraction::findSelectedNode(const std::vector<TuioCursor>& list) {
// We either want to select the object if its bounding sphere has been
// touched (checked by the first part of this loop above) or if the touch
// point is within a minimum distance of the center
if (dist <= 0.0 || (ndcDist <= _pickingRadiusMinimum)) {
// If the user touched the planet directly, this is definitely the one
// they are interested in => minimum distance
if (dist <= 0.0) {
// If the user touched the planet directly, this is definitely the one
// they are interested in => minimum distance
if (intersected) {
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC(
node->identifier(),
"Picking candidate based on direct touch"
);
LINFOC(
node->identifier(),
"Picking candidate based on direct touch"
);
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
pickingInfo.push_back({
node,
-std::numeric_limits<double>::max(),
-std::numeric_limits<double>::max()
});
}
else {
// The node was considered due to minimum picking distance radius
currentlyPicked = {
node,
-std::numeric_limits<double>::max()
};
}
else if (ndcDist <= _pickingRadiusMinimum) {
// The node was considered due to minimum picking distance radius
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC(
node->identifier(),
"Picking candidate based on proximity"
);
LINFOC(
node->identifier(),
"Picking candidate based on proximity"
);
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
pickingInfo.push_back({
double dist = length(intersectionPos - camPos);
if (dist < std::get<1>(currentlyPicked)) {
currentlyPicked = {
node,
ndcDist,
dist
});
};
}
}
}
}
}
// After we are done with all of the nodes, we can sort the picking list and pick the
// one that fits best (= is closest or was touched directly)
std::sort(
pickingInfo.begin(),
pickingInfo.end(),
[](const PickingInfo& lhs, const PickingInfo& rhs) {
return lhs.pickingDistanceWorld < rhs.pickingDistanceWorld;
}
);
// If an item has been picked, it's in the first position of the vector now
if (!pickingInfo.empty()) {
_pickingSelected = pickingInfo.begin()->node;
if (SceneGraphNode* node = std::get<0>(currentlyPicked)) {
_pickingSelected = node;
#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
LINFOC("Picking", "Picked node: " + _pickingSelected->identifier());
#endif //#ifdef TOUCH_DEBUG_NODE_PICK_MESSAGES
@@ -1447,7 +1213,7 @@ void TouchInteraction::step(double dt) {
void TouchInteraction::unitTest() {
if (_unitTest) {
_lmstat.verbose = true;
_solver->setLevMarqVerbosity(true);
// set _selected pos and new pos (on screen)
std::vector<TuioCursor> lastFrame = {
@@ -1468,7 +1234,7 @@ void TouchInteraction::unitTest() {
snprintf(buffer, sizeof(char) * 32, "lmdata%i.csv", _numOfTests);
_numOfTests++;
std::ofstream file(buffer);
file << _lmstat.data;
file << _solver->getLevMarqStat().data;
// clear everything
_selected.clear();
@@ -1480,6 +1246,7 @@ void TouchInteraction::unitTest() {
_lastVel = _vel;
_unitTest = false;
_solver->setLevMarqVerbosity(false);
// could be the camera copy in func
}
}

View File

@@ -69,8 +69,8 @@ LRESULT CALLBACK HookCallback(int nCode, WPARAM wParam, LPARAM lParam) {
// native touch to screen conversion
ScreenToClient(pStruct->hwnd, reinterpret_cast<LPPOINT>(&p));
float xPos = (float)p.x / (float)(rect.right - rect.left);
float yPos = (float)p.y / (float)(rect.bottom - rect.top);
float xPos = static_cast<float>(p.x) / static_cast<float>(rect.right - rect.left);
float yPos = static_cast<float>(p.y) / static_cast<float>(rect.bottom - rect.top);
if (pointerInfo.pointerFlags & POINTER_FLAG_DOWN) {
// Handle new touchpoint
gTuioServer->initFrame(TUIO::TuioTime::getSessionTime());