Remove NURBS interpolation in favor of a global lat-lon mapping in the shader, due to a mismatch in UV coordinates.

kbladin
2016-04-11 12:09:45 -04:00
parent 4e0fc13035
commit 552bdc1786
3 changed files with 29 additions and 186 deletions
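
The change in a nutshell: instead of evaluating a NURBS patch over nine control points, the vertex shader now maps each vertex's UV coordinates straight to a lat-lon pair inside the patch and projects that onto the globe. A minimal C++/glm sketch of the mapping, mirroring the shader's globalInterpolation() in the diff below (standalone and illustrative; not code from this commit):

#include <glm/glm.hpp>
#include <cmath>

// Mirrors the shader's latLonToCartesian().
glm::vec3 latLonToCartesian(float lat, float lon, float radius) {
    return radius * glm::vec3(
        std::cos(lat) * std::cos(lon),
        std::cos(lat) * std::sin(lon),
        std::sin(lat));
}

// uv is in [0,1]^2. minLatLon and latLonScalingFactor correspond to the
// uniforms set in LatLonPatch::render() (minLatLon = _posLatLon - _sizeLatLon,
// latLonScalingFactor = 2 * _sizeLatLon).
glm::vec3 globalInterpolation(glm::vec2 uv, glm::vec2 minLatLon,
                              glm::vec2 latLonScalingFactor, float globeRadius) {
    glm::vec2 latLon = minLatLon + latLonScalingFactor * uv;
    return latLonToCartesian(latLon.x, latLon.y, globeRadius);
}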

View File

@@ -130,9 +130,6 @@ namespace openspace {
_programObject->setUniform("ModelTransform", transform);
setPscUniforms(*_programObject.get(), data.camera, data.position);
glm::dmat4 cameraTransform = data.camera.viewMatrix(); // TODO : NEEDS TO BE DOUBLE PRECISION
glm::dmat4 cameraProjectionTransform = data.camera.viewProjectionMatrix(); // TODO : NEEDS TO BE DOUBLE PRECISION
glDisable(GL_CULL_FACE);
//glCullFace(GL_BACK);

View File

@@ -35,6 +35,7 @@
#define _USE_MATH_DEFINES
#include <math.h>
#include <glm/gtc/matrix_transform.hpp>
namespace {
const std::string _loggerCat = "LatLonPatch";
@@ -106,104 +107,26 @@ namespace openspace {
}
void LatLonPatch::render(const RenderData& data) {
using namespace glm;
// activate shader
_programObject->activate();
using namespace glm;
// TODO : Not sure if double precision is needed for all these calculations;
// using doubles for now to be safe.
// Create control points and normals
//
// These are the physical positions of the control points for the patch:
//
// y p[6] p[7] p[8] p02 p12 p22
// ^ p[3] p[4] p[5] <=> p01 p11 p21
// | p[0] p[1] p[2] p00 p10 p20
// |
// -----> x
dvec3 p[9]; // p00, p01, p02, p10, p11, p12, p20, p21, p22
dvec3 n00, n01, n02, n10, n11, n12, n20, n21, n22;
dvec3 nHorizontal0, nHorizontal1, nHorizontal2;
float interpolationWeight;
// Calculate positions of corner control points
p[0] = calculateCornerPointBottomLeft(); // p00
p[2] = calculateCornerPointBottomRight(); // p20
p[6] = calculateCornerPointTopLeft(); // p02
p[8] = calculateCornerPointTopRight(); // p22
// Calculate the horizontal normals.
// A horizontal normal depends only on longitude, so the same normal is shared
// by all latitudes along that longitude.
nHorizontal0 = normalize(converter::latLonToCartesian(
0,
_posLatLon.y - _sizeLatLon.y,
_globeRadius));
nHorizontal1 = normalize(converter::latLonToCartesian(
0,
_posLatLon.y,
_globeRadius));
nHorizontal2 = normalize(converter::latLonToCartesian(
0,
_posLatLon.y + _sizeLatLon.y,
_globeRadius));
// Get position of center control points
p[3] = calculateCenterPoint(p[0], normalize(p[0]), p[6], normalize(p[6])); // p01
p[5] = calculateCenterPoint(p[2], normalize(p[2]), p[8], normalize(p[8])); // p21
p[1] = calculateCenterPoint(p[0], nHorizontal0, p[2], nHorizontal2); // p10
p[7] = calculateCenterPoint(p[6], nHorizontal0, p[8], nHorizontal2); // p12
p[4] = calculateCenterPoint(p[3], nHorizontal0, p[5], nHorizontal2); // p11
// Calculate one weight to send to the GPU for interpolation in the longitude
// direction. There are really three such weights (one per row of control
// points), but they all share the same value: the cosine of half the patch's
// longitudinal span. Weighting the center control point of a row with this
// value makes the NURBS curve trace an exact circle segment (see the sketch
// after this hunk).
interpolationWeight = dot(nHorizontal0, nHorizontal1);
// TODO : The transformation from model space to world space should also
// consider rotations. For simplicity it currently only translates; this
// should be done with a full matrix transform.
dvec3 modelPosition = data.position.dvec3();
for (size_t i = 0; i < 9; i++) {
p[i] += modelPosition;
}
// Get camera transform matrix
// TODO : Should only need to fetch the camera transform and use directly
// but this is not currently possible in the camera class.
dvec3 cameraPosition = data.camera.position().dvec3();
dmat4 viewTransform = inverse(translate(dmat4(1.0), cameraPosition));
viewTransform = dmat4(data.camera.viewRotationMatrix()) * viewTransform;
vec3 cameraPosition = data.camera.position().vec3();
mat4 viewTransform = inverse(translate(mat4(1.0), cameraPosition));
viewTransform = mat4(data.camera.viewRotationMatrix()) * viewTransform;
// Transform control points to camera space
for (size_t i = 0; i < 9; i++) {
p[i] = dvec3(viewTransform * dvec4(p[i], 1.0));
}
// TODO : Model transform should be fetched as a matrix directly.
mat4 modelTransform = translate(mat4(1), data.position.vec3());
// Transform normals to camera space
n00 = dvec3(viewTransform * dvec4(n00, 0.0));
n10 = dvec3(viewTransform * dvec4(n10, 0.0));
n20 = dvec3(viewTransform * dvec4(n20, 0.0));
n01 = dvec3(viewTransform * dvec4(n01, 0.0));
n11 = dvec3(viewTransform * dvec4(n11, 0.0));
n21 = dvec3(viewTransform * dvec4(n21, 0.0));
n02 = dvec3(viewTransform * dvec4(n02, 0.0));
n12 = dvec3(viewTransform * dvec4(n12, 0.0));
n22 = dvec3(viewTransform * dvec4(n22, 0.0));
_programObject->setUniform("modelViewProjectionTransform", data.camera.projectionMatrix() * viewTransform * modelTransform);
_programObject->setUniform("minLatLon", vec2(_posLatLon - _sizeLatLon));
_programObject->setUniform("latLonScalingFactor", 2.0f * vec2(_sizeLatLon));
_programObject->setUniform("globeRadius", float(_globeRadius));
// Send control points to GPU to be used in shader
// Transform to float values
vec3 pFloat[9];
for (size_t i = 0; i < 9; i++) {
pFloat[i] = vec3(p[i]);
}
_programObject->setUniform("p", &pFloat[0], 9);
_programObject->setUniform("interpolationWeight", interpolationWeight);
_programObject->setUniform("Projection", data.camera.projectionMatrix());
// Render triangles (use texture coordinates to interpolate to new positions)
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
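
A side note on the interpolation weight computed above: with the knot vector {0,0,0,1,1,1} used by the shader's nurbsBasis below, the quadratic NURBS basis reduces to the Bernstein basis, so each row of the patch is a rational quadratic Bezier curve. The circle-segment property is then easy to verify numerically: if the middle control point sits at the intersection of the end tangents and carries weight cos(alpha), where alpha is half the arc's opening angle, every curve point lies exactly on the circle. A self-contained sketch, illustrative and not part of this commit:

#include <cmath>
#include <cstdio>

int main() {
    const double alpha = 0.3;          // half the arc's opening angle
    const double w = std::cos(alpha);  // plays the role of interpolationWeight
    // Arc from angle -alpha to +alpha on the unit circle; the middle control
    // point is the intersection of the end tangents.
    const double P0[2] = { std::cos(alpha), -std::sin(alpha) };
    const double P1[2] = { 1.0 / std::cos(alpha), 0.0 };
    const double P2[2] = { std::cos(alpha), std::sin(alpha) };
    for (int i = 0; i <= 10; ++i) {
        const double t = i / 10.0;
        // Quadratic Bernstein basis (what nurbsBasis evaluates to here).
        const double b0 = (1 - t) * (1 - t);
        const double b1 = 2 * t * (1 - t);
        const double b2 = t * t;
        const double denom = b0 + b1 * w + b2;
        const double x = (b0 * P0[0] + b1 * w * P1[0] + b2 * P2[0]) / denom;
        const double y = (b0 * P0[1] + b1 * w * P1[1] + b2 * P2[1]) / denom;
        // |C(t)| stays at 1.0 up to rounding error.
        std::printf("t = %.1f  |C(t)| = %.12f\n", t, std::hypot(x, y));
    }
    return 0;
}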

View File

@@ -24,9 +24,11 @@
#version __CONTEXT__
uniform mat4 Projection;
uniform vec3[9] p;
uniform float interpolationWeight;
uniform mat4 modelViewProjectionTransform;
uniform float globeRadius;
uniform vec2 minLatLon;
uniform vec2 latLonScalingFactor;
layout(location = 1) in vec2 in_UV;
@@ -35,105 +37,26 @@ out vec2 vs_uv;
#include "PowerScaling/powerScaling_vs.hglsl"
// NURBS basis functions for third order with three control points.
// This function is not general; it only works for that specific case.
void nurbsBasis(float t, inout float n[3])
{
#define ORDER 3
#define NPTS 3
#define ORDER_2 2
#define ORDER_3 3
int i,k;
float d, e;
float temp[5];
int knots[6] = {0,0,0,1,1,1};
// calculate the first order basis functions n[i][1]
for (i = 0; i < 5; i++){ // Loop to NPTS + ORDER - 1
if (( t >= knots[i]) && (t < knots[i+1]))
temp[i] = 1;
else
temp[i] = 0;
}
// calculate the higher order basis functions
// Second order
for (i = 0; i < 4; i++){ // Loop to NPTS + ORDER - 2 = 4
// if the lower order basis function is zero skip the calculation
d = (temp[i] != 0) ? ((t-knots[i])*temp[i])/(knots[i+ORDER_2-1]-knots[i]) : 0;
e = (temp[i+1] != 0) ? ((knots[i+ORDER_2]-t)*temp[i+1])/(knots[i+ORDER_2]-knots[i+1]) : 0;
temp[i] = d + e;
}
// Third order
for (i = 0; i < 3; i++){ // Loop to NPTS + ORDER - 3 = 3
// if the lower order basis function is zero skip the calculation
d = (temp[i] != 0) ? ((t-knots[i])*temp[i])/(knots[i+ORDER_3-1]-knots[i]) : 0;
e = (temp[i+1] != 0) ? ((knots[i+ORDER_3]-t)*temp[i+1])/(knots[i+ORDER_3]-knots[i+1]) : 0;
temp[i] = d + e;
}
// Last point
temp[NPTS - 1] = (t == knots[NPTS + ORDER - 1]) ? 1 : temp[NPTS - 1];
// Copy to array
for (i = 0; i < 3; i++) {
n[i] = temp[i];
}
}
vec3 latLonToCartesian(float latitude, float longitude, float radius) {
return radius * vec3(
cos(latitude) * cos(longitude),
cos(latitude) * sin(longitude),
sin(latitude));
}
// These are the physical positions of the control points for the patch:
//
// y p[6] p[7] p[8] p02 p12 p22
// ^ p[3] p[4] p[5] <=> p01 p11 p21
// | p[0] p[1] p[2] p00 p10 p20
// |
// -----> x
vec3 bilinearInterpolation() {
// Bilinear interpolation. Note that in_UV.y runs along the x direction of the
// control point grid and in_UV.x along y; presumably the UV coordinate
// mismatch mentioned in the commit message.
vec3 p0 = (1 - in_UV.y) * p[0] + in_UV.y * p[2];
vec3 p2 = (1 - in_UV.y) * p[6] + in_UV.y * p[8];
return (1 - in_UV.x) * p0 + in_UV.x * p2;
}
vec3 nurbsInterpolation() {
float basisFunctionValues[3];
// Interpolate in u direction
nurbsBasis(in_UV.y, basisFunctionValues);
vec3 p0 =
basisFunctionValues[0] * p[0] +
basisFunctionValues[1] * p[1] * interpolationWeight +
basisFunctionValues[2] * p[2];
vec3 p1 =
basisFunctionValues[0] * p[3] +
basisFunctionValues[1] * p[4] * interpolationWeight +
basisFunctionValues[2] * p[5];
vec3 p2 =
basisFunctionValues[0] * p[6] +
basisFunctionValues[1] * p[7] * interpolationWeight +
basisFunctionValues[2] * p[8];
// Calculate the last interpolation weight
float w1 = dot(normalize(p0), normalize(p1));
// Interpolate in v direction
nurbsBasis(in_UV.x, basisFunctionValues);
return
basisFunctionValues[0] * p0 +
basisFunctionValues[1] * p1 * w1 * interpolationWeight +
basisFunctionValues[2] * p2;
}
vec3 globalInterpolation() {
vec2 latLonInput;
latLonInput.x = minLatLon.x + latLonScalingFactor.x * in_UV.x; // Lat
latLonInput.y = minLatLon.y + latLonScalingFactor.y * in_UV.y; // Lon
vec3 positionModelSpace = latLonToCartesian(latLonInput.x, latLonInput.y, globeRadius);
return positionModelSpace;
}
void main()
{
vs_uv = in_UV;
//vec3 p = bilinearInterpolation();
vec3 p = nurbsInterpolation();
vec3 p = globalInterpolation();
vec4 position = Projection * vec4(p, 1);
vec4 position = modelViewProjectionTransform * vec4(p, 1);
gl_Position = z_normalization(position);
}
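
For completeness: with minLatLon = _posLatLon - _sizeLatLon and latLonScalingFactor = 2 * _sizeLatLon, as set in LatLonPatch::render() above, UV (0,0) maps to the patch's minimum lat-lon corner and UV (1,1) to its maximum. A small standalone sanity check of that wiring (glm assumed; the numeric values are hypothetical):

#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <cassert>

int main() {
    const glm::vec2 posLatLon(0.5f, 1.0f);   // patch center (hypothetical)
    const glm::vec2 sizeLatLon(0.1f, 0.2f);  // patch half-size (hypothetical)
    const glm::vec2 minLatLon = posLatLon - sizeLatLon;
    const glm::vec2 latLonScalingFactor = 2.0f * sizeLatLon;
    // UV (0,0) -> minimum corner, UV (1,1) -> maximum corner.
    const glm::vec2 c00 = minLatLon + latLonScalingFactor * glm::vec2(0.0f);
    const glm::vec2 c11 = minLatLon + latLonScalingFactor * glm::vec2(1.0f);
    assert(glm::all(glm::epsilonEqual(c00, posLatLon - sizeLatLon, 1e-6f)));
    assert(glm::all(glm::epsilonEqual(c11, posLatLon + sizeLatLon, 1e-6f)));
    return 0;
}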