Commit b7e73763 by Rémi Verschelde

Fix non UTF8-encoded thirdparty files

parent 0acdeb2e
//{{NO_DEPENDENCIES}}
// Microsoft Visual C++ generated include file.
// Used by assimp.rc
// Nächste Standardwerte für neue Objekte
//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
#define _APS_NEXT_RESOURCE_VALUE 101
#define _APS_NEXT_COMMAND_VALUE 40001
#define _APS_NEXT_CONTROL_VALUE 1001
#define _APS_NEXT_SYMED_VALUE 101
#endif
#endif
@@ -4329,10 +4329,10 @@ double DistanceFromLineSqrd(
   const IntPoint& pt, const IntPoint& ln1, const IntPoint& ln2)
 {
   //The equation of a line in general form (Ax + By + C = 0)
-  //given 2 points (x,y) & (x,y) is ...
-  //(y - y)x + (x - x)y + (y - y)x - (x - x)y = 0
-  //A = (y - y); B = (x - x); C = (y - y)x - (x - x)y
-  //perpendicular distance of point (x,y) = (Ax + By + C)/Sqrt(A + B)
+  //given 2 points (x¹,y¹) & (x²,y²) is ...
+  //(y¹ - y²)x + (x² - x¹)y + (y² - y¹)x¹ - (x² - x¹)y¹ = 0
+  //A = (y¹ - y²); B = (x² - x¹); C = (y² - y¹)x¹ - (x² - x¹)y¹
+  //perpendicular distance of point (x³,y³) = (Ax³ + By³ + C)/Sqrt(A² + B²)
   //see http://en.wikipedia.org/wiki/Perpendicular_distance
   double A = double(ln1.Y - ln2.Y);
   double B = double(ln2.X - ln1.X);
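The comments restored in this hunk spell out the standard point-to-line distance formula. As an illustration only (hypothetical Pt struct and function name, independent of Clipper's IntPoint API), a self-contained version of the same computation could look like this:

    // Minimal sketch of the squared point-to-line distance documented above.
    // Types and names here are illustrative, not Clipper's API.
    #include <cstdio>

    struct Pt { double x, y; };

    // Line through ln1 and ln2: A·x + B·y + C = 0 with
    // A = y1 - y2, B = x2 - x1, C = -A·x1 - B·y1.
    double DistanceFromLineSqrdSketch(const Pt &pt, const Pt &ln1, const Pt &ln2) {
        double A = ln1.y - ln2.y;
        double B = ln2.x - ln1.x;
        double C = A * ln1.x + B * ln1.y;      // value of A·x + B·y on the line
        C = A * pt.x + B * pt.y - C;           // signed numerator for pt
        return (C * C) / (A * A + B * B);      // squared perpendicular distance
    }

    int main() {
        Pt p{0, 2}, a{0, 0}, b{1, 1};          // distance from (0,2) to the line y = x
        std::printf("%f\n", DistanceFromLineSqrdSketch(p, a, b)); // prints 2.000000 (= sqrt(2)^2)
    }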
@@ -4388,7 +4388,7 @@ private:
 class Solver
 {
 public:
-	// Solve the symmetric system: AtAx = Atb
+	// Solve the symmetric system: At·A·x = At·b
 	static bool LeastSquaresSolver(const sparse::Matrix &A, const FullVector &b, FullVector &x, float epsilon = 1e-5f)
 	{
 		xaDebugAssert(A.width() == x.dimension());
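The comment fixed in this hunk refers to the normal equations: multiplying A·x = b on the left by Aᵀ gives the symmetric system Aᵀ·A·x = Aᵀ·b, which is what LeastSquaresSolver hands to the conjugate gradient routine. A tiny dense sketch of that idea, for a straight-line fit y ≈ c0 + c1·t (hypothetical code, not the sparse::Matrix API used in the patch):

    // Hedged sketch: form and solve the normal equations At·A·c = At·b for a
    // line fit. Dense and tiny on purpose; the patched code instead runs a
    // conjugate gradient solver on sparse matrices.
    #include <cstdio>

    int main() {
        const double t[4] = {0, 1, 2, 3};
        const double y[4] = {1, 3, 5, 7};          // exactly y = 1 + 2t

        // A is 4x2 with rows (1, t_i); accumulate AtA (2x2) and Atb (2).
        double AtA[2][2] = {{0, 0}, {0, 0}}, Atb[2] = {0, 0};
        for (int i = 0; i < 4; i++) {
            const double row[2] = {1.0, t[i]};
            for (int r = 0; r < 2; r++) {
                Atb[r] += row[r] * y[i];
                for (int c = 0; c < 2; c++) AtA[r][c] += row[r] * row[c];
            }
        }

        // Solve the 2x2 symmetric system by Cramer's rule.
        const double det = AtA[0][0] * AtA[1][1] - AtA[0][1] * AtA[1][0];
        const double c0 = (Atb[0] * AtA[1][1] - AtA[0][1] * Atb[1]) / det;
        const double c1 = (AtA[0][0] * Atb[1] - Atb[0] * AtA[1][0]) / det;
        std::printf("c0 = %f, c1 = %f\n", c0, c1); // prints c0 = 1, c1 = 2
    }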
@@ -4477,22 +4477,22 @@ private:
  * Gradient method.
  *
  * Solving sparse linear systems:
- * (1) Ax = b
+ * (1) A·x = b
  *
  * The conjugate gradient algorithm solves (1) only in the case that A is
  * symmetric and positive definite. It is based on the idea of minimizing the
  * function
  *
- * (2) f(x) = 1/2xAx - bx
+ * (2) f(x) = 1/2·x·A·x - b·x
  *
  * This function is minimized when its gradient
  *
- * (3) df = Ax - b
+ * (3) df = A·x - b
  *
  * is zero, which is equivalent to (1). The minimization is carried out by
  * generating a succession of search directions p.k and improved minimizers x.k.
- * At each stage a quantity alfa.k is found that minimizes f(x.k + alfa.kp.k),
- * and x.k+1 is set equal to the new point x.k + alfa.kp.k. The p.k and x.k are
+ * At each stage a quantity alfa.k is found that minimizes f(x.k + alfa.k·p.k),
+ * and x.k+1 is set equal to the new point x.k + alfa.k·p.k. The p.k and x.k are
  * built up in such a way that x.k+1 is also the minimizer of f over the whole
  * vector space of directions already taken, {p.1, p.2, . . . , p.k}. After N
  * iterations you arrive at the minimizer over the entire vector space, i.e., the
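The comment block in this hunk describes the conjugate gradient recurrence that the patched solver implements on sparse matrices. As a rough, self-contained illustration under simplified assumptions (tiny dense SPD system, no preconditioner, illustrative names only, not the xatlas sparse:: API):

    // Hedged sketch of plain conjugate gradient, following the recurrence
    // described in the comment block above:
    //   alpha_k = (r·r)/(p·A·p),  x += alpha·p,  r -= alpha·A·p,
    //   beta_k  = (r_new·r_new)/(r_old·r_old),  p = r + beta·p.
    #include <cstdio>

    int main() {
        const double A[2][2] = {{4, 1}, {1, 3}};   // symmetric positive definite
        const double b[2] = {1, 2};
        double x[2] = {0, 0}, r[2], p[2], q[2];

        for (int i = 0; i < 2; i++) r[i] = b[i], p[i] = r[i];  // r = b - A·x (x = 0)
        double delta = r[0] * r[0] + r[1] * r[1];

        for (int it = 0; it < 100 && delta > 1e-20; it++) {
            for (int i = 0; i < 2; i++) q[i] = A[i][0] * p[0] + A[i][1] * p[1]; // q = A·p
            const double alpha = delta / (p[0] * q[0] + p[1] * q[1]);           // delta / p·q
            for (int i = 0; i < 2; i++) x[i] += alpha * p[i];                   // x = alpha·p + x
            for (int i = 0; i < 2; i++) r[i] -= alpha * q[i];                   // r = r - alpha·q
            const double delta_new = r[0] * r[0] + r[1] * r[1];
            const double beta = delta_new / delta;
            for (int i = 0; i < 2; i++) p[i] = r[i] + beta * p[i];              // p = beta·p + r
            delta = delta_new;
        }
        std::printf("x = (%f, %f)\n", x[0], x[1]); // approx (1/11, 7/11)
    }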
@@ -4520,7 +4520,7 @@ private:
 		float delta_new;
 		float alpha;
 		float beta;
-		// r = b - Ax;
+		// r = b - A·x;
 		sparse::copy(b, r);
 		sparse::sgemv(-1, A, x, 1, r);
 		// p = r;
@@ -4529,24 +4529,24 @@ private:
 		delta_0 = delta_new;
 		while (i < i_max && delta_new > epsilon * epsilon * delta_0) {
 			i++;
-			// q = Ap
+			// q = A·p
 			mult(A, p, q);
-			// alpha = delta_new / pq
+			// alpha = delta_new / p·q
 			alpha = delta_new / sparse::dot( p, q );
-			// x = alfap + x
+			// x = alfa·p + x
 			sparse::saxpy(alpha, p, x);
 			if ((i & 31) == 0) { // recompute r after 32 steps
-				// r = b - Ax
+				// r = b - A·x
 				sparse::copy(b, r);
 				sparse::sgemv(-1, A, x, 1, r);
 			} else {
-				// r = r - alphaq
+				// r = r - alpha·q
 				sparse::saxpy(-alpha, q, r);
 			}
 			delta_old = delta_new;
 			delta_new = sparse::dot( r, r );
 			beta = delta_new / delta_old;
-			// p = betap + r
+			// p = beta·p + r
 			sparse::scal(beta, p);
 			sparse::saxpy(1, r, p);
 		}
@@ -4572,35 +4572,35 @@ private:
 		float delta_new;
 		float alpha;
 		float beta;
-		// r = b - Ax
+		// r = b - A·x
 		sparse::copy(b, r);
 		sparse::sgemv(-1, A, x, 1, r);
-		// p = M^-1 r
+		// p = M^-1 · r
 		preconditioner.apply(r, p);
 		delta_new = sparse::dot(r, p);
 		delta_0 = delta_new;
 		while (i < i_max && delta_new > epsilon * epsilon * delta_0) {
 			i++;
-			// q = Ap
+			// q = A·p
 			mult(A, p, q);
-			// alpha = delta_new / pq
+			// alpha = delta_new / p·q
 			alpha = delta_new / sparse::dot(p, q);
-			// x = alfap + x
+			// x = alfa·p + x
 			sparse::saxpy(alpha, p, x);
 			if ((i & 31) == 0) { // recompute r after 32 steps
-				// r = b - Ax
+				// r = b - A·x
 				sparse::copy(b, r);
 				sparse::sgemv(-1, A, x, 1, r);
 			} else {
-				// r = r - alfaq
+				// r = r - alfa·q
 				sparse::saxpy(-alpha, q, r);
 			}
-			// s = M^-1 r
+			// s = M^-1 · r
 			preconditioner.apply(r, s);
 			delta_old = delta_new;
 			delta_new = sparse::dot( r, s );
 			beta = delta_new / delta_old;
-			// p = s + betap
+			// p = s + beta·p
 			sparse::scal(beta, p);
 			sparse::saxpy(1, s, p);
 		}
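This last hunk is the preconditioned variant, where preconditioner.apply(r, p) computes p = M⁻¹·r. The patch does not show which preconditioner is used; a common and minimal choice is Jacobi (the diagonal of A), sketched below purely as an illustrative assumption, not as what xatlas necessarily does:

    // Hedged sketch: a Jacobi (diagonal) preconditioner. Applying M^-1 to a
    // residual r just divides each component by the corresponding diagonal
    // entry of A. The class name and dense storage are illustrative only.
    #include <cstdio>
    #include <vector>

    struct JacobiPreconditioner {
        std::vector<double> inv_diag;                 // 1 / A[i][i]

        explicit JacobiPreconditioner(const std::vector<std::vector<double>> &A) {
            inv_diag.reserve(A.size());
            for (size_t i = 0; i < A.size(); i++) inv_diag.push_back(1.0 / A[i][i]);
        }
        // out = M^-1 · r
        void apply(const std::vector<double> &r, std::vector<double> &out) const {
            out.resize(r.size());
            for (size_t i = 0; i < r.size(); i++) out[i] = inv_diag[i] * r[i];
        }
    };

    int main() {
        JacobiPreconditioner M({{4, 1}, {1, 3}});
        std::vector<double> r = {1, 2}, p;
        M.apply(r, p);                                 // p = (1/4, 2/3)
        std::printf("p = (%f, %f)\n", p[0], p[1]);
    }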