Diffstat (limited to 'Eigen/src/IterativeLinearSolvers/ConjugateGradient.h')
-rw-r--r--   Eigen/src/IterativeLinearSolvers/ConjugateGradient.h   44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index a799c3ef5..10cd94783 100644
--- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -60,29 +60,29 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
}
VectorType p(n);
- p = precond.solve(residual); //initial search direction
+ p = precond.solve(residual); // initial search direction
VectorType z(n), tmp(n);
RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
Index i = 0;
while(i < maxIters)
{
- tmp.noalias() = mat * p; // the bottleneck of the algorithm
+ tmp.noalias() = mat * p; // the bottleneck of the algorithm
- Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
- x += alpha * p; // update solution
- residual -= alpha * tmp; // update residue
+ Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
+ x += alpha * p; // update solution
+ residual -= alpha * tmp; // update residual
residualNorm2 = residual.squaredNorm();
if(residualNorm2 < threshold)
break;
- z = precond.solve(residual); // approximately solve for "A z = residual"
+ z = precond.solve(residual); // approximately solve for "A z = residual"
RealScalar absOld = absNew;
absNew = numext::real(residual.dot(z)); // update the absolute value of r
- RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
- p = z + beta * p; // update search direction
+ RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
+ p = z + beta * p; // update search direction
i++;
}
tol_error = sqrt(residualNorm2 / rhsNorm2);
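
For reference, below is a minimal, self-contained sketch of the preconditioned conjugate-gradient loop that this hunk touches, written against dense Eigen types with an identity preconditioner. The test matrix, right-hand side, iteration cap, and tolerance are illustrative assumptions and are not part of the patch.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  const int n = 4;
  MatrixXd A(n, n);                       // small SPD test matrix (assumed data)
  A << 4, 1, 0, 0,
       1, 4, 1, 0,
       0, 1, 4, 1,
       0, 0, 1, 4;
  VectorXd b = VectorXd::Ones(n);

  VectorXd x = VectorXd::Zero(n);         // start from x = 0
  VectorXd residual = b - A * x;
  VectorXd p = residual;                  // identity preconditioner: p = M^{-1} r = r
  double absNew = residual.dot(p);        // r' * M^{-1} * r
  const double threshold = 1e-12 * b.squaredNorm();   // assumed stopping tolerance

  for (int i = 0; i < 100 && residual.squaredNorm() > threshold; ++i)
  {
    VectorXd tmp = A * p;                 // matrix-vector product: the bottleneck
    double alpha = absNew / p.dot(tmp);   // step length along the search direction
    x += alpha * p;                       // update solution
    residual -= alpha * tmp;              // update residual
    VectorXd z = residual;                // identity preconditioner again
    double absOld = absNew;
    absNew = residual.dot(z);
    double beta = absNew / absOld;        // Gram-Schmidt coefficient
    p = z + beta * p;                     // new search direction
  }

  std::cout << "residual norm: " << (b - A * x).norm() << std::endl;
  return 0;
}
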
@@ -122,24 +122,24 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* This class can be used as the direct solver classes. Here is a typical usage example:
- * \code
- * int n = 10000;
- * VectorXd x(n), b(n);
- * SparseMatrix<double> A(n,n);
- * // fill A and b
- * ConjugateGradient<SparseMatrix<double> > cg;
- * cg.compute(A);
- * x = cg.solve(b);
- * std::cout << "#iterations: " << cg.iterations() << std::endl;
- * std::cout << "estimated error: " << cg.error() << std::endl;
- * // update b, and solve again
- * x = cg.solve(b);
- * \endcode
+ \code
+ int n = 10000;
+ VectorXd x(n), b(n);
+ SparseMatrix<double> A(n,n);
+ // fill A and b
+ ConjugateGradient<SparseMatrix<double> > cg;
+ cg.compute(A);
+ x = cg.solve(b);
+ std::cout << "#iterations: " << cg.iterations() << std::endl;
+ std::cout << "estimated error: " << cg.error() << std::endl;
+ // update b, and solve again
+ x = cg.solve(b);
+ \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
- * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ * \sa class LSCG, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
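
The documentation above notes that the iterations start from x = 0 and that solveWithGuess() lets the caller supply a starting point. Below is a short usage sketch of that warm-start path; the sparse system and the guess value are made up purely for illustration.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  using SpMat = Eigen::SparseMatrix<double>;

  const int n = 3;
  SpMat A(n, n);                                        // assumed small SPD system
  A.insert(0, 0) = 4.0;  A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 1.0;  A.insert(1, 1) = 4.0;  A.insert(1, 2) = 1.0;
  A.insert(2, 1) = 1.0;  A.insert(2, 2) = 4.0;
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
  Eigen::VectorXd guess = Eigen::VectorXd::Constant(n, 0.2);  // assumed warm start

  Eigen::ConjugateGradient<SpMat> cg;
  cg.compute(A);
  Eigen::VectorXd x = cg.solveWithGuess(b, guess);      // start iterations from 'guess'

  std::cout << "#iterations: " << cg.iterations() << "\n"
            << "estimated error: " << cg.error() << std::endl;
  return 0;
}
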