Diffstat (limited to 'third_party/eigen3/Eigen/src/IterativeLinearSolvers')
-rw-r--r--  third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h  149
-rw-r--r--  third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h              254
-rw-r--r--  third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h     265
-rw-r--r--  third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h         467
-rw-r--r--  third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h   254
5 files changed, 0 insertions, 1389 deletions
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
deleted file mode 100644
index 1f3c060d02..0000000000
--- a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BASIC_PRECONDITIONERS_H
-#define EIGEN_BASIC_PRECONDITIONERS_H
-
-namespace Eigen {
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief A preconditioner based on the diagonal entries
- *
- * This class allows approximately solving A.x = b problems, assuming A is a diagonal matrix.
- * In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
- * \code
- * A.diagonal().asDiagonal() . x = b
- * \endcode
- *
- * \tparam _Scalar the type of the scalar.
- *
- * This preconditioner is suitable for both selfadjoint and general problems.
- * The diagonal entries are pre-inverted and stored into a dense vector.
- *
- * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
- *
- */
-template <typename _Scalar>
-class DiagonalPreconditioner
-{
- typedef _Scalar Scalar;
- typedef Matrix<Scalar,Dynamic,1> Vector;
- typedef typename Vector::Index Index;
-
- public:
- // this typedef is only to export the scalar type and compile-time dimensions to solve_retval
- typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
-
- DiagonalPreconditioner() : m_isInitialized(false) {}
-
- template<typename MatType>
- DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
- {
- compute(mat);
- }
-
- Index rows() const { return m_invdiag.size(); }
- Index cols() const { return m_invdiag.size(); }
-
- template<typename MatType>
- DiagonalPreconditioner& analyzePattern(const MatType& )
- {
- return *this;
- }
-
- template<typename MatType>
- DiagonalPreconditioner& factorize(const MatType& mat)
- {
- m_invdiag.resize(mat.cols());
- for(int j=0; j<mat.outerSize(); ++j)
- {
- typename MatType::InnerIterator it(mat,j);
- while(it && it.index()!=j) ++it;
- if(it && it.index()==j && it.value()!=Scalar(0))
- m_invdiag(j) = Scalar(1)/it.value();
- else
- m_invdiag(j) = Scalar(1);
- }
- m_isInitialized = true;
- return *this;
- }
-
- template<typename MatType>
- DiagonalPreconditioner& compute(const MatType& mat)
- {
- return factorize(mat);
- }
-
- template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
- {
- x = m_invdiag.array() * b.array() ;
- }
-
- template<typename Rhs> inline const internal::solve_retval<DiagonalPreconditioner, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
- eigen_assert(m_invdiag.size()==b.rows()
- && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<DiagonalPreconditioner, Rhs>(*this, b.derived());
- }
-
- protected:
- Vector m_invdiag;
- bool m_isInitialized;
-};
-
-namespace internal {
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<DiagonalPreconditioner<_MatrixType>, Rhs>
- : solve_retval_base<DiagonalPreconditioner<_MatrixType>, Rhs>
-{
- typedef DiagonalPreconditioner<_MatrixType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-}
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief A naive preconditioner which approximates any matrix as the identity matrix
- *
- * \sa class DiagonalPreconditioner
- */
-class IdentityPreconditioner
-{
- public:
-
- IdentityPreconditioner() {}
-
- template<typename MatrixType>
- IdentityPreconditioner(const MatrixType& ) {}
-
- template<typename MatrixType>
- IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
-
- template<typename MatrixType>
- IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
-
- template<typename MatrixType>
- IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
-
- template<typename Rhs>
- inline const Rhs& solve(const Rhs& b) const { return b; }
-};
-
-} // end namespace Eigen
-
-#endif // EIGEN_BASIC_PRECONDITIONERS_H
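The file removed above provided DiagonalPreconditioner and IdentityPreconditioner. As a quick orientation, here is a minimal usage sketch of the diagonal (Jacobi) preconditioner, assuming a current Eigen installation where the same classes remain available through the IterativeLinearSolvers module; compute() pre-inverts the diagonal of A, and solve(b) then reduces to a component-wise product:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Sketch only: apply the Jacobi (diagonal) preconditioner on its own.
Eigen::VectorXd applyJacobi(const Eigen::SparseMatrix<double>& A,
                            const Eigen::VectorXd& b) {
  Eigen::DiagonalPreconditioner<double> precond;
  precond.compute(A);       // pre-invert the diagonal entries of A
  return precond.solve(b);  // x = D^{-1} b, applied component-wise
}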
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
deleted file mode 100644
index 7a46b51fa6..0000000000
--- a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BICGSTAB_H
-#define EIGEN_BICGSTAB_H
-
-namespace Eigen {
-
-namespace internal {
-
- * \internal Low-level bi-conjugate gradient stabilized algorithm
- * \param mat The matrix A
- * \param rhs The right hand side vector b
- * \param x On input the initial solution, on output the computed solution.
- * \param precond A preconditioner being able to efficiently solve for an
- * approximation of Ax=b (regardless of b)
- * \param iters On input the max number of iteration, on output the number of performed iterations.
- * \param tol_error On input the tolerance error, on output an estimation of the relative error.
- * \return false in the case of a numerical issue, for example a breakdown of BiCGSTAB.
- */
-template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
-bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
- const Preconditioner& precond, int& iters,
- typename Dest::RealScalar& tol_error)
-{
- using std::sqrt;
- using std::abs;
- typedef typename Dest::RealScalar RealScalar;
- typedef typename Dest::Scalar Scalar;
- typedef Matrix<Scalar,Dynamic,1> VectorType;
- RealScalar tol = tol_error;
- int maxIters = iters;
-
- int n = mat.cols();
- x = precond.solve(x);
- VectorType r = rhs - mat * x;
- VectorType r0 = r;
-
- RealScalar r0_sqnorm = r0.squaredNorm();
- RealScalar rhs_sqnorm = rhs.squaredNorm();
- if(rhs_sqnorm == 0)
- {
- x.setZero();
- return true;
- }
- Scalar rho = 1;
- Scalar alpha = 1;
- Scalar w = 1;
-
- VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
- VectorType y(n), z(n);
- VectorType kt(n), ks(n);
-
- VectorType s(n), t(n);
-
- RealScalar tol2 = tol*tol;
- int i = 0;
- int restarts = 0;
-
- while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
- {
- Scalar rho_old = rho;
-
- rho = r0.dot(r);
- if (internal::isMuchSmallerThan(rho,r0_sqnorm))
- {
- // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
- // Let's restart with a new r0:
- r0 = r;
- rho = r0_sqnorm = r.squaredNorm();
- if(restarts++ == 0)
- i = 0;
- }
- Scalar beta = (rho/rho_old) * (alpha / w);
- p = r + beta * (p - w * v);
-
- y = precond.solve(p);
-
- v.noalias() = mat * y;
-
- alpha = rho / r0.dot(v);
- s = r - alpha * v;
-
- z = precond.solve(s);
- t.noalias() = mat * z;
-
- RealScalar tmp = t.squaredNorm();
- if(tmp>RealScalar(0))
- w = t.dot(s) / tmp;
- else
- w = Scalar(0);
- x += alpha * y + w * z;
- r = s - w * t;
- ++i;
- }
- tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
- iters = i;
- return true;
-}
-
-}
-
-template< typename _MatrixType,
- typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
-class BiCGSTAB;
-
-namespace internal {
-
-template< typename _MatrixType, typename _Preconditioner>
-struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
-{
- typedef _MatrixType MatrixType;
- typedef _Preconditioner Preconditioner;
-};
-
-}
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief A bi-conjugate gradient stabilized solver for sparse square problems
- *
- * This class allows solving A.x = b sparse linear problems using a bi-conjugate gradient
- * stabilized algorithm. The vectors x and b can be either dense or sparse.
- *
- * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
- * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
- *
- * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
- * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
- * and NumTraits<Scalar>::epsilon() for the tolerance.
- *
- * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
- * \include BiCGSTAB_simple.cpp
- *
- * By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * \include BiCGSTAB_step_by_step.cpp
- * Note that such a step-by-step execution is slightly slower.
- *
- * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
- */
-template< typename _MatrixType, typename _Preconditioner>
-class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
-{
- typedef IterativeSolverBase<BiCGSTAB> Base;
- using Base::mp_matrix;
- using Base::m_error;
- using Base::m_iterations;
- using Base::m_info;
- using Base::m_isInitialized;
-public:
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef _Preconditioner Preconditioner;
-
-public:
-
- /** Default constructor. */
- BiCGSTAB() : Base() {}
-
- /** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
- * This constructor is a shortcut for the default constructor followed
- * by a call to compute().
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- BiCGSTAB(const MatrixType& A) : Base(A) {}
-
- ~BiCGSTAB() {}
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * and \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
- }
-
- /** \internal */
- template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
- {
- bool failed = false;
- for(int j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
- failed = true;
- }
- m_info = failed ? NumericalIssue
- : m_error <= Base::m_tolerance ? Success
- : NoConvergence;
- m_isInitialized = true;
- }
-
- /** \internal */
- template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
- {
-// x.setZero();
- x = b;
- _solveWithGuess(b,x);
- }
-
-protected:
-
-};
-
-
-namespace internal {
-
- template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
- : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
-{
- typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_BICGSTAB_H
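The BiCGSTAB class removed above documents a step-by-step execution mode via solveWithGuess(); the referenced BiCGSTAB_step_by_step.cpp snippet is not part of this diff. A minimal sketch of that usage, assuming a current Eigen installation:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

// Sketch only: run BiCGSTAB one iteration at a time and print the error estimate.
void stepByStep(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b) {
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;   // DiagonalPreconditioner by default
  solver.compute(A);
  Eigen::VectorXd x = Eigen::VectorXd::Random(A.cols());  // random initial guess
  solver.setMaxIterations(1);
  int i = 0;
  do {
    x = solver.solveWithGuess(b, x);                      // one iteration per call
    std::cout << i << " : " << solver.error() << std::endl;
    ++i;
  } while (solver.info() != Eigen::Success && i < 100);
}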
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
deleted file mode 100644
index 3ce5179409..0000000000
--- a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ /dev/null
@@ -1,265 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CONJUGATE_GRADIENT_H
-#define EIGEN_CONJUGATE_GRADIENT_H
-
-namespace Eigen {
-
-namespace internal {
-
-/** \internal Low-level conjugate gradient algorithm
- * \param mat The matrix A
- * \param rhs The right hand side vector b
- * \param x On input the initial solution, on output the computed solution.
- * \param precond A preconditioner being able to efficiently solve for an
- * approximation of Ax=b (regardless of b)
- * \param iters On input the max number of iteration, on output the number of performed iterations.
- * \param tol_error On input the tolerance error, on output an estimation of the relative error.
- */
-template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
-EIGEN_DONT_INLINE
-void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
- const Preconditioner& precond, int& iters,
- typename Dest::RealScalar& tol_error)
-{
- using std::sqrt;
- using std::abs;
- typedef typename Dest::RealScalar RealScalar;
- typedef typename Dest::Scalar Scalar;
- typedef Matrix<Scalar,Dynamic,1> VectorType;
-
- RealScalar tol = tol_error;
- int maxIters = iters;
-
- int n = mat.cols();
-
- VectorType residual = rhs - mat * x; //initial residual
-
- RealScalar rhsNorm2 = rhs.squaredNorm();
- if(rhsNorm2 == 0)
- {
- x.setZero();
- iters = 0;
- tol_error = 0;
- return;
- }
- RealScalar threshold = tol*tol*rhsNorm2;
- RealScalar residualNorm2 = residual.squaredNorm();
- if (residualNorm2 < threshold)
- {
- iters = 0;
- tol_error = sqrt(residualNorm2 / rhsNorm2);
- return;
- }
-
- VectorType p(n);
- p = precond.solve(residual); //initial search direction
-
- VectorType z(n), tmp(n);
- RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
- int i = 0;
- while(i < maxIters)
- {
- tmp.noalias() = mat * p; // the bottleneck of the algorithm
-
- Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
- x += alpha * p; // update solution
- residual -= alpha * tmp; // update residue
-
- residualNorm2 = residual.squaredNorm();
- if(residualNorm2 < threshold)
- break;
-
- z = precond.solve(residual); // approximately solve for "A z = residual"
-
- RealScalar absOld = absNew;
- absNew = numext::real(residual.dot(z)); // update the absolute value of r
- RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
- p = z + beta * p; // update search direction
- i++;
- }
- tol_error = sqrt(residualNorm2 / rhsNorm2);
- iters = i;
-}
-
-}
-
-template< typename _MatrixType, int _UpLo=Lower,
- typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
-class ConjugateGradient;
-
-namespace internal {
-
-template< typename _MatrixType, int _UpLo, typename _Preconditioner>
-struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
-{
- typedef _MatrixType MatrixType;
- typedef _Preconditioner Preconditioner;
-};
-
-}
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems
- *
- * This class allows solving A.x = b linear problems using an iterative conjugate gradient algorithm.
- * The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse.
- *
- * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
- * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
- *
- * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
- * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
- * and NumTraits<Scalar>::epsilon() for the tolerance.
- *
- * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
- * \code
- * int n = 10000;
- * VectorXd x(n), b(n);
- * SparseMatrix<double> A(n,n);
- * // fill A and b
- * ConjugateGradient<SparseMatrix<double> > cg;
- * cg.compute(A);
- * x = cg.solve(b);
- * std::cout << "#iterations: " << cg.iterations() << std::endl;
- * std::cout << "estimated error: " << cg.error() << std::endl;
- * // update b, and solve again
- * x = cg.solve(b);
- * \endcode
- *
- * By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * \code
- * x = VectorXd::Random(n);
- * cg.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = cg.solveWithGuess(b,x);
- * std::cout << i << " : " << cg.error() << std::endl;
- * ++i;
- * } while (cg.info()!=Success && i<100);
- * \endcode
- * Note that such a step-by-step execution is slightly slower.
- *
- * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
- */
-template< typename _MatrixType, int _UpLo, typename _Preconditioner>
-class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
-{
- typedef IterativeSolverBase<ConjugateGradient> Base;
- using Base::mp_matrix;
- using Base::m_error;
- using Base::m_iterations;
- using Base::m_info;
- using Base::m_isInitialized;
-public:
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef _Preconditioner Preconditioner;
-
- enum {
- UpLo = _UpLo
- };
-
-public:
-
- /** Default constructor. */
- ConjugateGradient() : Base() {}
-
- /** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
- * This constructor is a shortcut for the default constructor followed
- * by a call to compute().
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- ConjugateGradient(const MatrixType& A) : Base(A) {}
-
- ~ConjugateGradient() {}
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * and \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<ConjugateGradient, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <ConjugateGradient, Rhs, Guess>(*this, b.derived(), x0);
- }
-
- /** \internal */
- template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- for(int j=0; j<b.cols(); ++j)
- {
- m_iterations = Base::maxIterations();
- m_error = Base::m_tolerance;
-
- typename Dest::ColXpr xj(x,j);
- internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
- Base::m_preconditioner, m_iterations, m_error);
- }
-
- m_isInitialized = true;
- m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
- }
-
- /** \internal */
- template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
- {
- x.setOnes();
- _solveWithGuess(b,x);
- }
-
-protected:
-
-};
-
-
-namespace internal {
-
-template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
-struct solve_retval<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
- : solve_retval_base<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
-{
- typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_CONJUGATE_GRADIENT_H
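The ConjugateGradient documentation above notes that the stopping criteria default to the problem size and machine epsilon. A minimal sketch of overriding them explicitly, assuming a current Eigen installation:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

// Sketch only: solve a selfadjoint system with explicit stopping criteria.
Eigen::VectorXd solveSelfadjoint(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b) {
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg;  // Lower part, diagonal preconditioner
  cg.setTolerance(1e-8);              // relative residual threshold
  cg.setMaxIterations(2 * A.cols());  // cap the iteration count explicitly
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "#iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << std::endl;
  return x;
}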
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
deleted file mode 100644
index b55afc1363..0000000000
--- a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ /dev/null
@@ -1,467 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_INCOMPLETE_LUT_H
-#define EIGEN_INCOMPLETE_LUT_H
-
-
-namespace Eigen {
-
-namespace internal {
-
-/** \internal
- * Compute a quick-sort split of a vector
- * On output, the vector row is permuted such that its elements satisfy
- * abs(row(i)) >= abs(row(ncut)) if i<ncut
- * abs(row(i)) <= abs(row(ncut)) if i>ncut
- * \param row The vector of values
- * \param ind The array of indices of the elements in @p row
- * \param ncut The number of largest elements to keep
- **/
-template <typename VectorV, typename VectorI, typename Index>
-Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
-{
- typedef typename VectorV::RealScalar RealScalar;
- using std::swap;
- using std::abs;
- Index mid;
- Index n = row.size(); /* length of the vector */
- Index first, last ;
-
- ncut--; /* to fit the zero-based indices */
- first = 0;
- last = n-1;
- if (ncut < first || ncut > last ) return 0;
-
- do {
- mid = first;
- RealScalar abskey = abs(row(mid));
- for (Index j = first + 1; j <= last; j++) {
- if ( abs(row(j)) > abskey) {
- ++mid;
- swap(row(mid), row(j));
- swap(ind(mid), ind(j));
- }
- }
- /* Interchange for the pivot element */
- swap(row(mid), row(first));
- swap(ind(mid), ind(first));
-
- if (mid > ncut) last = mid - 1;
- else if (mid < ncut ) first = mid + 1;
- } while (mid != ncut );
-
- return 0; /* mid is equal to ncut */
-}
-
-}// end namespace internal
-
-/** \ingroup IterativeLinearSolvers_Module
- * \class IncompleteLUT
- * \brief Incomplete LU factorization with dual-threshold strategy
- *
- * During the numerical factorization, two dropping rules are used:
- * 1) any element whose magnitude is less than some tolerance is dropped.
- * This tolerance is obtained by multiplying the input tolerance @p droptol
- * by the average magnitude of all the original elements in the current row.
- * 2) After the elimination of the row, only the @p fill largest elements in
- * the L part and the @p fill largest elements in the U part are kept
- * (in addition to the diagonal element). Note that @p fill is computed from
- * the input parameter @p fillfactor, which is used as the ratio to control the fill-in
- * relative to the initial number of nonzero elements.
- *
- * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
- * and when @p fill=n/2 with @p droptol different from zero.
- *
- * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
- * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
- *
- * NOTE : The following implementation is derived from the ILUT implementation
- * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
- * released under the terms of the GNU LGPL:
- * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
- * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
- * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
- * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
- * alternatively, on GMANE:
- * http://comments.gmane.org/gmane.comp.lib.eigen/3302
- */
-template <typename _Scalar>
-class IncompleteLUT : internal::noncopyable
-{
- typedef _Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef Matrix<Scalar,Dynamic,1> Vector;
- typedef SparseMatrix<Scalar,RowMajor> FactorType;
- typedef SparseMatrix<Scalar,ColMajor> PermutType;
- typedef typename FactorType::Index Index;
-
- public:
- typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
-
- IncompleteLUT()
- : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
- m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
- {}
-
- template<typename MatrixType>
- IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
- : m_droptol(droptol),m_fillfactor(fillfactor),
- m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
- {
- eigen_assert(fillfactor != 0);
- compute(mat);
- }
-
- Index rows() const { return m_lu.rows(); }
-
- Index cols() const { return m_lu.cols(); }
-
- /** \brief Reports whether previous computation was successful.
- *
- * \returns \c Success if the computation was successful,
- * \c NumericalIssue if the matrix appears to be negative.
- */
- ComputationInfo info() const
- {
- eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
- return m_info;
- }
-
- template<typename MatrixType>
- void analyzePattern(const MatrixType& amat);
-
- template<typename MatrixType>
- void factorize(const MatrixType& amat);
-
- /**
- * Compute an incomplete LU factorization with dual threshold on the matrix mat.
- * No pivoting is done in this version.
- *
- **/
- template<typename MatrixType>
- IncompleteLUT<Scalar>& compute(const MatrixType& amat)
- {
- analyzePattern(amat);
- factorize(amat);
- m_isInitialized = m_factorizationIsOk;
- return *this;
- }
-
- void setDroptol(const RealScalar& droptol);
- void setFillfactor(int fillfactor);
-
- template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
- {
- x = m_Pinv * b;
- x = m_lu.template triangularView<UnitLower>().solve(x);
- x = m_lu.template triangularView<Upper>().solve(x);
- x = m_P * x;
- }
-
- template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
- eigen_assert(cols()==b.rows()
- && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
- }
-
-protected:
-
- /** keeps off-diagonal entries; drops diagonal entries */
- struct keep_diag {
- inline bool operator() (const Index& row, const Index& col, const Scalar&) const
- {
- return row!=col;
- }
- };
-
-protected:
-
- FactorType m_lu;
- RealScalar m_droptol;
- int m_fillfactor;
- bool m_analysisIsOk;
- bool m_factorizationIsOk;
- bool m_isInitialized;
- ComputationInfo m_info;
- PermutationMatrix<Dynamic,Dynamic,Index> m_P; // Fill-reducing permutation
- PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // Inverse permutation
-};
-
-/**
- * Set control parameter droptol
- * \param droptol Drop any element whose magnitude is less than this tolerance
- **/
-template<typename Scalar>
-void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
-{
- this->m_droptol = droptol;
-}
-
-/**
- * Set control parameter fillfactor
- * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
- **/
-template<typename Scalar>
-void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
-{
- this->m_fillfactor = fillfactor;
-}
-
-template <typename Scalar>
-template<typename _MatrixType>
-void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
-{
- // Compute the Fill-reducing permutation
- SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
- SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
- // Symmetrize the pattern
- // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
- // On the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
- SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
- AtA.prune(keep_diag());
- internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
-
- m_Pinv = m_P.inverse(); // ... and the inverse permutation
-
- m_analysisIsOk = true;
-}
-
-template <typename Scalar>
-template<typename _MatrixType>
-void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
-{
- using std::sqrt;
- using std::swap;
- using std::abs;
-
- eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
- Index n = amat.cols(); // Size of the matrix
- m_lu.resize(n,n);
- // Declare Working vectors and variables
- Vector u(n) ; // real values of the row -- maximum size is n --
- VectorXi ju(n); // column position of the values in u -- maximum size is n
- VectorXi jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
-
- // Apply the fill-reducing permutation
- eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
- SparseMatrix<Scalar,RowMajor, Index> mat;
- mat = amat.twistedBy(m_Pinv);
-
- // Initialization
- jr.fill(-1);
- ju.fill(0);
- u.fill(0);
-
- // number of largest elements to keep in each row:
- Index fill_in = static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
- if (fill_in > n) fill_in = n;
-
- // number of largest nonzero elements to keep in the L and the U part of the current row:
- Index nnzL = fill_in/2;
- Index nnzU = nnzL;
- m_lu.reserve(n * (nnzL + nnzU + 1));
-
- // global loop over the rows of the sparse matrix
- for (Index ii = 0; ii < n; ii++)
- {
- // 1 - copy the lower and the upper part of the row i of mat in the working vector u
-
- Index sizeu = 1; // number of nonzero elements in the upper part of the current row
- Index sizel = 0; // number of nonzero elements in the lower part of the current row
- ju(ii) = ii;
- u(ii) = 0;
- jr(ii) = ii;
- RealScalar rownorm = 0;
-
- typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
- for (; j_it; ++j_it)
- {
- Index k = j_it.index();
- if (k < ii)
- {
- // copy the lower part
- ju(sizel) = k;
- u(sizel) = j_it.value();
- jr(k) = sizel;
- ++sizel;
- }
- else if (k == ii)
- {
- u(ii) = j_it.value();
- }
- else
- {
- // copy the upper part
- Index jpos = ii + sizeu;
- ju(jpos) = k;
- u(jpos) = j_it.value();
- jr(k) = jpos;
- ++sizeu;
- }
- rownorm += numext::abs2(j_it.value());
- }
-
- // 2 - detect possible zero row
- if(rownorm==0)
- {
- m_info = NumericalIssue;
- return;
- }
- // Take the 2-norm of the current row as a relative tolerance
- rownorm = sqrt(rownorm);
-
- // 3 - eliminate the previous nonzero rows
- Index jj = 0;
- Index len = 0;
- while (jj < sizel)
- {
- // In order to eliminate in the correct order,
- // we must select first the smallest column index among ju(jj:sizel)
- Index k;
- Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
- k += jj;
- if (minrow != ju(jj))
- {
- // swap the two locations
- Index j = ju(jj);
- swap(ju(jj), ju(k));
- jr(minrow) = jj; jr(j) = k;
- swap(u(jj), u(k));
- }
- // Reset this location
- jr(minrow) = -1;
-
- // Start elimination
- typename FactorType::InnerIterator ki_it(m_lu, minrow);
- while (ki_it && ki_it.index() < minrow) ++ki_it;
- eigen_internal_assert(ki_it && ki_it.col()==minrow);
- Scalar fact = u(jj) / ki_it.value();
-
- // drop too small elements
- if(abs(fact) <= m_droptol)
- {
- jj++;
- continue;
- }
-
- // linear combination of the current row ii and the row minrow
- ++ki_it;
- for (; ki_it; ++ki_it)
- {
- Scalar prod = fact * ki_it.value();
- Index j = ki_it.index();
- Index jpos = jr(j);
- if (jpos == -1) // fill-in element
- {
- Index newpos;
- if (j >= ii) // dealing with the upper part
- {
- newpos = ii + sizeu;
- sizeu++;
- eigen_internal_assert(sizeu<=n);
- }
- else // dealing with the lower part
- {
- newpos = sizel;
- sizel++;
- eigen_internal_assert(sizel<=ii);
- }
- ju(newpos) = j;
- u(newpos) = -prod;
- jr(j) = newpos;
- }
- else
- u(jpos) -= prod;
- }
- // store the pivot element
- u(len) = fact;
- ju(len) = minrow;
- ++len;
-
- jj++;
- } // end of the elimination on the row ii
-
- // reset the upper part of the pointer jr to zero
- for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
-
- // 4 - partially sort and insert the elements in the m_lu matrix
-
- // sort the L-part of the row
- sizel = len;
- len = (std::min)(sizel, nnzL);
- typename Vector::SegmentReturnType ul(u.segment(0, sizel));
- typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
- internal::QuickSplit(ul, jul, len);
-
- // store the largest m_fill elements of the L part
- m_lu.startVec(ii);
- for(Index k = 0; k < len; k++)
- m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
-
- // store the diagonal element
- // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
- if (u(ii) == Scalar(0))
- u(ii) = sqrt(m_droptol) * rownorm;
- m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
-
- // sort the U-part of the row
- // apply the dropping rule first
- len = 0;
- for(Index k = 1; k < sizeu; k++)
- {
- if(abs(u(ii+k)) > m_droptol * rownorm )
- {
- ++len;
- u(ii + len) = u(ii + k);
- ju(ii + len) = ju(ii + k);
- }
- }
- sizeu = len + 1; // +1 to take into account the diagonal element
- len = (std::min)(sizeu, nnzU);
- typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
- typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
- internal::QuickSplit(uu, juu, len);
-
- // store the largest elements of the U part
- for(Index k = ii + 1; k < ii + len; k++)
- m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
- }
-
- m_lu.finalize();
- m_lu.makeCompressed();
-
- m_factorizationIsOk = true;
- m_info = Success;
-}
-
-namespace internal {
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
- : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
-{
- typedef IncompleteLUT<_MatrixType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_INCOMPLETE_LUT_H
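IncompleteLUT is meant to be plugged into an iterative solver as its preconditioner. A minimal sketch, assuming a current Eigen installation, of pairing it with BiCGSTAB and tuning its two dropping parameters through preconditioner():

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Sketch only: BiCGSTAB preconditioned with a dual-threshold incomplete LU.
Eigen::VectorXd solveWithILUT(const Eigen::SparseMatrix<double>& A,
                              const Eigen::VectorXd& b) {
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
                  Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-4);   // dropping rule 1: magnitude threshold
  solver.preconditioner().setFillfactor(10);  // dropping rule 2: kept entries per row
  solver.compute(A);                          // analyzePattern() + factorize()
  return solver.solve(b);
}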
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
deleted file mode 100644
index 2036922d69..0000000000
--- a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
-#define EIGEN_ITERATIVE_SOLVER_BASE_H
-
-namespace Eigen {
-
-/** \ingroup IterativeLinearSolvers_Module
- * \brief Base class for linear iterative solvers
- *
- * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
- */
-template< typename Derived>
-class IterativeSolverBase : internal::noncopyable
-{
-public:
- typedef typename internal::traits<Derived>::MatrixType MatrixType;
- typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::RealScalar RealScalar;
-
-public:
-
- Derived& derived() { return *static_cast<Derived*>(this); }
- const Derived& derived() const { return *static_cast<const Derived*>(this); }
-
- /** Default constructor. */
- IterativeSolverBase()
- : mp_matrix(0)
- {
- init();
- }
-
- /** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
- * This constructor is a shortcut for the default constructor followed
- * by a call to compute().
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- IterativeSolverBase(const MatrixType& A)
- {
- init();
- compute(A);
- }
-
- ~IterativeSolverBase() {}
-
- /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
- *
- * Currently, this function mostly calls analyzePattern on the preconditioner. In the future
- * we might, for instance, implement column reordering for faster matrix-vector products.
- */
- Derived& analyzePattern(const MatrixType& A)
- {
- m_preconditioner.analyzePattern(A);
- m_isInitialized = true;
- m_analysisIsOk = true;
- m_info = Success;
- return derived();
- }
-
- /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
- *
- * Currently, this function mostly calls factorize on the preconditioner.
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- Derived& factorize(const MatrixType& A)
- {
- eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
- mp_matrix = &A;
- m_preconditioner.factorize(A);
- m_factorizationIsOk = true;
- m_info = Success;
- return derived();
- }
-
- /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
- *
- * Currently, this function mostly initializes/computes the preconditioner. In the future
- * we might, for instance, implement column reordering for faster matrix-vector products.
- *
- * \warning this class stores a reference to the matrix A as well as some
- * precomputed values that depend on it. Therefore, if \a A is changed
- * this class becomes invalid. Call compute() to update it with the new
- * matrix A, or modify a copy of A.
- */
- Derived& compute(const MatrixType& A)
- {
- mp_matrix = &A;
- m_preconditioner.compute(A);
- m_isInitialized = true;
- m_analysisIsOk = true;
- m_factorizationIsOk = true;
- m_info = Success;
- return derived();
- }
-
- /** \internal */
- Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
- /** \internal */
- Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; }
-
- /** \returns the tolerance threshold used by the stopping criteria */
- RealScalar tolerance() const { return m_tolerance; }
-
- /** Sets the tolerance threshold used by the stopping criteria */
- Derived& setTolerance(const RealScalar& tolerance)
- {
- m_tolerance = tolerance;
- return derived();
- }
-
- /** \returns a read-write reference to the preconditioner for custom configuration. */
- Preconditioner& preconditioner() { return m_preconditioner; }
-
- /** \returns a read-only reference to the preconditioner. */
- const Preconditioner& preconditioner() const { return m_preconditioner; }
-
- /** \returns the max number of iterations */
- int maxIterations() const
- {
- return (mp_matrix && m_maxIterations<0) ? mp_matrix->cols() : m_maxIterations;
- }
-
- /** Sets the max number of iterations */
- Derived& setMaxIterations(int maxIters)
- {
- m_maxIterations = maxIters;
- return derived();
- }
-
- /** \returns the number of iterations performed during the last solve */
- int iterations() const
- {
- eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
- return m_iterations;
- }
-
- /** \returns the tolerance error reached during the last solve */
- RealScalar error() const
- {
- eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
- return m_error;
- }
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
- *
- * \sa compute()
- */
- template<typename Rhs> inline const internal::solve_retval<Derived, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
- eigen_assert(rows()==b.rows()
- && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<Derived, Rhs>(derived(), b.derived());
- }
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
- *
- * \sa compute()
- */
- template<typename Rhs>
- inline const internal::sparse_solve_retval<IterativeSolverBase, Rhs>
- solve(const SparseMatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
- eigen_assert(rows()==b.rows()
- && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
- return internal::sparse_solve_retval<IterativeSolverBase, Rhs>(*this, b.derived());
- }
-
- /** \returns Success if the iterations converged, and NoConvergence otherwise. */
- ComputationInfo info() const
- {
- eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
- return m_info;
- }
-
- /** \internal */
- template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
- void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
- {
- eigen_assert(rows()==b.rows());
-
- int rhsCols = b.cols();
- int size = b.rows();
- Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
- Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
- for(int k=0; k<rhsCols; ++k)
- {
- tb = b.col(k);
- tx = derived().solve(tb);
- dest.col(k) = tx.sparseView(0);
- }
- }
-
-protected:
- void init()
- {
- m_isInitialized = false;
- m_analysisIsOk = false;
- m_factorizationIsOk = false;
- m_maxIterations = -1;
- m_tolerance = NumTraits<Scalar>::epsilon();
- }
- const MatrixType* mp_matrix;
- Preconditioner m_preconditioner;
-
- int m_maxIterations;
- RealScalar m_tolerance;
-
- mutable RealScalar m_error;
- mutable int m_iterations;
- mutable ComputationInfo m_info;
- mutable bool m_isInitialized, m_analysisIsOk, m_factorizationIsOk;
-};
-
-namespace internal {
-
-template<typename Derived, typename Rhs>
-struct sparse_solve_retval<IterativeSolverBase<Derived>, Rhs>
- : sparse_solve_retval_base<IterativeSolverBase<Derived>, Rhs>
-{
- typedef IterativeSolverBase<Derived> Dec;
- EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec().derived()._solve_sparse(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
-} // end namespace Eigen
-
-#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
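Because every solver in this module derives from IterativeSolverBase, the control and query interface (setTolerance(), setMaxIterations(), iterations(), error(), info()) is shared. A small hypothetical helper, assuming a current Eigen installation, that works for any of these solvers after a solve:

#include <Eigen/IterativeLinearSolvers>
#include <iostream>

// Sketch only: report the state of any iterative solver after a call to solve().
template <typename Solver>
void reportSolve(const Solver& solver) {
  std::cout << "max iterations: " << solver.maxIterations()
            << ", tolerance: " << solver.tolerance() << "\n"
            << "iterations performed: " << solver.iterations()
            << ", estimated error: " << solver.error()
            << ", converged: " << (solver.info() == Eigen::Success ? "yes" : "no")
            << std::endl;
}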