From f41959ccb2d9d4c722fe8fc3351401d53bcf4900 Mon Sep 17 00:00:00 2001
From: Manjunath Kudlur
Date: Fri, 6 Nov 2015 16:27:58 -0800
Subject: TensorFlow: Initial commit of TensorFlow library.

TensorFlow is an open source software library for numerical computation using
data flow graphs.

Base CL: 107276108
---
 .../IterativeLinearSolvers/BasicPreconditioners.h  | 149 +++++++
 .../Eigen/src/IterativeLinearSolvers/BiCGSTAB.h    | 254 +++++++++++
 .../src/IterativeLinearSolvers/ConjugateGradient.h | 265 ++++++++++++
 .../src/IterativeLinearSolvers/IncompleteLUT.h     | 467 +++++++++++++++++++++
 .../IterativeLinearSolvers/IterativeSolverBase.h   | 254 +++++++++++
 5 files changed, 1389 insertions(+)
 create mode 100644 third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
 create mode 100644 third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
 create mode 100644 third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
 create mode 100644 third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
 create mode 100644 third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h

diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
new file mode 100644
index 0000000000..1f3c060d02
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BASIC_PRECONDITIONERS_H
+#define EIGEN_BASIC_PRECONDITIONERS_H
+
+namespace Eigen {
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A preconditioner based on the diagonal entries
+  *
+  * This class allows one to approximately solve A.x = b problems, assuming A is a diagonal matrix.
+  * In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
+  * \code
+  * A.diagonal().asDiagonal() . x = b
+  * \endcode
+  *
+  * \tparam _Scalar the type of the scalar.
+  *
+  * This preconditioner is suitable for both selfadjoint and general problems.
+  * The diagonal entries are pre-inverted and stored in a dense vector.
+  *
+  * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
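+  *
+  * A minimal usage sketch (illustrative only; it assumes a filled, square SparseMatrix<double> A and
+  * a right-hand side VectorXd b of matching size):
+  * \code
+  * DiagonalPreconditioner<double> precond(A);  // pre-inverts diag(A)
+  * VectorXd x = precond.solve(b);              // x = diag(A)^{-1} * b
+  * \endcode
+  * The iterative solvers below use this implicitly: they build the preconditioner in compute()
+  * and apply precond.solve() at each iteration.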
+  *
+  */
+template <typename _Scalar>
+class DiagonalPreconditioner
+{
+    typedef _Scalar Scalar;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef typename Vector::Index Index;
+
+  public:
+    // this typedef is only to export the scalar type and compile-time dimensions to solve_retval
+    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+    DiagonalPreconditioner() : m_isInitialized(false) {}
+
+    template<typename MatType>
+    DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
+    {
+      compute(mat);
+    }
+
+    Index rows() const { return m_invdiag.size(); }
+    Index cols() const { return m_invdiag.size(); }
+
+    template<typename MatType>
+    DiagonalPreconditioner& analyzePattern(const MatType& )
+    {
+      return *this;
+    }
+
+    template<typename MatType>
+    DiagonalPreconditioner& factorize(const MatType& mat)
+    {
+      m_invdiag.resize(mat.cols());
+      for(int j=0; j<mat.outerSize(); ++j)
+      {
+        typename MatType::InnerIterator it(mat,j);
+        while(it && it.index()!=j) ++it;
+        if(it && it.index()==j)
+          m_invdiag(j) = Scalar(1)/it.value();
+        else
+          m_invdiag(j) = 1;
+      }
+      m_isInitialized = true;
+      return *this;
+    }
+
+    template<typename MatType>
+    DiagonalPreconditioner& compute(const MatType& mat)
+    {
+      return factorize(mat);
+    }
+
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      x = m_invdiag.array() * b.array() ;
+    }
+
+    template<typename Rhs> inline const internal::solve_retval<DiagonalPreconditioner, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
+      eigen_assert(m_invdiag.size()==b.rows()
+                && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<DiagonalPreconditioner, Rhs>(*this, b.derived());
+    }
+
+  protected:
+    Vector m_invdiag;
+    bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<DiagonalPreconditioner<_MatrixType>, Rhs>
+  : solve_retval_base<DiagonalPreconditioner<_MatrixType>, Rhs>
+{
+  typedef DiagonalPreconditioner<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A naive preconditioner which approximates any matrix as the identity matrix
+  *
+  * \sa class DiagonalPreconditioner
+  */
+class IdentityPreconditioner
+{
+  public:
+
+    IdentityPreconditioner() {}
+
+    template<typename MatrixType>
+    IdentityPreconditioner(const MatrixType& ) {}
+
+    template<typename MatrixType>
+    IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
+
+    template<typename MatrixType>
+    IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
+
+    template<typename MatrixType>
+    IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
+
+    template<typename Rhs>
+    inline const Rhs& solve(const Rhs& b) const { return b; }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_BASIC_PRECONDITIONERS_H
diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
new file mode 100644
index 0000000000..7a46b51fa6
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud
+// Copyright (C) 2012 Désiré Nuentsa-Wakam
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BICGSTAB_H
+#define EIGEN_BICGSTAB_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Low-level bi conjugate gradient stabilized algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+ * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + * \return false in the case of numerical issue, for example a break down of BiCGSTAB. + */ +template +bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, int& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + RealScalar tol = tol_error; + int maxIters = iters; + + int n = mat.cols(); + x = precond.solve(x); + VectorType r = rhs - mat * x; + VectorType r0 = r; + + RealScalar r0_sqnorm = r0.squaredNorm(); + RealScalar rhs_sqnorm = rhs.squaredNorm(); + if(rhs_sqnorm == 0) + { + x.setZero(); + return true; + } + Scalar rho = 1; + Scalar alpha = 1; + Scalar w = 1; + + VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); + VectorType y(n), z(n); + VectorType kt(n), ks(n); + + VectorType s(n), t(n); + + RealScalar tol2 = tol*tol; + int i = 0; + int restarts = 0; + + while ( r.squaredNorm()/rhs_sqnorm > tol2 && iRealScalar(0)) + w = t.dot(s) / tmp; + else + w = Scalar(0); + x += alpha * y + w * z; + r = s - w * t; + ++i; + } + tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); + iters = i; + return true; +} + +} + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner > +class BiCGSTAB; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A bi conjugate gradient stabilized solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient + * stabilized algorithm. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \include BiCGSTAB_simple.cpp + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * \include BiCGSTAB_step_by_step.cpp + * Note that such a step by step excution is slightly slower. 
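+  *
+  * Since BiCGSTAB_step_by_step.cpp is not reproduced here, an equivalent inline sketch
+  * (illustrative only, mirroring the ConjugateGradient example further below) looks like this:
+  * \code
+  * BiCGSTAB<SparseMatrix<double> > solver(A);
+  * VectorXd x = VectorXd::Random(n);
+  * solver.setMaxIterations(1);
+  * int i = 0;
+  * do {
+  *   x = solver.solveWithGuess(b, x);
+  *   std::cout << i << " : " << solver.error() << std::endl;
+  *   ++i;
+  * } while (solver.info() != Success && i < 100);
+  * \endcode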
+ * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class BiCGSTAB : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + BiCGSTAB() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + BiCGSTAB(const MatrixType& A) : Base(A) {} + + ~BiCGSTAB() {} + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. + * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "BiCGSTAB is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + bool failed = false; + for(int j=0; j + void _solve(const Rhs& b, Dest& x) const + { +// x.setZero(); + x = b; + _solveWithGuess(b,x); + } + +protected: + +}; + + +namespace internal { + + template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BICGSTAB_H diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h new file mode 100644 index 0000000000..3ce5179409 --- /dev/null +++ b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -0,0 +1,265 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONJUGATE_GRADIENT_H +#define EIGEN_CONJUGATE_GRADIENT_H + +namespace Eigen { + +namespace internal { + +/** \internal Low-level conjugate gradient algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. 
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ +template +EIGEN_DONT_INLINE +void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, int& iters, + typename Dest::RealScalar& tol_error) +{ + using std::sqrt; + using std::abs; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + RealScalar tol = tol_error; + int maxIters = iters; + + int n = mat.cols(); + + VectorType residual = rhs - mat * x; //initial residual + + RealScalar rhsNorm2 = rhs.squaredNorm(); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + RealScalar threshold = tol*tol*rhsNorm2; + RealScalar residualNorm2 = residual.squaredNorm(); + if (residualNorm2 < threshold) + { + iters = 0; + tol_error = sqrt(residualNorm2 / rhsNorm2); + return; + } + + VectorType p(n); + p = precond.solve(residual); //initial search direction + + VectorType z(n), tmp(n); + RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM + int i = 0; + while(i < maxIters) + { + tmp.noalias() = mat * p; // the bottleneck of the algorithm + + Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir + x += alpha * p; // update solution + residual -= alpha * tmp; // update residue + + residualNorm2 = residual.squaredNorm(); + if(residualNorm2 < threshold) + break; + + z = precond.solve(residual); // approximately solve for "A z = residual" + + RealScalar absOld = absNew; + absNew = numext::real(residual.dot(z)); // update the absolute value of r + RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction + p = z + beta * p; // update search direction + i++; + } + tol_error = sqrt(residualNorm2 / rhsNorm2); + iters = i; +} + +} + +template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = DiagonalPreconditioner > +class ConjugateGradient; + +namespace internal { + +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A conjugate gradient solver for sparse (or dense) self-adjoint problems + * + * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm. + * The matrix A must be selfadjoint. The matrix A and the vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. 
Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * ConjugateGradient > cg; + * cg.compute(A); + * x = cg.solve(b); + * std::cout << "#iterations: " << cg.iterations() << std::endl; + * std::cout << "estimated error: " << cg.error() << std::endl; + * // update b, and solve again + * x = cg.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * * \code + * x = VectorXd::Random(n); + * cg.setMaxIterations(1); + * int i = 0; + * do { + * x = cg.solveWithGuess(b,x); + * std::cout << i << " : " << cg.error() << std::endl; + * ++i; + * } while (cg.info()!=Success && i<100); + * \endcode + * Note that such a step by step excution is slightly slower. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, int _UpLo, typename _Preconditioner> +class ConjugateGradient : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; +public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum { + UpLo = _UpLo + }; + +public: + + /** Default constructor. */ + ConjugateGradient() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + ConjugateGradient(const MatrixType& A) : Base(A) {} + + ~ConjugateGradient() {} + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. + * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + for(int j=0; jtemplate selfadjointView(), b.col(j), xj, + Base::m_preconditioner, m_iterations, m_error); + } + + m_isInitialized = true; + m_info = m_error <= Base::m_tolerance ? 
Success : NoConvergence; + } + + /** \internal */ + template + void _solve(const Rhs& b, Dest& x) const + { + x.setOnes(); + _solveWithGuess(b,x); + } + +protected: + +}; + + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONJUGATE_GRADIENT_H diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h new file mode 100644 index 0000000000..b55afc1363 --- /dev/null +++ b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -0,0 +1,467 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_INCOMPLETE_LUT_H +#define EIGEN_INCOMPLETE_LUT_H + + +namespace Eigen { + +namespace internal { + +/** \internal + * Compute a quick-sort split of a vector + * On output, the vector row is permuted such that its elements satisfy + * abs(row(i)) >= abs(row(ncut)) if incut + * \param row The vector of values + * \param ind The array of index for the elements in @p row + * \param ncut The number of largest elements to keep + **/ +template +Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) +{ + typedef typename VectorV::RealScalar RealScalar; + using std::swap; + using std::abs; + Index mid; + Index n = row.size(); /* length of the vector */ + Index first, last ; + + ncut--; /* to fit the zero-based indices */ + first = 0; + last = n-1; + if (ncut < first || ncut > last ) return 0; + + do { + mid = first; + RealScalar abskey = abs(row(mid)); + for (Index j = first + 1; j <= last; j++) { + if ( abs(row(j)) > abskey) { + ++mid; + swap(row(mid), row(j)); + swap(ind(mid), ind(j)); + } + } + /* Interchange for the pivot element */ + swap(row(mid), row(first)); + swap(ind(mid), ind(first)); + + if (mid > ncut) last = mid - 1; + else if (mid < ncut ) first = mid + 1; + } while (mid != ncut ); + + return 0; /* mid is equal to ncut */ +} + +}// end namespace internal + +/** \ingroup IterativeLinearSolvers_Module + * \class IncompleteLUT + * \brief Incomplete LU factorization with dual-threshold strategy + * + * During the numerical factorization, two dropping rules are used : + * 1) any element whose magnitude is less than some tolerance is dropped. + * This tolerance is obtained by multiplying the input tolerance @p droptol + * by the average magnitude of all the original elements in the current row. + * 2) After the elimination of the row, only the @p fill largest elements in + * the L part and the @p fill largest elements in the U part are kept + * (in addition to the diagonal element ). Note that @p fill is computed from + * the input parameter @p fillfactor which is used the ratio to control the fill_in + * relatively to the initial number of nonzero elements. + * + * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) + * and when @p fill=n/2 with @p droptol being different to zero. 
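+  *
+  * A minimal usage sketch (illustrative only; the droptol/fillfactor values are arbitrary and A is
+  * assumed to be a filled, square SparseMatrix<double>):
+  * \code
+  * BiCGSTAB<SparseMatrix<double>, IncompleteLUT<double> > solver;
+  * solver.preconditioner().setDroptol(1e-4);   // drop entries below 1e-4 times the row's mean magnitude
+  * solver.preconditioner().setFillfactor(20);  // keep roughly 20x the original nonzeros per row
+  * solver.compute(A);
+  * VectorXd x = solver.solve(b);
+  * \endcode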
+ * + * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, + * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. + * + * NOTE : The following implementation is derived from the ILUT implementation + * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota + * released under the terms of the GNU LGPL: + * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README + * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. + * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: + * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html + * alternatively, on GMANE: + * http://comments.gmane.org/gmane.comp.lib.eigen/3302 + */ +template +class IncompleteLUT : internal::noncopyable +{ + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix Vector; + typedef SparseMatrix FactorType; + typedef SparseMatrix PermutType; + typedef typename FactorType::Index Index; + + public: + typedef Matrix MatrixType; + + IncompleteLUT() + : m_droptol(NumTraits::dummy_precision()), m_fillfactor(10), + m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false) + {} + + template + IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits::dummy_precision(), int fillfactor = 10) + : m_droptol(droptol),m_fillfactor(fillfactor), + m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false) + { + eigen_assert(fillfactor != 0); + compute(mat); + } + + Index rows() const { return m_lu.rows(); } + + Index cols() const { return m_lu.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. 
+ */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + return m_info; + } + + template + void analyzePattern(const MatrixType& amat); + + template + void factorize(const MatrixType& amat); + + /** + * Compute an incomplete LU factorization with dual threshold on the matrix mat + * No pivoting is done in this version + * + **/ + template + IncompleteLUT& compute(const MatrixType& amat) + { + analyzePattern(amat); + factorize(amat); + m_isInitialized = m_factorizationIsOk; + return *this; + } + + void setDroptol(const RealScalar& droptol); + void setFillfactor(int fillfactor); + + template + void _solve(const Rhs& b, Dest& x) const + { + x = m_Pinv * b; + x = m_lu.template triangularView().solve(x); + x = m_lu.template triangularView().solve(x); + x = m_P * x; + } + + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); + eigen_assert(cols()==b.rows() + && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + +protected: + + /** keeps off-diagonal entries; drops diagonal entries */ + struct keep_diag { + inline bool operator() (const Index& row, const Index& col, const Scalar&) const + { + return row!=col; + } + }; + +protected: + + FactorType m_lu; + RealScalar m_droptol; + int m_fillfactor; + bool m_analysisIsOk; + bool m_factorizationIsOk; + bool m_isInitialized; + ComputationInfo m_info; + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation +}; + +/** + * Set control parameter droptol + * \param droptol Drop any element whose magnitude is less than this tolerance + **/ +template +void IncompleteLUT::setDroptol(const RealScalar& droptol) +{ + this->m_droptol = droptol; +} + +/** + * Set control parameter fillfactor + * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. + **/ +template +void IncompleteLUT::setFillfactor(int fillfactor) +{ + this->m_fillfactor = fillfactor; +} + +template +template +void IncompleteLUT::analyzePattern(const _MatrixType& amat) +{ + // Compute the Fill-reducing permutation + SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); + // Symmetrize the pattern + // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. + // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... + SparseMatrix AtA = mat2 + mat1; + AtA.prune(keep_diag()); + internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... + + m_Pinv = m_P.inverse(); // ... 
and the inverse permutation + + m_analysisIsOk = true; +} + +template +template +void IncompleteLUT::factorize(const _MatrixType& amat) +{ + using std::sqrt; + using std::swap; + using std::abs; + + eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); + Index n = amat.cols(); // Size of the matrix + m_lu.resize(n,n); + // Declare Working vectors and variables + Vector u(n) ; // real values of the row -- maximum size is n -- + VectorXi ju(n); // column position of the values in u -- maximum size is n + VectorXi jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 + + // Apply the fill-reducing permutation + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + SparseMatrix mat; + mat = amat.twistedBy(m_Pinv); + + // Initialization + jr.fill(-1); + ju.fill(0); + u.fill(0); + + // number of largest elements to keep in each row: + Index fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; + if (fill_in > n) fill_in = n; + + // number of largest nonzero elements to keep in the L and the U part of the current row: + Index nnzL = fill_in/2; + Index nnzU = nnzL; + m_lu.reserve(n * (nnzL + nnzU + 1)); + + // global loop over the rows of the sparse matrix + for (Index ii = 0; ii < n; ii++) + { + // 1 - copy the lower and the upper part of the row i of mat in the working vector u + + Index sizeu = 1; // number of nonzero elements in the upper part of the current row + Index sizel = 0; // number of nonzero elements in the lower part of the current row + ju(ii) = ii; + u(ii) = 0; + jr(ii) = ii; + RealScalar rownorm = 0; + + typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii + for (; j_it; ++j_it) + { + Index k = j_it.index(); + if (k < ii) + { + // copy the lower part + ju(sizel) = k; + u(sizel) = j_it.value(); + jr(k) = sizel; + ++sizel; + } + else if (k == ii) + { + u(ii) = j_it.value(); + } + else + { + // copy the upper part + Index jpos = ii + sizeu; + ju(jpos) = k; + u(jpos) = j_it.value(); + jr(k) = jpos; + ++sizeu; + } + rownorm += numext::abs2(j_it.value()); + } + + // 2 - detect possible zero row + if(rownorm==0) + { + m_info = NumericalIssue; + return; + } + // Take the 2-norm of the current row as a relative tolerance + rownorm = sqrt(rownorm); + + // 3 - eliminate the previous nonzero rows + Index jj = 0; + Index len = 0; + while (jj < sizel) + { + // In order to eliminate in the correct order, + // we must select first the smallest column index among ju(jj:sizel) + Index k; + Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + k += jj; + if (minrow != ju(jj)) + { + // swap the two locations + Index j = ju(jj); + swap(ju(jj), ju(k)); + jr(minrow) = jj; jr(j) = k; + swap(u(jj), u(k)); + } + // Reset this location + jr(minrow) = -1; + + // Start elimination + typename FactorType::InnerIterator ki_it(m_lu, minrow); + while (ki_it && ki_it.index() < minrow) ++ki_it; + eigen_internal_assert(ki_it && ki_it.col()==minrow); + Scalar fact = u(jj) / ki_it.value(); + + // drop too small elements + if(abs(fact) <= m_droptol) + { + jj++; + continue; + } + + // linear combination of the current row ii and the row minrow + ++ki_it; + for (; ki_it; ++ki_it) + { + Scalar prod = fact * ki_it.value(); + Index j = ki_it.index(); + Index jpos = jr(j); + if (jpos == -1) // fill-in element + { + Index newpos; + if (j >= ii) // dealing with the upper part + { + newpos = ii + sizeu; + sizeu++; + 
eigen_internal_assert(sizeu<=n); + } + else // dealing with the lower part + { + newpos = sizel; + sizel++; + eigen_internal_assert(sizel<=ii); + } + ju(newpos) = j; + u(newpos) = -prod; + jr(j) = newpos; + } + else + u(jpos) -= prod; + } + // store the pivot element + u(len) = fact; + ju(len) = minrow; + ++len; + + jj++; + } // end of the elimination on the row ii + + // reset the upper part of the pointer jr to zero + for(Index k = 0; k m_droptol * rownorm ) + { + ++len; + u(ii + len) = u(ii + k); + ju(ii + len) = ju(ii + k); + } + } + sizeu = len + 1; // +1 to take into account the diagonal element + len = (std::min)(sizeu, nnzU); + typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); + typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); + internal::QuickSplit(uu, juu, len); + + // store the largest elements of the U part + for(Index k = ii + 1; k < ii + len; k++) + m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); + } + + m_lu.finalize(); + m_lu.makeCompressed(); + + m_factorizationIsOk = true; + m_info = Success; +} + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef IncompleteLUT<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LUT_H diff --git a/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h new file mode 100644 index 0000000000..2036922d69 --- /dev/null +++ b/third_party/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -0,0 +1,254 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H +#define EIGEN_ITERATIVE_SOLVER_BASE_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \brief Base class for linear iterative solvers + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename Derived> +class IterativeSolverBase : internal::noncopyable +{ +public: + typedef typename internal::traits::MatrixType MatrixType; + typedef typename internal::traits::Preconditioner Preconditioner; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + +public: + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** Default constructor. */ + IterativeSolverBase() + : mp_matrix(0) + { + init(); + } + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. 
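+  *
+  * A minimal sketch of the intended usage (illustrative only; updateValues() is a hypothetical helper
+  * that changes the coefficients of A while keeping it alive and its sparsity pattern unchanged):
+  * \code
+  * ConjugateGradient<SparseMatrix<double> > solver(A);  // keeps a reference to A
+  * x = solver.solve(b);
+  * updateValues(A);      // hypothetical: new coefficients, same pattern
+  * solver.factorize(A);  // refresh the stored reference and the preconditioner
+  * x = solver.solve(b);
+  * \endcode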
+ */ + IterativeSolverBase(const MatrixType& A) + { + init(); + compute(A); + } + + ~IterativeSolverBase() {} + + /** Initializes the iterative solver for the sparcity pattern of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly call analyzePattern on the preconditioner. In the future + * we might, for instance, implement column reodering for faster matrix vector products. + */ + Derived& analyzePattern(const MatrixType& A) + { + m_preconditioner.analyzePattern(A); + m_isInitialized = true; + m_analysisIsOk = true; + m_info = Success; + return derived(); + } + + /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly call factorize on the preconditioner. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + Derived& factorize(const MatrixType& A) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + mp_matrix = &A; + m_preconditioner.factorize(A); + m_factorizationIsOk = true; + m_info = Success; + return derived(); + } + + /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. + * + * Currently, this function mostly initialized/compute the preconditioner. In the future + * we might, for instance, implement column reodering for faster matrix vector products. + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + Derived& compute(const MatrixType& A) + { + mp_matrix = &A; + m_preconditioner.compute(A); + m_isInitialized = true; + m_analysisIsOk = true; + m_factorizationIsOk = true; + m_info = Success; + return derived(); + } + + /** \internal */ + Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; } + /** \internal */ + Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; } + + /** \returns the tolerance threshold used by the stopping criteria */ + RealScalar tolerance() const { return m_tolerance; } + + /** Sets the tolerance threshold used by the stopping criteria */ + Derived& setTolerance(const RealScalar& tolerance) + { + m_tolerance = tolerance; + return derived(); + } + + /** \returns a read-write reference to the preconditioner for custom configuration. */ + Preconditioner& preconditioner() { return m_preconditioner; } + + /** \returns a read-only reference to the preconditioner. */ + const Preconditioner& preconditioner() const { return m_preconditioner; } + + /** \returns the max number of iterations */ + int maxIterations() const + { + return (mp_matrix && m_maxIterations<0) ? 
mp_matrix->cols() : m_maxIterations; + } + + /** Sets the max number of iterations */ + Derived& setMaxIterations(int maxIters) + { + m_maxIterations = maxIters; + return derived(); + } + + /** \returns the number of iterations performed during the last solve */ + int iterations() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_iterations; + } + + /** \returns the tolerance error reached during the last solve */ + RealScalar error() const + { + eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); + return m_error; + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + eigen_assert(rows()==b.rows() + && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(derived(), b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + eigen_assert(rows()==b.rows() + && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + /** \returns Success if the iterations converged, and NoConvergence otherwise. */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); + return m_info; + } + + /** \internal */ + template + void _solve_sparse(const Rhs& b, SparseMatrix &dest) const + { + eigen_assert(rows()==b.rows()); + + int rhsCols = b.cols(); + int size = b.rows(); + Eigen::Matrix tb(size); + Eigen::Matrix tx(size); + for(int k=0; k::epsilon(); + } + const MatrixType* mp_matrix; + Preconditioner m_preconditioner; + + int m_maxIterations; + RealScalar m_tolerance; + + mutable RealScalar m_error; + mutable int m_iterations; + mutable ComputationInfo m_info; + mutable bool m_isInitialized, m_analysisIsOk, m_factorizationIsOk; +}; + +namespace internal { + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef IterativeSolverBase Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec().derived()._solve_sparse(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ITERATIVE_SOLVER_BASE_H -- cgit v1.2.3
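Taken together, IterativeSolverBase gives every solver in this module the same runtime controls.
A minimal end-to-end sketch (illustrative only; n, A, and b are assumed to be set up elsewhere, and
the tolerance and iteration limit are arbitrary values):

    ConjugateGradient<SparseMatrix<double> > solver;
    solver.setTolerance(1e-8);     // relative residual threshold for the stopping criterion
    solver.setMaxIterations(500);  // when not set, defaults to the problem size
    solver.compute(A);             // analyzes the pattern and builds the preconditioner
    VectorXd x = solver.solve(b);
    if (solver.info() == Success)
      std::cout << solver.iterations() << " iterations, error " << solver.error() << std::endl;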