path: root/Eigen/src
author    Gael Guennebaud <g.gael@free.fr>    2011-11-12 14:11:27 +0100
committer Gael Guennebaud <g.gael@free.fr>    2011-11-12 14:11:27 +0100
commit    53fa8517245e0136c83b77526b05ce67de232a56 (patch)
tree      99dd17062c742eabfc3626a04c38fd6f72e43bc4 /Eigen/src
parent    dcb66d6b403ed2c4341fdb091f2ef22b73ea8b8a (diff)
move sparse solvers from unsupported/ to main Eigen/ and remove the "not stable yet" warning
Diffstat (limited to 'Eigen/src')
-rw-r--r--  Eigen/src/CholmodSupport/CMakeLists.txt    6
-rw-r--r--  Eigen/src/CholmodSupport/CholmodSupport.h    399
-rw-r--r--  Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h    139
-rw-r--r--  Eigen/src/IterativeLinearSolvers/BiCGSTAB.h    261
-rw-r--r--  Eigen/src/IterativeLinearSolvers/CMakeLists.txt    6
-rw-r--r--  Eigen/src/IterativeLinearSolvers/ConjugateGradient.h    255
-rw-r--r--  Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h    225
-rw-r--r--  Eigen/src/OrderingMethods/Amd.h    448
-rw-r--r--  Eigen/src/OrderingMethods/CMakeLists.txt    6
-rw-r--r--  Eigen/src/Sparse/CMakeLists.txt    6
-rw-r--r--  Eigen/src/SparseCholesky/CMakeLists.txt    6
-rw-r--r--  Eigen/src/SparseCholesky/SimplicialCholesky.h    803
-rw-r--r--  Eigen/src/SparseCore/AmbiVector.h (renamed from Eigen/src/Sparse/AmbiVector.h)    0
-rw-r--r--  Eigen/src/SparseCore/CMakeLists.txt    6
-rw-r--r--  Eigen/src/SparseCore/CompressedStorage.h (renamed from Eigen/src/Sparse/CompressedStorage.h)    0
-rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h (renamed from Eigen/src/Sparse/ConservativeSparseSparseProduct.h)    0
-rw-r--r--  Eigen/src/SparseCore/CoreIterators.h (renamed from Eigen/src/Sparse/CoreIterators.h)    0
-rw-r--r--  Eigen/src/SparseCore/MappedSparseMatrix.h (renamed from Eigen/src/Sparse/MappedSparseMatrix.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseAssign.h (renamed from Eigen/src/Sparse/SparseAssign.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseBlock.h (renamed from Eigen/src/Sparse/SparseBlock.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseCwiseBinaryOp.h (renamed from Eigen/src/Sparse/SparseCwiseBinaryOp.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseCwiseUnaryOp.h (renamed from Eigen/src/Sparse/SparseCwiseUnaryOp.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseDenseProduct.h (renamed from Eigen/src/Sparse/SparseDenseProduct.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseDiagonalProduct.h (renamed from Eigen/src/Sparse/SparseDiagonalProduct.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseDot.h (renamed from Eigen/src/Sparse/SparseDot.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseFuzzy.h (renamed from Eigen/src/Sparse/SparseFuzzy.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h (renamed from Eigen/src/Sparse/SparseMatrix.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h (renamed from Eigen/src/Sparse/SparseMatrixBase.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseProduct.h (renamed from Eigen/src/Sparse/SparseProduct.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseRedux.h (renamed from Eigen/src/Sparse/SparseRedux.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseSelfAdjointView.h (renamed from Eigen/src/Sparse/SparseSelfAdjointView.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseSparseProductWithPruning.h (renamed from Eigen/src/Sparse/SparseSparseProductWithPruning.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseTranspose.h (renamed from Eigen/src/Sparse/SparseTranspose.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseTriangularView.h (renamed from Eigen/src/Sparse/SparseTriangularView.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseUtil.h (renamed from Eigen/src/Sparse/SparseUtil.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h (renamed from Eigen/src/Sparse/SparseVector.h)    0
-rw-r--r--  Eigen/src/SparseCore/SparseView.h (renamed from Eigen/src/Sparse/SparseView.h)    0
-rw-r--r--  Eigen/src/SparseCore/TriangularSolver.h (renamed from Eigen/src/Sparse/TriangularSolver.h)    0
-rw-r--r--  Eigen/src/SuperLUSupport/CMakeLists.txt    6
-rw-r--r--  Eigen/src/SuperLUSupport/SuperLUSupport.h    989
-rw-r--r--  Eigen/src/UmfPackSupport/CMakeLists.txt    6
-rw-r--r--  Eigen/src/UmfPackSupport/UmfPackSupport.h    406
-rw-r--r--  Eigen/src/misc/SparseSolve.h    122
43 files changed, 4089 insertions, 6 deletions
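
Now that these solvers live in the main Eigen/ tree, a plain application can use them without touching unsupported/. Below is a minimal, hedged sketch (not part of the commit): it assumes that including <Eigen/Sparse> after this change pulls in the promoted IterativeLinearSolvers module, and it builds an illustrative SPD matrix in dense form since, per the documentation further down, the iterative solvers accept dense as well as sparse matrix types.

#include <Eigen/Dense>
#include <Eigen/Sparse>   // assumption: exposes the promoted iterative solvers
#include <iostream>

int main()
{
  const int n = 100;
  // Build a symmetric positive definite matrix (illustrative only).
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  A = A.transpose() * A + double(n) * Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  Eigen::ConjugateGradient<Eigen::MatrixXd> cg;
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);

  std::cout << "#iterations: " << cg.iterations()
            << ", estimated error: " << cg.error() << std::endl;
  return 0;
}
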
diff --git a/Eigen/src/CholmodSupport/CMakeLists.txt b/Eigen/src/CholmodSupport/CMakeLists.txt
new file mode 100644
index 000000000..814dfa613
--- /dev/null
+++ b/Eigen/src/CholmodSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_CholmodSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_CholmodSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/CholmodSupport COMPONENT Devel
+ )
diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h
new file mode 100644
index 000000000..3e502c0aa
--- /dev/null
+++ b/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -0,0 +1,399 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+namespace internal {
+
+template<typename Scalar, typename CholmodType>
+void cholmod_configure_matrix(CholmodType& mat)
+{
+ if (internal::is_same<Scalar,float>::value)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (internal::is_same<Scalar,double>::value)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else if (internal::is_same<Scalar,std::complex<float> >::value)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (internal::is_same<Scalar,std::complex<double> >::value)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else
+ {
+ eigen_assert(false && "Scalar type not supported by CHOLMOD");
+ }
+}
+
+} // namespace internal
+
+/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
+ * Note that the data are shared.
+ */
+template<typename _Scalar, int _Options, typename _Index>
+cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+ typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType;
+ cholmod_sparse res;
+ res.nzmax = mat.nonZeros();
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ res.p = mat._outerIndexPtr();
+ res.i = mat._innerIndexPtr();
+ res.x = mat._valuePtr();
+ res.sorted = 1;
+ res.packed = 1;
+ res.dtype = 0;
+ res.stype = -1;
+
+ if (internal::is_same<_Index,int>::value)
+ {
+ res.itype = CHOLMOD_INT;
+ }
+ else
+ {
+ eigen_assert(false && "Index type different than int is not supported yet");
+ }
+
+ // setup res.xtype
+ internal::cholmod_configure_matrix<_Scalar>(res);
+
+ res.stype = 0;
+
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+ cholmod_sparse res = viewAsCholmod(mat.const_cast_derived());
+ return res;
+}
+
+/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix.
+ * The data are not copied but shared. */
+template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
+cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
+{
+ cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived());
+
+ if(UpLo==Upper) res.stype = 1;
+ if(UpLo==Lower) res.stype = -1;
+
+ return res;
+}
+
+/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix.
+ * The data are not copied but shared. */
+template<typename Derived>
+cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
+{
+ EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ typedef typename Derived::Scalar Scalar;
+
+ cholmod_dense res;
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ res.nzmax = res.nrow * res.ncol;
+ res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
+ res.x = mat.derived().data();
+ res.z = 0;
+
+ internal::cholmod_configure_matrix<Scalar>(res);
+
+ return res;
+}
+
+/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
+ * The data are not copied but shared. */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
+{
+ return MappedSparseMatrix<Scalar,Flags,Index>
+ (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
+ reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
+}
+
+enum CholmodMode {
+ CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
+};
+
+/** \brief A Cholesky factorization and solver based on Cholmod
+ *
+ * This class allows solving A.X = B sparse linear problems via an LL^T or LDL^T Cholesky factorization
+ * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodDecomposition
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef MatrixType CholMatrixType;
+ typedef typename MatrixType::Index Index;
+
+ public:
+
+ CholmodDecomposition()
+ : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+ {
+ cholmod_start(&m_cholmod);
+ setMode(CholmodLDLt);
+ }
+
+ CholmodDecomposition(const MatrixType& matrix)
+ : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+ {
+ cholmod_start(&m_cholmod);
+ compute(matrix);
+ }
+
+ ~CholmodDecomposition()
+ {
+ if(m_cholmodFactor)
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ cholmod_finish(&m_cholmod);
+ }
+
+ inline Index cols() const { return m_cholmodFactor->n; }
+ inline Index rows() const { return m_cholmodFactor->n; }
+
+ void setMode(CholmodMode mode)
+ {
+ switch(mode)
+ {
+ case CholmodAuto:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_AUTO;
+ break;
+ case CholmodSimplicialLLt:
+ m_cholmod.final_asis = 0;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ m_cholmod.final_ll = 1;
+ break;
+ case CholmodSupernodalLLt:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+ break;
+ case CholmodLDLt:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the matrix appears to be negative.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ void compute(const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<CholmodDecomposition, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<CholmodDecomposition, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<CholmodDecomposition, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<CholmodDecomposition, Rhs>(*this, b.derived());
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ if(m_cholmodFactor)
+ {
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ m_cholmodFactor = 0;
+ }
+ cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+ m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+
+ this->m_isInitialized = true;
+ this->m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+ cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+
+ this->m_info = Success;
+ m_factorizationIsOk = true;
+ }
+
+ /** Returns a reference to Cholmod's configuration structure to get full control over the performed operations.
+ * See the Cholmod user guide for details. */
+ cholmod_common& cholmod() { return m_cholmod; }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+ const Index size = m_cholmodFactor->n;
+ eigen_assert(size==b.rows());
+
+ // note: cd stands for Cholmod Dense
+ cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
+ cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
+ if(!x_cd)
+ {
+ this->m_info = NumericalIssue;
+ }
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+ dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
+ cholmod_free_dense(&x_cd, &m_cholmod);
+ }
+
+ /** \internal */
+ template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+ const Index size = m_cholmodFactor->n;
+ eigen_assert(size==b.rows());
+
+ // note: cs stands for Cholmod Sparse
+ cholmod_sparse b_cs = viewAsCholmod(b);
+ cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
+ if(!x_cs)
+ {
+ this->m_info = NumericalIssue;
+ }
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+ dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
+ cholmod_free_sparse(&x_cs, &m_cholmod);
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {}
+
+ protected:
+ mutable cholmod_common m_cholmod;
+ cholmod_factor* m_cholmodFactor;
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+};
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename Rhs>
+struct solve_retval<CholmodDecomposition<_MatrixType,_UpLo>, Rhs>
+ : solve_retval_base<CholmodDecomposition<_MatrixType,_UpLo>, Rhs>
+{
+ typedef CholmodDecomposition<_MatrixType,_UpLo> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, int _UpLo, typename Rhs>
+struct sparse_solve_retval<CholmodDecomposition<_MatrixType,_UpLo>, Rhs>
+ : sparse_solve_retval_base<CholmodDecomposition<_MatrixType,_UpLo>, Rhs>
+{
+ typedef CholmodDecomposition<_MatrixType,_UpLo> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_CHOLMODSUPPORT_H
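
For reference, here is a hedged usage sketch of the CholmodDecomposition class added above. The <Eigen/CholmodSupport> module header name, the helper function name, and the error handling are assumptions; setMode() selects one of the backends listed in the CholmodMode enum.

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>   // assumption: module header wrapping the file above

// Solve A x = b with A selfadjoint positive definite; with the default
// template parameters only the Lower triangular part of A is referenced.
bool solve_with_cholmod(const Eigen::SparseMatrix<double>& A,
                        const Eigen::VectorXd& b,
                        Eigen::VectorXd& x)
{
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > chol;
  chol.setMode(Eigen::CholmodSupernodalLLt);   // pick the supernodal LL^T backend
  chol.compute(A);                             // analyzePattern() + factorize()
  if(chol.info() != Eigen::Success)
    return false;                              // the factorization reported a problem
  x = chol.solve(b);
  return true;
}
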
diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
new file mode 100644
index 000000000..d9edd1461
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -0,0 +1,139 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BASIC_PRECONDITIONERS_H
+#define EIGEN_BASIC_PRECONDITIONERS_H
+
+/** \brief A preconditioner based on the diagonal entries
+ *
+ * This class allows approximately solving A.x = b problems, assuming A is a diagonal matrix.
+ * In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
+ * \code
+ * A.diagonal().asDiagonal() . x = b
+ * \endcode
+ *
+ * \tparam _Scalar the type of the scalar.
+ *
+ * This preconditioner is suitable for both selfadjoint and general problems.
+ * The diagonal entries are pre-inverted and stored into a dense vector.
+ *
+ * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
+ *
+ */
+template <typename _Scalar>
+class DiagonalPreconditioner
+{
+ typedef _Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef typename Vector::Index Index;
+
+ public:
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+ DiagonalPreconditioner() : m_isInitialized(false) {}
+
+ template<typename MatrixType>
+ DiagonalPreconditioner(const MatrixType& mat) : m_invdiag(mat.cols())
+ {
+ compute(mat);
+ }
+
+ Index rows() const { return m_invdiag.size(); }
+ Index cols() const { return m_invdiag.size(); }
+
+ template<typename MatrixType>
+ DiagonalPreconditioner& compute(const MatrixType& mat)
+ {
+ m_invdiag.resize(mat.cols());
+ for(int j=0; j<mat.outerSize(); ++j)
+ {
+ typename MatrixType::InnerIterator it(mat,j);
+ while(it && it.index()!=j) ++it;
+ if(it && it.index()==j)
+ m_invdiag(j) = Scalar(1)/it.value();
+ else
+ m_invdiag(j) = 0;
+ }
+ m_isInitialized = true;
+ return *this;
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = m_invdiag.array() * b.array() ;
+ }
+
+ template<typename Rhs> inline const internal::solve_retval<DiagonalPreconditioner, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
+ eigen_assert(m_invdiag.size()==b.rows()
+ && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<DiagonalPreconditioner, Rhs>(*this, b.derived());
+ }
+
+ protected:
+ Vector m_invdiag;
+ bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<DiagonalPreconditioner<_MatrixType>, Rhs>
+ : solve_retval_base<DiagonalPreconditioner<_MatrixType>, Rhs>
+{
+ typedef DiagonalPreconditioner<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+/** \brief A naive preconditioner which approximates any matrix as the identity matrix
+ *
+ * \sa class DiagonalPreconditioner
+ */
+class IdentityPreconditioner
+{
+ public:
+
+ IdentityPreconditioner() {}
+
+ template<typename MatrixType>
+ IdentityPreconditioner(const MatrixType& ) {}
+
+ template<typename MatrixType>
+ IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
+
+ template<typename Rhs>
+ inline const Rhs& solve(const Rhs& b) const { return b; }
+};
+
+#endif // EIGEN_BASIC_PRECONDITIONERS_H
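
The iterative solvers introduced below only expect a preconditioner to provide a default constructor, a constructor or compute() taking the matrix, and a solve() returning an approximate solution of A.z = r, exactly as DiagonalPreconditioner and IdentityPreconditioner do above. A hedged sketch of a hypothetical custom preconditioner plugged into ConjugateGradient through its third template parameter (the uniform-scaling strategy is purely illustrative, and the header name is an assumption):

#include <Eigen/Sparse>

// Hypothetical preconditioner: approximate A by (average diagonal entry) * I.
// Assumes the diagonal does not sum to zero.
struct AverageDiagonalPreconditioner
{
  AverageDiagonalPreconditioner() : m_scale(1) {}

  template<typename MatrixType>
  AverageDiagonalPreconditioner(const MatrixType& mat) { compute(mat); }

  template<typename MatrixType>
  AverageDiagonalPreconditioner& compute(const MatrixType& mat)
  {
    double sum = 0;
    for(int i = 0; i < mat.cols(); ++i)
      sum += mat.coeff(i, i);        // coeff() works for dense and sparse types
    m_scale = mat.cols() / sum;      // inverse of the average diagonal entry
    return *this;
  }

  template<typename Rhs>
  Eigen::VectorXd solve(const Rhs& b) const { return m_scale * b; }

  double m_scale;
};

// Plug it into ConjugateGradient in place of DiagonalPreconditioner.
typedef Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                                 Eigen::Lower,
                                 AverageDiagonalPreconditioner> ScaledCG;
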
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
new file mode 100644
index 000000000..798f85da5
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BICGSTAB_H
+#define EIGEN_BICGSTAB_H
+
+namespace internal {
+
+/** \internal Low-level bi conjugate gradient stabilized algorithm
+ * \param mat The matrix A
+ * \param rhs The right hand side vector b
+ * \param x On input an initial solution, on output the computed solution.
+ * \param precond A preconditioner being able to efficiently solve for an
+ * approximation of Ax=b (regardless of b)
+ * \param iters On input the max number of iterations, on output the number of performed iterations.
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+ */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+void bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+{
+ using std::sqrt;
+ using std::abs;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ RealScalar tol = tol_error;
+ int maxIters = iters;
+
+ int n = mat.cols();
+ VectorType r = rhs - mat * x;
+ VectorType r0 = r;
+ RealScalar r0_sqnorm = r0.squaredNorm();
+ Scalar rho = 1;
+ Scalar alpha = 1;
+ Scalar w = 1;
+
+ VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
+ VectorType y(n), z(n);
+ VectorType kt(n), ks(n);
+
+ VectorType s(n), t(n);
+
+ RealScalar tol2 = tol*tol;
+ int i = 0;
+
+ do
+ {
+ Scalar rho_old = rho;
+
+ rho = r0.dot(r);
+ Scalar beta = (rho/rho_old) * (alpha / w);
+ p = r + beta * (p - w * v);
+
+ y = precond.solve(p);
+ v.noalias() = mat * y;
+
+ alpha = rho / r0.dot(v);
+ s = r - alpha * v;
+
+ z = precond.solve(s);
+ t.noalias() = mat * z;
+
+ kt = precond.solve(t);
+ ks = precond.solve(s);
+
+ w = kt.dot(ks) / kt.squaredNorm();
+ x += alpha * y + w * z;
+ r = s - w * t;
+ ++i;
+ } while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters );
+
+ tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
+ //tol_error = sqrt(abs(absNew / absInit));
+ iters = i;
+}
+
+}
+
+template< typename _MatrixType,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class BiCGSTAB;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \brief A bi conjugate gradient stabilized solver for sparse square problems
+ *
+ * This class allows solving A.x = b sparse linear problems using a bi conjugate gradient
+ * stabilized algorithm. The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are 1000 max iterations and NumTraits<Scalar>::epsilon()
+ * for the tolerance.
+ *
+ * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * BiCGSTAB<SparseMatrix<double> > solver;
+ * solver.compute(A);
+ * x = solver.solve(b);
+ * std::cout << "#iterations: " << solver.iterations() << std::endl;
+ * std::cout << "estimated error: " << solver.error() << std::endl;
+ * // update b, and solve again
+ * x = solver.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method. Here is a step by
+ * step execution example starting with a random guess and printing the evolution
+ * of the estimated error:
+ * \code
+ * x = VectorXd::Random(n);
+ * solver.setMaxIterations(1);
+ * int i = 0;
+ * do {
+ * x = solver.solveWithGuess(b,x);
+ * std::cout << i << " : " << solver.error() << std::endl;
+ * ++i;
+ * } while (solver.info()!=Success && i<100);
+ * \endcode
+ * Note that such a step-by-step execution is slightly slower.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+ typedef IterativeSolverBase<BiCGSTAB> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+public:
+
+ /** Default constructor. */
+ BiCGSTAB() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ BiCGSTAB(const MatrixType& A) : Base(A) {}
+
+ ~BiCGSTAB() {}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+ * \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::m_maxIterations;
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
+ }
+
+ m_isInitialized = true;
+ m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x.setOnes();
+ _solveWithGuess(b,x);
+ }
+
+protected:
+
+};
+
+
+namespace internal {
+
+ template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+ : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+{
+ typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_BICGSTAB_H
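
A hedged sketch of reusing a single BiCGSTAB object over a sequence of right hand sides, warm-starting each solve from the previous solution through solveWithGuess() as documented above. The header name and the surrounding helper function are assumptions.

#include <Eigen/Sparse>
#include <vector>

void track_solutions(const Eigen::SparseMatrix<double>& A,
                     const std::vector<Eigen::VectorXd>& rhs_sequence,
                     std::vector<Eigen::VectorXd>& solutions)
{
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver(A);   // keeps a reference to A
  Eigen::VectorXd x = Eigen::VectorXd::Zero(A.cols());
  for(std::size_t k = 0; k < rhs_sequence.size(); ++k)
  {
    // The previous solution is usually a good initial guess when b changes slowly.
    x = solver.solveWithGuess(rhs_sequence[k], x);
    solutions.push_back(x);
  }
}
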
diff --git a/Eigen/src/IterativeLinearSolvers/CMakeLists.txt b/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
new file mode 100644
index 000000000..59ccc0072
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_IterativeLinearSolvers_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_IterativeLinearSolvers_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/IterativeLinearSolvers COMPONENT Devel
+ )
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
new file mode 100644
index 000000000..ced3e310c
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -0,0 +1,255 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CONJUGATE_GRADIENT_H
+#define EIGEN_CONJUGATE_GRADIENT_H
+
+namespace internal {
+
+/** \internal Low-level conjugate gradient algorithm
+ * \param mat The matrix A
+ * \param rhs The right hand side vector b
+ * \param x On input an initial solution, on output the computed solution.
+ * \param precond A preconditioner being able to efficiently solve for an
+ * approximation of Ax=b (regardless of b)
+ * \param iters On input the max number of iterations, on output the number of performed iterations.
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+ */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+EIGEN_DONT_INLINE
+void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+{
+ using std::sqrt;
+ using std::abs;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ RealScalar tol = tol_error;
+ int maxIters = iters;
+
+ int n = mat.cols();
+ VectorType residual = rhs - mat * x; //initial residual
+ VectorType p(n);
+
+ p = precond.solve(residual); //initial search direction
+
+ VectorType z(n), tmp(n);
+ RealScalar absNew = internal::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
+ RealScalar absInit = absNew; // the initial absolute value
+
+ int i = 0;
+ while ((i < maxIters) && (absNew > tol*tol*absInit))
+ {
+ tmp.noalias() = mat * p; // the bottleneck of the algorithm
+
+ Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
+ x += alpha * p; // update solution
+ residual -= alpha * tmp; // update residue
+ z = precond.solve(residual); // approximately solve for "A z = residual"
+
+ RealScalar absOld = absNew;
+ absNew = internal::real(residual.dot(z)); // update the absolute value of r
+ RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
+ p = z + beta * p; // update search direction
+ i++;
+ }
+
+ tol_error = sqrt(abs(absNew / absInit));
+ iters = i;
+}
+
+}
+
+template< typename _MatrixType, int _UpLo=Lower,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class ConjugateGradient;
+
+namespace internal {
+
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \brief A conjugate gradient solver for sparse self-adjoint problems
+ *
+ * This class allows solving A.x = b sparse linear problems using a conjugate gradient algorithm.
+ * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are 1000 max iterations and NumTraits<Scalar>::epsilon()
+ * for the tolerance.
+ *
+ * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * ConjugateGradient<SparseMatrix<double> > cg;
+ * cg.compute(A);
+ * x = cg.solve(b);
+ * std::cout << "#iterations: " << cg.iterations() << std::endl;
+ * std::cout << "estimated error: " << cg.error() << std::endl;
+ * // update b, and solve again
+ * x = cg.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method. Here is a step by
+ * step execution example starting with a random guess and printing the evolution
+ * of the estimated error:
+ * \code
+ * x = VectorXd::Random(n);
+ * cg.setMaxIterations(1);
+ * int i = 0;
+ * do {
+ * x = cg.solveWithGuess(b,x);
+ * std::cout << i << " : " << cg.error() << std::endl;
+ * ++i;
+ * } while (cg.info()!=Success && i<100);
+ * \endcode
+ * Note that such a step-by-step execution is slightly slower.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+ typedef IterativeSolverBase<ConjugateGradient> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+ enum {
+ UpLo = _UpLo
+ };
+
+public:
+
+ /** Default constructor. */
+ ConjugateGradient() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ ConjugateGradient(const MatrixType& A) : Base(A) {}
+
+ ~ConjugateGradient() {}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+ * \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<ConjugateGradient, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <ConjugateGradient, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ m_iterations = Base::m_maxIterations;
+ m_error = Base::m_tolerance;
+
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::m_maxIterations;
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
+ Base::m_preconditioner, m_iterations, m_error);
+ }
+
+ m_isInitialized = true;
+ m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x.setOnes();
+ _solveWithGuess(b,x);
+ }
+
+protected:
+
+};
+
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+struct solve_retval<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ : solve_retval_base<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+{
+ typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_CONJUGATE_GRADIENT_H
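
A hedged sketch of the _UpLo template parameter of ConjugateGradient, useful when only one triangular part of the selfadjoint matrix is actually stored; the header name and the helper function are assumptions.

#include <Eigen/Sparse>

typedef Eigen::SparseMatrix<double> SpMat;

// A_upper stores only the upper triangular part of a selfadjoint
// positive definite matrix.
Eigen::VectorXd solve_upper_stored(const SpMat& A_upper, const Eigen::VectorXd& b)
{
  Eigen::ConjugateGradient<SpMat, Eigen::Upper> cg(A_upper);
  return cg.solve(b);
}
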
diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
new file mode 100644
index 000000000..5e8bbd3e2
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -0,0 +1,225 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
+#define EIGEN_ITERATIVE_SOLVER_BASE_H
+
+
+/** \brief Base class for linear iterative solvers
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename Derived>
+class IterativeSolverBase
+{
+public:
+ typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+public:
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ /** Default constructor. */
+ IterativeSolverBase()
+ : mp_matrix(0)
+ {
+ init();
+ }
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ IterativeSolverBase(const MatrixType& A)
+ {
+ init();
+ compute(A);
+ }
+
+ ~IterativeSolverBase() {}
+
+ /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
+ *
+ * Currently, this function mostly initializes/computes the preconditioner. In the future
+ * we might, for instance, implement column reordering for faster matrix-vector products.
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ Derived& compute(const MatrixType& A)
+ {
+ mp_matrix = &A;
+ m_preconditioner.compute(A);
+ m_isInitialized = true;
+ m_info = Success;
+ return derived();
+ }
+
+ /** \internal */
+ Index rows() const { return mp_matrix->rows(); }
+ /** \internal */
+ Index cols() const { return mp_matrix->cols(); }
+
+ /** \returns the tolerance threshold used by the stopping criteria */
+ RealScalar tolerance() const { return m_tolerance; }
+
+ /** Sets the tolerance threshold used by the stopping criteria */
+ Derived& setTolerance(RealScalar tolerance)
+ {
+ m_tolerance = tolerance;
+ return derived();
+ }
+
+ /** \returns a read-write reference to the preconditioner for custom configuration. */
+ Preconditioner& preconditioner() { return m_preconditioner; }
+
+ /** \returns a read-only reference to the preconditioner. */
+ const Preconditioner& preconditioner() const { return m_preconditioner; }
+
+ /** \returns the max number of iterations */
+ int maxIterations() const { return m_maxIterations; }
+
+ /** Sets the max number of iterations */
+ Derived& setMaxIterations(int maxIters)
+ {
+ m_maxIterations = maxIters;
+ return derived();
+ }
+
+ /** \returns the number of iterations performed during the last solve */
+ int iterations() const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ return m_iterations;
+ }
+
+ /** \returns the tolerance error reached during the last solve */
+ RealScalar error() const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ return m_error;
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs> inline const internal::solve_retval<Derived, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<Derived, Rhs>(derived(), b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<IterativeSolverBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<IterativeSolverBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns Success if the iterations converged, and NoConvergence otherwise. */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ return m_info;
+ }
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(rows()==b.rows());
+
+ int rhsCols = b.cols();
+ int size = b.rows();
+ Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
+ Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
+ for(int k=0; k<rhsCols; ++k)
+ {
+ tb = b.col(k);
+ tx = derived().solve(tb);
+ dest.col(k) = tx.sparseView(0);
+ }
+ }
+
+protected:
+ void init()
+ {
+ m_isInitialized = false;
+ m_maxIterations = 1000;
+ m_tolerance = NumTraits<Scalar>::epsilon();
+ }
+ const MatrixType* mp_matrix;
+ Preconditioner m_preconditioner;
+
+ int m_maxIterations;
+ RealScalar m_tolerance;
+
+ mutable RealScalar m_error;
+ mutable int m_iterations;
+ mutable ComputationInfo m_info;
+ mutable bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<IterativeSolverBase<Derived>, Rhs>
+ : sparse_solve_retval_base<IterativeSolverBase<Derived>, Rhs>
+{
+ typedef IterativeSolverBase<Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve_sparse(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
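
A hedged sketch of the controls IterativeSolverBase gives every derived solver (maximum iterations, tolerance, and the post-solve diagnostics); the header name and the helper function are assumptions.

#include <Eigen/Sparse>
#include <iostream>

void tuned_solve(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
{
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg;
  cg.setMaxIterations(200);   // cap the iteration count
  cg.setTolerance(1e-8);      // stopping threshold on the relative error
  cg.compute(A);              // also initializes the preconditioner

  Eigen::VectorXd x = cg.solve(b);

  if(cg.info() == Eigen::Success)
    std::cout << "converged in " << cg.iterations()
              << " iterations, estimated error " << cg.error() << std::endl;
  else
    std::cout << "no convergence after " << cg.iterations() << " iterations" << std::endl;
}
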
diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h
new file mode 100644
index 000000000..3cf8bd1e1
--- /dev/null
+++ b/Eigen/src/OrderingMethods/Amd.h
@@ -0,0 +1,448 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: this routine has been adapted from the CSparse library:
+
+Copyright (c) 2006, Timothy A. Davis.
+http://www.cise.ufl.edu/research/sparse/CSparse
+
+CSparse is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+CSparse is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this Module; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+*/
+
+#ifndef EIGEN_SPARSE_AMD_H
+#define EIGEN_SPARSE_AMD_H
+
+namespace internal {
+
+
+#define CS_FLIP(i) (-(i)-2)
+#define CS_UNFLIP(i) (((i) < 0) ? CS_FLIP(i) : (i))
+#define CS_MARKED(w,j) (w[j] < 0)
+#define CS_MARK(w,j) { w[j] = CS_FLIP (w[j]); }
+
+/* clear w */
+template<typename Index>
+static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
+{
+ Index k;
+ if(mark < 2 || (mark + lemax < 0))
+ {
+ for(k = 0; k < n; k++)
+ if(w[k] != 0)
+ w[k] = 1;
+ mark = 2;
+ }
+ return (mark); /* at this point, w[0..n-1] < mark holds */
+}
+
+/* depth-first search and postorder of a tree rooted at node j */
+template<typename Index>
+Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
+{
+ int i, p, top = 0;
+ if(!head || !next || !post || !stack) return (-1); /* check inputs */
+ stack[0] = j; /* place j on the stack */
+ while (top >= 0) /* while (stack is not empty) */
+ {
+ p = stack[top]; /* p = top of stack */
+ i = head[p]; /* i = youngest child of p */
+ if(i == -1)
+ {
+ top--; /* p has no unordered children left */
+ post[k++] = p; /* node p is the kth postordered node */
+ }
+ else
+ {
+ head[p] = next[i]; /* remove i from children of p */
+ stack[++top] = i; /* start dfs on child node i */
+ }
+ }
+ return k;
+}
+
+
+/** \internal
+ * Approximate minimum degree ordering algorithm.
+ * \returns the permutation P reducing the fill-in of the input matrix \a C
+ * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
+ * On exit the values of C are destroyed */
+template<typename Scalar, typename Index>
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
+{
+ typedef SparseMatrix<Scalar,ColMajor,Index> CCS;
+
+ int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
+ k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
+ ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
+ unsigned int h;
+
+ Index n = C.cols();
+ dense = std::max<Index> (16, 10 * sqrt ((double) n)); /* find dense threshold */
+ dense = std::min<Index> (n-2, dense);
+
+ Index cnz = C.nonZeros();
+ perm.resize(n+1);
+ t = cnz + cnz/5 + 2*n; /* add elbow room to C */
+ C.resizeNonZeros(t);
+
+ Index* W = new Index[8*(n+1)]; /* get workspace */
+ Index* len = W;
+ Index* nv = W + (n+1);
+ Index* next = W + 2*(n+1);
+ Index* head = W + 3*(n+1);
+ Index* elen = W + 4*(n+1);
+ Index* degree = W + 5*(n+1);
+ Index* w = W + 6*(n+1);
+ Index* hhead = W + 7*(n+1);
+ Index* last = perm.indices().data(); /* use P as workspace for last */
+
+ /* --- Initialize quotient graph ---------------------------------------- */
+ Index* Cp = C._outerIndexPtr();
+ Index* Ci = C._innerIndexPtr();
+ for(k = 0; k < n; k++)
+ len[k] = Cp[k+1] - Cp[k];
+ len[n] = 0;
+ nzmax = t;
+
+ for(i = 0; i <= n; i++)
+ {
+ head[i] = -1; // degree list i is empty
+ last[i] = -1;
+ next[i] = -1;
+ hhead[i] = -1; // hash list i is empty
+ nv[i] = 1; // node i is just one node
+ w[i] = 1; // node i is alive
+ elen[i] = 0; // Ek of node i is empty
+ degree[i] = len[i]; // degree of node i
+ }
+ mark = cs_wclear<Index>(0, 0, w, n); /* clear w */
+ elen[n] = -2; /* n is a dead element */
+ Cp[n] = -1; /* n is a root of assembly tree */
+ w[n] = 0; /* n is a dead element */
+
+ /* --- Initialize degree lists ------------------------------------------ */
+ for(i = 0; i < n; i++)
+ {
+ d = degree[i];
+ if(d == 0) /* node i is empty */
+ {
+ elen[i] = -2; /* element i is dead */
+ nel++;
+ Cp[i] = -1; /* i is a root of assembly tree */
+ w[i] = 0;
+ }
+ else if(d > dense) /* node i is dense */
+ {
+ nv[i] = 0; /* absorb i into element n */
+ elen[i] = -1; /* node i is dead */
+ nel++;
+ Cp[i] = CS_FLIP (n);
+ nv[n]++;
+ }
+ else
+ {
+ if(head[d] != -1) last[head[d]] = i;
+ next[i] = head[d]; /* put node i in degree list d */
+ head[d] = i;
+ }
+ }
+
+ while (nel < n) /* while (selecting pivots) do */
+ {
+ /* --- Select node of minimum approximate degree -------------------- */
+ for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
+ if(next[k] != -1) last[next[k]] = -1;
+ head[mindeg] = next[k]; /* remove k from degree list */
+ elenk = elen[k]; /* elenk = |Ek| */
+ nvk = nv[k]; /* # of nodes k represents */
+ nel += nvk; /* nv[k] nodes of A eliminated */
+
+ /* --- Garbage collection ------------------------------------------- */
+ if(elenk > 0 && cnz + mindeg >= nzmax)
+ {
+ for(j = 0; j < n; j++)
+ {
+ if((p = Cp[j]) >= 0) /* j is a live node or element */
+ {
+ Cp[j] = Ci[p]; /* save first entry of object */
+ Ci[p] = CS_FLIP (j); /* first entry is now CS_FLIP(j) */
+ }
+ }
+ for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
+ {
+ if((j = CS_FLIP (Ci[p++])) >= 0) /* found object j */
+ {
+ Ci[q] = Cp[j]; /* restore first entry of object */
+ Cp[j] = q++; /* new pointer to object j */
+ for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
+ }
+ }
+ cnz = q; /* Ci[cnz...nzmax-1] now free */
+ }
+
+ /* --- Construct new element ---------------------------------------- */
+ dk = 0;
+ nv[k] = -nvk; /* flag k as in Lk */
+ p = Cp[k];
+ pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */
+ pk2 = pk1;
+ for(k1 = 1; k1 <= elenk + 1; k1++)
+ {
+ if(k1 > elenk)
+ {
+ e = k; /* search the nodes in k */
+ pj = p; /* list of nodes starts at Ci[pj]*/
+ ln = len[k] - elenk; /* length of list of nodes in k */
+ }
+ else
+ {
+ e = Ci[p++]; /* search the nodes in e */
+ pj = Cp[e];
+ ln = len[e]; /* length of list of nodes in e */
+ }
+ for(k2 = 1; k2 <= ln; k2++)
+ {
+ i = Ci[pj++];
+ if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
+ dk += nvi; /* degree[Lk] += size of node i */
+ nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/
+ Ci[pk2++] = i; /* place i in Lk */
+ if(next[i] != -1) last[next[i]] = last[i];
+ if(last[i] != -1) /* remove i from degree list */
+ {
+ next[last[i]] = next[i];
+ }
+ else
+ {
+ head[degree[i]] = next[i];
+ }
+ }
+ if(e != k)
+ {
+ Cp[e] = CS_FLIP (k); /* absorb e into k */
+ w[e] = 0; /* e is now a dead element */
+ }
+ }
+ if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */
+ degree[k] = dk; /* external degree of k - |Lk\i| */
+ Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */
+ len[k] = pk2 - pk1;
+ elen[k] = -2; /* k is now an element */
+
+ /* --- Find set differences ----------------------------------------- */
+ mark = cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
+ for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
+ {
+ i = Ci[pk];
+ if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
+ nvi = -nv[i]; /* nv[i] was negated */
+ wnvi = mark - nvi;
+ for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */
+ {
+ e = Ci[p];
+ if(w[e] >= mark)
+ {
+ w[e] -= nvi; /* decrement |Le\Lk| */
+ }
+ else if(w[e] != 0) /* ensure e is a live element */
+ {
+ w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
+ }
+ }
+ }
+
+ /* --- Degree update ------------------------------------------------ */
+ for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */
+ {
+ i = Ci[pk]; /* consider node i in Lk */
+ p1 = Cp[i];
+ p2 = p1 + elen[i] - 1;
+ pn = p1;
+ for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */
+ {
+ e = Ci[p];
+ if(w[e] != 0) /* e is an unabsorbed element */
+ {
+ dext = w[e] - mark; /* dext = |Le\Lk| */
+ if(dext > 0)
+ {
+ d += dext; /* sum up the set differences */
+ Ci[pn++] = e; /* keep e in Ei */
+ h += e; /* compute the hash of node i */
+ }
+ else
+ {
+ Cp[e] = CS_FLIP (k); /* aggressive absorb. e->k */
+ w[e] = 0; /* e is a dead element */
+ }
+ }
+ }
+ elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */
+ p3 = pn;
+ p4 = p1 + len[i];
+ for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
+ {
+ j = Ci[p];
+ if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
+ d += nvj; /* degree(i) += |j| */
+ Ci[pn++] = j; /* place j in node list of i */
+ h += j; /* compute hash for node i */
+ }
+ if(d == 0) /* check for mass elimination */
+ {
+ Cp[i] = CS_FLIP (k); /* absorb i into k */
+ nvi = -nv[i];
+ dk -= nvi; /* |Lk| -= |i| */
+ nvk += nvi; /* |k| += nv[i] */
+ nel += nvi;
+ nv[i] = 0;
+ elen[i] = -1; /* node i is dead */
+ }
+ else
+ {
+ degree[i] = std::min<Index> (degree[i], d); /* update degree(i) */
+ Ci[pn] = Ci[p3]; /* move first node to end */
+ Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
+ Ci[p1] = k; /* add k as 1st element in of Ei */
+ len[i] = pn - p1 + 1; /* new len of adj. list of node i */
+ h %= n; /* finalize hash of i */
+ next[i] = hhead[h]; /* place i in hash bucket */
+ hhead[h] = i;
+ last[i] = h; /* save hash of i in last[i] */
+ }
+ } /* scan2 is done */
+ degree[k] = dk; /* finalize |Lk| */
+ lemax = std::max<Index>(lemax, dk);
+ mark = cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
+
+ /* --- Supernode detection ------------------------------------------ */
+ for(pk = pk1; pk < pk2; pk++)
+ {
+ i = Ci[pk];
+ if(nv[i] >= 0) continue; /* skip if i is dead */
+ h = last[i]; /* scan hash bucket of node i */
+ i = hhead[h];
+ hhead[h] = -1; /* hash bucket will be empty */
+ for(; i != -1 && next[i] != -1; i = next[i], mark++)
+ {
+ ln = len[i];
+ eln = elen[i];
+ for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
+ jlast = i;
+ for(j = next[i]; j != -1; ) /* compare i with all j */
+ {
+ ok = (len[j] == ln) && (elen[j] == eln);
+ for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
+ {
+ if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/
+ }
+ if(ok) /* i and j are identical */
+ {
+ Cp[j] = CS_FLIP (i); /* absorb j into i */
+ nv[i] += nv[j];
+ nv[j] = 0;
+ elen[j] = -1; /* node j is dead */
+ j = next[j]; /* delete j from hash bucket */
+ next[jlast] = j;
+ }
+ else
+ {
+ jlast = j; /* j and i are different */
+ j = next[j];
+ }
+ }
+ }
+ }
+
+ /* --- Finalize new element------------------------------------------ */
+ for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */
+ {
+ i = Ci[pk];
+ if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
+ nv[i] = nvi; /* restore nv[i] */
+ d = degree[i] + dk - nvi; /* compute external degree(i) */
+ d = std::min<Index> (d, n - nel - nvi);
+ if(head[d] != -1) last[head[d]] = i;
+ next[i] = head[d]; /* put i back in degree list */
+ last[i] = -1;
+ head[d] = i;
+ mindeg = std::min<Index> (mindeg, d); /* find new minimum degree */
+ degree[i] = d;
+ Ci[p++] = i; /* place i in Lk */
+ }
+ nv[k] = nvk; /* # nodes absorbed into k */
+ if((len[k] = p-pk1) == 0) /* length of adj list of element k*/
+ {
+ Cp[k] = -1; /* k is a root of the tree */
+ w[k] = 0; /* k is now a dead element */
+ }
+ if(elenk != 0) cnz = p; /* free unused space in Lk */
+ }
+
+ /* --- Postordering ----------------------------------------------------- */
+ for(i = 0; i < n; i++) Cp[i] = CS_FLIP (Cp[i]);/* fix assembly tree */
+ for(j = 0; j <= n; j++) head[j] = -1;
+ for(j = n; j >= 0; j--) /* place unordered nodes in lists */
+ {
+ if(nv[j] > 0) continue; /* skip if j is an element */
+ next[j] = head[Cp[j]]; /* place j in list of its parent */
+ head[Cp[j]] = j;
+ }
+ for(e = n; e >= 0; e--) /* place elements in lists */
+ {
+ if(nv[e] <= 0) continue; /* skip unless e is an element */
+ if(Cp[e] != -1)
+ {
+ next[e] = head[Cp[e]]; /* place e in list of its parent */
+ head[Cp[e]] = e;
+ }
+ }
+ for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
+ {
+ if(Cp[i] == -1) k = cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
+ }
+
+ perm.indices().conservativeResize(n);
+
+ delete[] W;
+}
+
+} // namespace internal
+
+#endif // EIGEN_SPARSE_AMD_H
diff --git a/Eigen/src/OrderingMethods/CMakeLists.txt b/Eigen/src/OrderingMethods/CMakeLists.txt
new file mode 100644
index 000000000..9f4bb2758
--- /dev/null
+++ b/Eigen/src/OrderingMethods/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_OrderingMethods_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_OrderingMethods_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/OrderingMethods COMPONENT Devel
+ )
diff --git a/Eigen/src/Sparse/CMakeLists.txt b/Eigen/src/Sparse/CMakeLists.txt
deleted file mode 100644
index aa1468812..000000000
--- a/Eigen/src/Sparse/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_Sparse_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_Sparse_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Sparse COMPONENT Devel
- )
diff --git a/Eigen/src/SparseCholesky/CMakeLists.txt b/Eigen/src/SparseCholesky/CMakeLists.txt
new file mode 100644
index 000000000..375a59d7a
--- /dev/null
+++ b/Eigen/src/SparseCholesky/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCholesky_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SparseCholesky_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCholesky COMPONENT Devel
+ )
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
new file mode 100644
index 000000000..b9bed5d01
--- /dev/null
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -0,0 +1,803 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: the _symbolic and _numeric functions have been adapted from
+ the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+ */
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_H
+
+enum SimplicialCholeskyMode {
+ SimplicialCholeskyLLt,
+ SimplicialCholeskyLDLt
+};
+
+/** \brief A base class for direct sparse Cholesky factorizations
+ *
+ * These classes provide LL^T and LDL^T Cholesky factorizations of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
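+ * A minimal usage sketch of the common workflow, through one of the derived solvers
+ * declared below in this file (the names are placeholders; A is assumed to be a
+ * selfadjoint positive definite SparseMatrix<double>, b a VectorXd with matching rows):
+ * \code
+ * SimplicialLDLt<SparseMatrix<double> > chol(A); // performs analyzePattern() and factorize()
+ * if(chol.info()==Success)
+ * {
+ *   VectorXd x = chol.solve(b);                  // solves A x = b
+ * }
+ * \endcode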
+ */
+template<typename Derived>
+class SimplicialCholeskyBase
+{
+ public:
+ typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ enum { UpLo = internal::traits<Derived>::UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ public:
+
+ SimplicialCholeskyBase()
+ : m_info(Success), m_isInitialized(false)
+ {}
+
+ SimplicialCholeskyBase(const MatrixType& matrix)
+ : m_info(Success), m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+ ~SimplicialCholeskyBase()
+ {
+ }
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears not to be positive definite.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ Derived& compute(const MatrixType& matrix)
+ {
+ derived().analyzePattern(matrix);
+ derived().factorize(matrix);
+ return derived();
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SimplicialCholeskyBase, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Simplicial LLt or LDLt is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Simplicial LLt or LDLt is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the permutation P
+ * \sa permutationPinv() */
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationP() const
+ { return m_P; }
+
+ /** \returns the inverse P^-1 of the permutation P
+ * \sa permutationP() */
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationPinv() const
+ { return m_Pinv; }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {
+ int total = 0;
+ s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n";
+ s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n";
+ s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " TOTAL: " << (total>> 20) << "Mb" << "\n";
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ eigen_assert(m_matrix.rows()==b.rows());
+
+ if(m_info!=Success)
+ return;
+
+ if(m_P.size()>0)
+ dest = m_Pinv * b;
+ else
+ dest = b;
+
+ if(m_matrix.nonZeros()>0) // otherwise L==I
+ derived().matrixL().solveInPlace(dest);
+
+ if(m_diag.size()>0)
+ dest = m_diag.asDiagonal().inverse() * dest;
+
+ if (m_matrix.nonZeros()>0) // otherwise U==I
+ derived().matrixU().solveInPlace(dest);
+
+ if(m_P.size()>0)
+ dest = m_P * dest;
+ }
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ eigen_assert(m_matrix.rows()==b.rows());
+
+ // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
+ static const int NbColsAtOnce = 4;
+ int rhsCols = b.cols();
+ int size = b.rows();
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+ for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+ tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
+ tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
+ dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
+ }
+ }
+
+#endif // EIGEN_PARSED_BY_DOXYGEN
+
+ protected:
+
+ template<bool DoLDLt>
+ void factorize(const MatrixType& a);
+
+ void analyzePattern(const MatrixType& a, bool doLDLt);
+
+ /** keeps off-diagonal entries; drops diagonal entries */
+ struct keep_diag {
+ inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+ {
+ return row!=col;
+ }
+ };
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_factorizationIsOk;
+ bool m_analysisIsOk;
+
+ CholMatrixType m_matrix;
+ VectorType m_diag; // the diagonal coefficients (LDLt mode)
+ VectorXi m_parent; // elimination tree
+ VectorXi m_nonZerosPerCol;
+ PermutationMatrix<Dynamic,Dynamic,Index> m_P; // the permutation
+ PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // the inverse permutation
+};
+
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLt;
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLt;
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky;
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLt<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+ typedef SparseTriangularView<CholMatrixType, Eigen::Lower> MatrixL;
+ typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m; }
+ inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+//template<typename _MatrixType> struct traits<SimplicialLLt<_MatrixType,Upper> >
+//{
+// typedef _MatrixType MatrixType;
+// enum { UpLo = Upper };
+// typedef typename MatrixType::Scalar Scalar;
+// typedef typename MatrixType::Index Index;
+// typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+// typedef TriangularView<CholMatrixType, Eigen::Lower> MatrixL;
+// typedef TriangularView<CholMatrixType, Eigen::Upper> MatrixU;
+// inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+// inline static MatrixU getU(const MatrixType& m) { return m; }
+//};
+
+template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLt<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+ typedef SparseTriangularView<CholMatrixType, Eigen::UnitLower> MatrixL;
+ typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m; }
+ inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+//template<typename _MatrixType> struct traits<SimplicialLDLt<_MatrixType,Upper> >
+//{
+// typedef _MatrixType MatrixType;
+// enum { UpLo = Upper };
+// typedef typename MatrixType::Scalar Scalar;
+// typedef typename MatrixType::Index Index;
+// typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+// typedef TriangularView<CholMatrixType, Eigen::UnitLower> MatrixL;
+// typedef TriangularView<CholMatrixType, Eigen::UnitUpper> MatrixU;
+// inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+// inline static MatrixU getU(const MatrixType& m) { return m; }
+//};
+
+template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+};
+
+}
+
+/** \class SimplicialLLt
+ * \brief A direct sparse LLt Cholesky factorization
+ *
+ * This class provides an LL^T Cholesky factorization of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
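+ * For instance, a short sketch of retrieving the triangular factor (A is a placeholder
+ * for a selfadjoint positive definite SparseMatrix<double>):
+ * \code
+ * SimplicialLLt<SparseMatrix<double> > llt(A);
+ * if(llt.info()==Success)
+ * {
+ *   // matrixL()/matrixU() are triangular views of the stored factor,
+ *   // and determinant() returns the determinant of A
+ *   double detA = llt.determinant();
+ * }
+ * \endcode
+ *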
+ * \sa class SimplicialLDLt
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialLLt : public SimplicialCholeskyBase<SimplicialLLt<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialLLt> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialLLt> Traits;
+ typedef typename Traits::MatrixL MatrixL;
+ typedef typename Traits::MatrixU MatrixU;
+public:
+ SimplicialLLt() : Base() {}
+ SimplicialLLt(const MatrixType& matrix)
+ : Base(matrix) {}
+
+ inline const MatrixL matrixL() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LLt not factorized");
+ return Traits::getL(Base::m_matrix);
+ }
+
+ inline const MatrixU matrixU() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LLt not factorized");
+ return Traits::getU(Base::m_matrix);
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
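+ * For instance, a sketch of factorizing two matrices sharing the same sparsity pattern
+ * (A1, A2, b1, b2 are placeholders; A1 and A2 are selfadjoint positive definite
+ * SparseMatrix<double> objects with identical patterns, b1 and b2 are VectorXd):
+ * \code
+ * SimplicialLLt<SparseMatrix<double> > llt;
+ * llt.analyzePattern(A1);        // symbolic phase, done only once
+ * llt.factorize(A1);             // numeric phase for A1
+ * VectorXd x1 = llt.solve(b1);
+ * llt.factorize(A2);             // reuses the symbolic analysis for A2
+ * VectorXd x2 = llt.solve(b2);
+ * \endcode
+ *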
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, false);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ Base::template factorize<false>(a);
+ }
+
+ Scalar determinant() const
+ {
+ Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
+ return internal::abs2(detL);
+ }
+};
+
+/** \class SimplicialLDLt
+ * \brief A direct sparse LDLt Cholesky factorization without square root.
+ *
+ * This class provides an LDL^T Cholesky factorization, without square root, of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
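+ * For instance, a short sketch of retrieving the factors (A is a placeholder for a
+ * selfadjoint positive definite SparseMatrix<double>):
+ * \code
+ * SimplicialLDLt<SparseMatrix<double> > ldlt(A);
+ * if(ldlt.info()==Success)
+ * {
+ *   VectorXd D = ldlt.vectorD();   // diagonal of the factor D
+ *   // ldlt.matrixL() is a unit lower triangular view of the stored factor
+ * }
+ * \endcode
+ *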
+ * \sa class SimplicialLLt
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialLDLt : public SimplicialCholeskyBase<SimplicialLDLt<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialLDLt> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialLDLt> Traits;
+ typedef typename Traits::MatrixL MatrixL;
+ typedef typename Traits::MatrixU MatrixU;
+public:
+ SimplicialLDLt() : Base() {}
+ SimplicialLDLt(const MatrixType& matrix)
+ : Base(matrix) {}
+
+ inline const VectorType vectorD() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLt not factorized");
+ return Base::m_diag;
+ }
+ inline const MatrixL matrixL() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLt not factorized");
+ return Traits::getL(Base::m_matrix);
+ }
+
+ inline const MatrixU matrixU() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLt not factorized");
+ return Traits::getU(Base::m_matrix);
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, true);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ Base::template factorize<true>(a);
+ }
+
+ Scalar determinant() const
+ {
+ return Base::m_diag.prod();
+ }
+};
+
+/** \class SimplicialCholesky
+ * \deprecated use class SimplicialLDLt or class SimplicialLLt
+ * \sa class SimplicialLDLt, class SimplicialLLt
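+ *
+ * A usage sketch of selecting the factorization kind at runtime (A is a placeholder for
+ * a selfadjoint positive definite SparseMatrix<double>):
+ * \code
+ * SimplicialCholesky<SparseMatrix<double> > chol;
+ * chol.setMode(SimplicialCholeskyLLt);   // the default mode is SimplicialCholeskyLDLt
+ * chol.compute(A);
+ * \endcode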
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialCholesky> Traits;
+ typedef internal::traits<SimplicialLDLt<MatrixType,UpLo> > LDLtTraits;
+ typedef internal::traits<SimplicialLLt<MatrixType,UpLo> > LLtTraits;
+ public:
+ SimplicialCholesky() : Base(), m_LDLt(true) {}
+
+ SimplicialCholesky(const MatrixType& matrix)
+ : Base(), m_LDLt(true)
+ {
+ Base::compute(matrix);
+ }
+
+ SimplicialCholesky& setMode(SimplicialCholeskyMode mode)
+ {
+ switch(mode)
+ {
+ case SimplicialCholeskyLLt:
+ m_LDLt = false;
+ break;
+ case SimplicialCholeskyLDLt:
+ m_LDLt = true;
+ break;
+ default:
+ break;
+ }
+
+ return *this;
+ }
+
+ inline const VectorType vectorD() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+ return Base::m_diag;
+ }
+ inline const CholMatrixType rawMatrix() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+ return Base::m_matrix;
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, m_LDLt);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ if(m_LDLt)
+ Base::template factorize<true>(a);
+ else
+ Base::template factorize<false>(a);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ eigen_assert(Base::m_matrix.rows()==b.rows());
+
+ if(Base::m_info!=Success)
+ return;
+
+ if(Base::m_P.size()>0)
+ dest = Base::m_Pinv * b;
+ else
+ dest = b;
+
+ if(Base::m_matrix.nonZeros()>0) // otherwise L==I
+ {
+ if(m_LDLt)
+ LDLtTraits::getL(Base::m_matrix).solveInPlace(dest);
+ else
+ LLtTraits::getL(Base::m_matrix).solveInPlace(dest);
+ }
+
+ if(Base::m_diag.size()>0)
+ dest = Base::m_diag.asDiagonal().inverse() * dest;
+
+ if (Base::m_matrix.nonZeros()>0) // otherwise I==I
+ {
+ if(m_LDLt)
+ LDLtTraits::getU(Base::m_matrix).solveInPlace(dest);
+ else
+ LLtTraits::getU(Base::m_matrix).solveInPlace(dest);
+ }
+
+ if(Base::m_P.size()>0)
+ dest = Base::m_P * dest;
+ }
+
+ Scalar determinant() const
+ {
+ if(m_LDLt)
+ {
+ return Base::m_diag.prod();
+ }
+ else
+ {
+ Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
+ return internal::abs2(detL);
+ }
+ }
+
+ protected:
+ bool m_LDLt;
+};
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::analyzePattern(const MatrixType& a, bool doLDLt)
+{
+ eigen_assert(a.rows()==a.cols());
+ const Index size = a.rows();
+ m_matrix.resize(size, size);
+ m_parent.resize(size);
+ m_nonZerosPerCol.resize(size);
+
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ // TODO allows to configure the permutation
+ {
+ CholMatrixType C;
+ C = a.template selfadjointView<UpLo>();
+ // remove diagonal entries:
+ C.prune(keep_diag());
+ internal::minimum_degree_ordering(C, m_P);
+ }
+
+ if(m_P.size()>0)
+ m_Pinv = m_P.inverse();
+ else
+ m_Pinv.resize(0);
+
+ SparseMatrix<Scalar,ColMajor,Index> ap(size,size);
+ ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_Pinv);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+ m_parent[k] = -1; /* parent of k is not yet known */
+ tags[k] = k; /* mark node k as visited */
+ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i < k)
+ {
+ /* follow path from i to root of etree, stop at flagged node */
+ for(; tags[i] != k; i = m_parent[i])
+ {
+ /* find parent of i if not yet determined */
+ if (m_parent[i] == -1)
+ m_parent[i] = k;
+ m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ }
+ }
+ }
+
+ /* construct Lp index array from m_nonZerosPerCol column counts */
+ Index* Lp = m_matrix._outerIndexPtr();
+ Lp[0] = 0;
+ for(Index k = 0; k < size; ++k)
+ Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLt ? 0 : 1);
+
+ m_matrix.resizeNonZeros(Lp[size]);
+
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+}
+
+
+template<typename Derived>
+template<bool DoLDLt>
+void SimplicialCholeskyBase<Derived>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ eigen_assert(a.rows()==a.cols());
+ const Index size = a.rows();
+ eigen_assert(m_parent.size()==size);
+ eigen_assert(m_nonZerosPerCol.size()==size);
+
+ const Index* Lp = m_matrix._outerIndexPtr();
+ Index* Li = m_matrix._innerIndexPtr();
+ Scalar* Lx = m_matrix._valuePtr();
+
+ ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ SparseMatrix<Scalar,ColMajor,Index> ap(size,size);
+ ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_Pinv);
+
+ bool ok = true;
+ m_diag.resize(DoLDLt ? size : 0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ // compute nonzero pattern of kth row of L, in topological order
+ y[k] = 0.0; // Y(0:k) is now all zero
+ Index top = size; // stack for pattern is empty
+ tags[k] = k; // mark node k as visited
+ m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
+ for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i <= k)
+ {
+ y[i] += internal::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
+ Index len;
+ for(len = 0; tags[i] != k; i = m_parent[i])
+ {
+ pattern[len++] = i; /* L(k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ while(len > 0)
+ pattern[--top] = pattern[--len];
+ }
+ }
+
+ /* compute numerical values kth row of L (a sparse triangular solve) */
+ Scalar d = y[k]; // get D(k,k) and clear Y(k)
+ y[k] = 0.0;
+ for(; top < size; ++top)
+ {
+ Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+ Scalar yi = y[i]; /* get and clear Y(i) */
+ y[i] = 0.0;
+
+ /* the nonzero entry L(k,i) */
+ Scalar l_ki;
+ if(DoLDLt)
+ l_ki = yi / m_diag[i];
+ else
+ yi = l_ki = yi / Lx[Lp[i]];
+
+ Index p2 = Lp[i] + m_nonZerosPerCol[i];
+ Index p;
+ for(p = Lp[i] + (DoLDLt ? 0 : 1); p < p2; ++p)
+ y[Li[p]] -= internal::conj(Lx[p]) * yi;
+ d -= l_ki * internal::conj(yi);
+ Li[p] = k; /* store L(k,i) in column form of L */
+ Lx[p] = l_ki;
+ ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
+ }
+ if(DoLDLt)
+ m_diag[k] = d;
+ else
+ {
+ Index p = Lp[k]+m_nonZerosPerCol[k]++;
+ Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
+ Lx[p] = internal::sqrt(d) ;
+ }
+ if(d == Scalar(0))
+ {
+ ok = false; /* failure, D(k,k) is zero */
+ break;
+ }
+ }
+
+ m_info = ok ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+namespace internal {
+
+template<typename Derived, typename Rhs>
+struct solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+ : solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+ typedef SimplicialCholeskyBase<Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+ : sparse_solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+ typedef SimplicialCholeskyBase<Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve_sparse(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_H
diff --git a/Eigen/src/Sparse/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h
index 22f3ff062..22f3ff062 100644
--- a/Eigen/src/Sparse/AmbiVector.h
+++ b/Eigen/src/SparseCore/AmbiVector.h
diff --git a/Eigen/src/SparseCore/CMakeLists.txt b/Eigen/src/SparseCore/CMakeLists.txt
new file mode 100644
index 000000000..d860452a6
--- /dev/null
+++ b/Eigen/src/SparseCore/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCore_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SparseCore_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel
+ )
diff --git a/Eigen/src/Sparse/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h
index 9d2fb7231..9d2fb7231 100644
--- a/Eigen/src/Sparse/CompressedStorage.h
+++ b/Eigen/src/SparseCore/CompressedStorage.h
diff --git a/Eigen/src/Sparse/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index ac5303dd0..ac5303dd0 100644
--- a/Eigen/src/Sparse/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
diff --git a/Eigen/src/Sparse/CoreIterators.h b/Eigen/src/SparseCore/CoreIterators.h
index b4beaeee6..b4beaeee6 100644
--- a/Eigen/src/Sparse/CoreIterators.h
+++ b/Eigen/src/SparseCore/CoreIterators.h
diff --git a/Eigen/src/Sparse/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h
index 31a431fb2..31a431fb2 100644
--- a/Eigen/src/Sparse/MappedSparseMatrix.h
+++ b/Eigen/src/SparseCore/MappedSparseMatrix.h
diff --git a/Eigen/src/Sparse/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index e69de29bb..e69de29bb 100644
--- a/Eigen/src/Sparse/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
diff --git a/Eigen/src/Sparse/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 5054b9755..5054b9755 100644
--- a/Eigen/src/Sparse/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
diff --git a/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index cde5bbc03..cde5bbc03 100644
--- a/Eigen/src/Sparse/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
diff --git a/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index aa068835f..aa068835f 100644
--- a/Eigen/src/Sparse/SparseCwiseUnaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
diff --git a/Eigen/src/Sparse/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index fff97ccd1..fff97ccd1 100644
--- a/Eigen/src/Sparse/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
diff --git a/Eigen/src/Sparse/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h
index fb9a29c05..fb9a29c05 100644
--- a/Eigen/src/Sparse/SparseDiagonalProduct.h
+++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h
diff --git a/Eigen/src/Sparse/SparseDot.h b/Eigen/src/SparseCore/SparseDot.h
index 1f10f71a4..1f10f71a4 100644
--- a/Eigen/src/Sparse/SparseDot.h
+++ b/Eigen/src/SparseCore/SparseDot.h
diff --git a/Eigen/src/Sparse/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
index f00b3d646..f00b3d646 100644
--- a/Eigen/src/Sparse/SparseFuzzy.h
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 9f0cbc2b7..9f0cbc2b7 100644
--- a/Eigen/src/Sparse/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 182ea0c7e..182ea0c7e 100644
--- a/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
diff --git a/Eigen/src/Sparse/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index f3b9b40b0..f3b9b40b0 100644
--- a/Eigen/src/Sparse/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
diff --git a/Eigen/src/Sparse/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h
index afc49de7a..afc49de7a 100644
--- a/Eigen/src/Sparse/SparseRedux.h
+++ b/Eigen/src/SparseCore/SparseRedux.h
diff --git a/Eigen/src/Sparse/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index a11d12aee..a11d12aee 100644
--- a/Eigen/src/Sparse/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
diff --git a/Eigen/src/Sparse/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 773d8110c..773d8110c 100644
--- a/Eigen/src/Sparse/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
diff --git a/Eigen/src/Sparse/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h
index 2aea2fa32..2aea2fa32 100644
--- a/Eigen/src/Sparse/SparseTranspose.h
+++ b/Eigen/src/SparseCore/SparseTranspose.h
diff --git a/Eigen/src/Sparse/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h
index dff5ae2c4..dff5ae2c4 100644
--- a/Eigen/src/Sparse/SparseTriangularView.h
+++ b/Eigen/src/SparseCore/SparseTriangularView.h
diff --git a/Eigen/src/Sparse/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index db9ae98e7..db9ae98e7 100644
--- a/Eigen/src/Sparse/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index ce4bb51a2..ce4bb51a2 100644
--- a/Eigen/src/Sparse/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
diff --git a/Eigen/src/Sparse/SparseView.h b/Eigen/src/SparseCore/SparseView.h
index 243065610..243065610 100644
--- a/Eigen/src/Sparse/SparseView.h
+++ b/Eigen/src/SparseCore/SparseView.h
diff --git a/Eigen/src/Sparse/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h
index 6c66052bf..6c66052bf 100644
--- a/Eigen/src/Sparse/TriangularSolver.h
+++ b/Eigen/src/SparseCore/TriangularSolver.h
diff --git a/Eigen/src/SuperLUSupport/CMakeLists.txt b/Eigen/src/SuperLUSupport/CMakeLists.txt
new file mode 100644
index 000000000..b28ebe583
--- /dev/null
+++ b/Eigen/src/SuperLUSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SuperLUSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SuperLUSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SuperLUSupport COMPONENT Devel
+ )
diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h
new file mode 100644
index 000000000..e485a9f50
--- /dev/null
+++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -0,0 +1,989 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \
+ extern "C" { \
+ typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t; \
+ extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
+ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
+ void *, int, SuperMatrix *, SuperMatrix *, \
+ FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \
+ PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \
+ } \
+ inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
+ int *perm_c, int *perm_r, int *etree, char *equed, \
+ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
+ SuperMatrix *U, void *work, int lwork, \
+ SuperMatrix *B, SuperMatrix *X, \
+ FLOATTYPE *recip_pivot_growth, \
+ FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
+ SuperLUStat_t *stats, int *info, KEYTYPE) { \
+ PREFIX##mem_usage_t mem_usage; \
+ PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+ U, work, lwork, B, X, recip_pivot_growth, rcond, \
+ ferr, berr, &mem_usage, stats, info); \
+ return mem_usage.for_lu; /* bytes used by the factor storage */ \
+ }
+
+DECL_GSSVX(s,float,float)
+DECL_GSSVX(c,float,std::complex<float>)
+DECL_GSSVX(d,double,double)
+DECL_GSSVX(z,double,std::complex<double>)
+
+#ifdef MILU_ALPHA
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+// similarly for the incomplete factorization using gsisx
+#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \
+ extern "C" { \
+ extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
+ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
+ void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \
+ PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \
+ } \
+ inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \
+ int *perm_c, int *perm_r, int *etree, char *equed, \
+ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
+ SuperMatrix *U, void *work, int lwork, \
+ SuperMatrix *B, SuperMatrix *X, \
+ FLOATTYPE *recip_pivot_growth, \
+ FLOATTYPE *rcond, \
+ SuperLUStat_t *stats, int *info, KEYTYPE) { \
+ PREFIX##mem_usage_t mem_usage; \
+ PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+ U, work, lwork, B, X, recip_pivot_growth, rcond, \
+ &mem_usage, stats, info); \
+ return mem_usage.for_lu; /* bytes used by the factor storage */ \
+ }
+
+DECL_GSISX(s,float,float)
+DECL_GSISX(c,float,std::complex<float>)
+DECL_GSISX(d,double,double)
+DECL_GSISX(z,double,std::complex<double>)
+
+#endif
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+ *
+ * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+ * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
+ *
+ * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
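+ *
+ * For instance, a minimal sketch of how Eigen objects are viewed through this wrapper
+ * (the variable names are placeholders):
+ * \code
+ * MatrixXd dense;                        // column-major dense storage
+ * SparseMatrix<double> sparse;           // compressed column storage
+ * SluMatrix da = SluMatrix::Map(dense);  // wrapped as a SLU_DN SuperMatrix
+ * SluMatrix sa = SluMatrix::Map(sparse); // wrapped as a SLU_NC SuperMatrix
+ * \endcode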
+ */
+struct SluMatrix : SuperMatrix
+{
+ SluMatrix()
+ {
+ Store = &storage;
+ }
+
+ SluMatrix(const SluMatrix& other)
+ : SuperMatrix(other)
+ {
+ Store = &storage;
+ storage = other.storage;
+ }
+
+ SluMatrix& operator=(const SluMatrix& other)
+ {
+ SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+ Store = &storage;
+ storage = other.storage;
+ return *this;
+ }
+
+ struct
+ {
+ union {int nnz;int lda;};
+ void *values;
+ int *innerInd;
+ int *outerInd;
+ } storage;
+
+ void setStorageType(Stype_t t)
+ {
+ Stype = t;
+ if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+ Store = &storage;
+ else
+ {
+ eigen_assert(false && "storage type not supported");
+ Store = 0;
+ }
+ }
+
+ template<typename Scalar>
+ void setScalarType()
+ {
+ if (internal::is_same<Scalar,float>::value)
+ Dtype = SLU_S;
+ else if (internal::is_same<Scalar,double>::value)
+ Dtype = SLU_D;
+ else if (internal::is_same<Scalar,std::complex<float> >::value)
+ Dtype = SLU_C;
+ else if (internal::is_same<Scalar,std::complex<double> >::value)
+ Dtype = SLU_Z;
+ else
+ {
+ eigen_assert(false && "Scalar type not supported by SuperLU");
+ }
+ }
+
+ template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+ static SluMatrix Map(Matrix<Scalar,Rows,Cols,Options,MRows,MCols>& mat)
+ {
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+ eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+ SluMatrix res;
+ res.setStorageType(SLU_DN);
+ res.setScalarType<Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
+ res.storage.values = mat.data();
+ return res;
+ }
+
+ template<typename MatrixType>
+ static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+ {
+ SluMatrix res;
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat.derived()._valuePtr();
+ res.storage.innerInd = mat.derived()._innerIndexPtr();
+ res.storage.outerInd = mat.derived()._outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & Upper)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & Lower)
+ res.Mtype = SLU_TRL;
+
+ eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+
+ return res;
+ }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+ eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+ res.setStorageType(SLU_DN);
+ res.setScalarType<Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = mat.outerStride();
+ res.storage.values = mat.data();
+ }
+};
+
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+ typedef Derived MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat._valuePtr();
+ res.storage.innerInd = mat._innerIndexPtr();
+ res.storage.outerInd = mat._outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & Upper)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & Lower)
+ res.Mtype = SLU_TRL;
+
+ eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+ }
+};
+
+namespace internal {
+
+template<typename MatrixType>
+SluMatrix asSluMatrix(MatrixType& mat)
+{
+ return SluMatrix::Map(mat);
+}
+
+/** View a SuperLU matrix as an Eigen expression */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
+{
+ eigen_assert( ((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+            || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC) );
+
+ Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
+
+ return MappedSparseMatrix<Scalar,Flags,Index>(
+ sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
+ sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
+}
+
+} // end namespace internal
+
+
+template<typename _MatrixType, typename Derived>
+class SuperLUBase
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+
+ public:
+
+ SuperLUBase() {}
+
+ ~SuperLUBase()
+ {
+ clearFactors();
+ }
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \returns a reference to the SuperLU options object used to configure the SuperLU algorithms. */
+ inline superlu_options_t& options() { return m_sluOptions; }
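+ // A sketch of overriding the default options before factorizing (a guess at typical
+ // usage; the field names are the ones set in SuperLU<>::init() below, and YES/NO are
+ // SuperLU's yes_no_t values):
+ //   SuperLU<SparseMatrix<double> > slu;
+ //   slu.options().PrintStat = YES;   // let SuperLU collect and print statistics
+ //   slu.compute(A);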
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the factorization failed, e.g., because the matrix appears to be singular.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** Computes the sparse LU decomposition of \a matrix */
+ void compute(const MatrixType& matrix)
+ {
+ derived().analyzePattern(matrix);
+ derived().factorize(matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
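+ * A minimal end-to-end sketch (A is a placeholder for a square SparseMatrix<double>,
+ * b for a VectorXd with matching number of rows):
+ * \code
+ * SuperLU<SparseMatrix<double> > lu(A);  // analyzePattern() + factorize()
+ * if(lu.info()==Success)
+ * {
+ *   VectorXd x = lu.solve(b);            // solves A x = b through SuperLU's gssvx
+ * }
+ * \endcode
+ *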
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SuperLUBase, Rhs> solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+// template<typename Rhs>
+// inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+// {
+// eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+// eigen_assert(rows()==b.rows()
+// && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+// return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
+// }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& /*matrix*/)
+ {
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {}
+
+ protected:
+
+ void initFactorization(const MatrixType& a)
+ {
+ const int size = a.rows();
+ m_matrix = a;
+
+ m_sluA = internal::asSluMatrix(m_matrix);
+ clearFactors();
+
+ m_p.resize(size);
+ m_q.resize(size);
+ m_sluRscale.resize(size);
+ m_sluCscale.resize(size);
+ m_sluEtree.resize(size);
+
+ // set empty B and X
+ m_sluB.setStorageType(SLU_DN);
+ m_sluB.setScalarType<Scalar>();
+ m_sluB.Mtype = SLU_GE;
+ m_sluB.storage.values = 0;
+ m_sluB.nrow = 0;
+ m_sluB.ncol = 0;
+ m_sluB.storage.lda = size;
+ m_sluX = m_sluB;
+
+ m_extractedDataAreDirty = true;
+ }
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_sluL.Store = 0;
+ m_sluU.Store = 0;
+ }
+
+ void extractData() const;
+
+ void clearFactors()
+ {
+ if(m_sluL.Store)
+ Destroy_SuperNode_Matrix(&m_sluL);
+ if(m_sluU.Store)
+ Destroy_CompCol_Matrix(&m_sluU);
+
+ m_sluL.Store = 0;
+ m_sluU.Store = 0;
+
+ memset(&m_sluL,0,sizeof m_sluL);
+ memset(&m_sluU,0,sizeof m_sluU);
+ }
+
+ // cached data to reduce reallocation, etc.
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ mutable LUMatrixType m_matrix; // copy of the factorized matrix
+ mutable SluMatrix m_sluA;
+ mutable SuperMatrix m_sluL, m_sluU;
+ mutable SluMatrix m_sluB, m_sluX;
+ mutable SuperLUStat_t m_sluStat;
+ mutable superlu_options_t m_sluOptions;
+ mutable std::vector<int> m_sluEtree;
+ mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
+ mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
+ mutable char m_sluEqued;
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+};
+
+
+template<typename _MatrixType>
+class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
+{
+ public:
+ typedef SuperLUBase<_MatrixType,SuperLU> Base;
+ typedef _MatrixType MatrixType;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::Index Index;
+ typedef typename Base::IntRowVectorType IntRowVectorType;
+ typedef typename Base::IntColVectorType IntColVectorType;
+ typedef typename Base::LUMatrixType LUMatrixType;
+ typedef TriangularView<LUMatrixType, Lower|UnitDiag> LMatrixType;
+ typedef TriangularView<LUMatrixType, Upper> UMatrixType;
+
+ public:
+
+ SuperLU() : Base() { init(); }
+
+ SuperLU(const MatrixType& matrix) : Base()
+ {
+ Base::init();
+ compute(matrix);
+ }
+
+ ~SuperLU()
+ {
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ init();
+ Base::analyzePattern(matrix);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix);
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_l;
+ }
+
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ protected:
+
+ using Base::m_matrix;
+ using Base::m_sluOptions;
+ using Base::m_sluA;
+ using Base::m_sluB;
+ using Base::m_sluX;
+ using Base::m_p;
+ using Base::m_q;
+ using Base::m_sluEtree;
+ using Base::m_sluEqued;
+ using Base::m_sluRscale;
+ using Base::m_sluCscale;
+ using Base::m_sluL;
+ using Base::m_sluU;
+ using Base::m_sluStat;
+ using Base::m_sluFerr;
+ using Base::m_sluBerr;
+ using Base::m_l;
+ using Base::m_u;
+
+ using Base::m_analysisIsOk;
+ using Base::m_factorizationIsOk;
+ using Base::m_extractedDataAreDirty;
+ using Base::m_isInitialized;
+ using Base::m_info;
+
+ void init()
+ {
+ Base::init();
+
+ set_default_options(&this->m_sluOptions);
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.ColPerm = COLAMD;
+ }
+};
+
+template<typename MatrixType>
+void SuperLU<MatrixType>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ if(!m_analysisIsOk)
+ {
+ m_info = InvalidInput;
+ return;
+ }
+
+ initFactorization(a);
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+ RealScalar ferr, berr;
+
+ StatInit(&m_sluStat);
+ SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &ferr, &berr,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ m_extractedDataAreDirty = true;
+
+ // FIXME how to better check for errors ???
+ m_info = info == 0 ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperLU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ eigen_assert(size==b.rows());
+
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x.derived());
+
+ typename Rhs::PlainObject b_cpy;
+ if(m_sluEqued!='N')
+ {
+ b_cpy = b;
+ m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
+ }
+
+ StatInit(&m_sluStat);
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+ SuperLU_gssvx(&m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluFerr[0], &m_sluBerr[0],
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+ m_info = info==0 ? Success : NumericalIssue;
+}
+
+// The code of this extractData() function has been adapted from SuperLU's Matlab support code.
+//
+// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+//
+// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+//
+template<typename MatrixType, typename Derived>
+void SuperLUBase<MatrixType,Derived>::extractData() const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
+ if (m_extractedDataAreDirty)
+ {
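+    // Convert SuperLU's internal storage of the factors (supernodal SCformat for L and
+    // compressed-column NCformat for U) into plain compressed column-major Eigen matrices.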
+ int upper;
+ int fsupc, istart, nsupr;
+ int lastl = 0, lastu = 0;
+ SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
+ NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
+ Scalar *SNptr;
+
+ const int size = m_matrix.rows();
+ m_l.resize(size,size);
+ m_l.resizeNonZeros(Lstore->nnz);
+ m_u.resize(size,size);
+ m_u.resizeNonZeros(Ustore->nnz);
+
+ int* Lcol = m_l._outerIndexPtr();
+ int* Lrow = m_l._innerIndexPtr();
+ Scalar* Lval = m_l._valuePtr();
+
+ int* Ucol = m_u._outerIndexPtr();
+ int* Urow = m_u._innerIndexPtr();
+ Scalar* Uval = m_u._valuePtr();
+
+    Lcol[0] = 0;
+    Ucol[0] = 0;
+
+ /* for each supernode */
+ for (int k = 0; k <= Lstore->nsuper; ++k)
+ {
+ fsupc = L_FST_SUPC(k);
+ istart = L_SUB_START(fsupc);
+ nsupr = L_SUB_START(fsupc+1) - istart;
+ upper = 1;
+
+ /* for each column in the supernode */
+ for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+ {
+ SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+ /* Extract U */
+ for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+ {
+ Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = U_SUB(i);
+ }
+ for (int i = 0; i < upper; ++i)
+ {
+ /* upper triangle in the supernode */
+ Uval[lastu] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = L_SUB(istart+i);
+ }
+ Ucol[j+1] = lastu;
+
+ /* Extract L */
+ Lval[lastl] = 1.0; /* unit diagonal */
+ Lrow[lastl++] = L_SUB(istart + upper - 1);
+ for (int i = upper; i < nsupr; ++i)
+ {
+ Lval[lastl] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Lval[lastl] != 0.0)
+ Lrow[lastl++] = L_SUB(istart+i);
+ }
+ Lcol[j+1] = lastl;
+
+ ++upper;
+ } /* for j ... */
+
+ } /* for k ... */
+
+    // squeeze the matrices:
+ m_l.resizeNonZeros(lastl);
+ m_u.resizeNonZeros(lastu);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
+
+ if (m_extractedDataAreDirty)
+ this->extractData();
+
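+  // Accumulate the product of the diagonal entries of U (stored as the last entry of each
+  // column when it lies on the diagonal); if the matrix was equilibrated, the row/column
+  // scalings are undone afterwards.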
+ Scalar det = Scalar(1);
+ for (int j=0; j<m_u.cols(); ++j)
+ {
+ if (m_u._outerIndexPtr()[j+1]-m_u._outerIndexPtr()[j] > 0)
+ {
+ int lastId = m_u._outerIndexPtr()[j+1]-1;
+ eigen_assert(m_u._innerIndexPtr()[lastId]<=j);
+ if (m_u._innerIndexPtr()[lastId]==j)
+ det *= m_u._valuePtr()[lastId];
+ }
+ }
+ if(m_sluEqued!='N')
+ return det/m_sluRscale.prod()/m_sluCscale.prod();
+ else
+ return det;
+}
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+template<typename _MatrixType>
+class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
+{
+ public:
+ typedef SuperLUBase<_MatrixType,SuperILU> Base;
+ typedef _MatrixType MatrixType;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::Index Index;
+
+ public:
+
+ SuperILU() : Base() { init(); }
+
+ SuperILU(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~SuperILU()
+ {
+ }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ Base::analyzePattern(matrix);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
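+      * Since the factorization is incomplete, the computed factors only approximate the input
+      * matrix, so solve() yields an approximate solution. An illustrative sketch (assuming a
+      * SparseMatrix<double> A, a VectorXd b, and the solve() API inherited from SuperLUBase):
+      * \code
+      * SuperILU<SparseMatrix<double> > ilu(A);   // analyzePattern() + factorize()
+      * VectorXd x_approx = ilu.solve(b);         // approximate solve based on the ILU factors
+      * \endcode
+      *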
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix);
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ protected:
+
+ using Base::m_matrix;
+ using Base::m_sluOptions;
+ using Base::m_sluA;
+ using Base::m_sluB;
+ using Base::m_sluX;
+ using Base::m_p;
+ using Base::m_q;
+ using Base::m_sluEtree;
+ using Base::m_sluEqued;
+ using Base::m_sluRscale;
+ using Base::m_sluCscale;
+ using Base::m_sluL;
+ using Base::m_sluU;
+ using Base::m_sluStat;
+ using Base::m_sluFerr;
+ using Base::m_sluBerr;
+ using Base::m_l;
+ using Base::m_u;
+
+ using Base::m_analysisIsOk;
+ using Base::m_factorizationIsOk;
+ using Base::m_extractedDataAreDirty;
+ using Base::m_isInitialized;
+ using Base::m_info;
+
+ void init()
+ {
+ Base::init();
+
+ ilu_set_default_options(&m_sluOptions);
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.ColPerm = MMD_AT_PLUS_A;
+
+ // no attempt to preserve column sum
+ m_sluOptions.ILU_MILU = SILU;
+ // only basic ILU(k) support -- no direct control over memory consumption
+ // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
+ // and set ILU_FillFactor to max memory growth
+ m_sluOptions.ILU_DropRule = DROP_BASIC;
+ m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
+ }
+};
+
+template<typename MatrixType>
+void SuperILU<MatrixType>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ if(!m_analysisIsOk)
+ {
+ m_info = InvalidInput;
+ return;
+ }
+
+ this->initFactorization(a);
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+
+ StatInit(&m_sluStat);
+ SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ // FIXME how to better check for errors ???
+ m_info = info == 0 ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperILU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ eigen_assert(size==b.rows());
+
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x.derived());
+
+ typename Rhs::PlainObject b_cpy;
+ if(m_sluEqued!='N')
+ {
+ b_cpy = b;
+ m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
+ }
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+
+ StatInit(&m_sluStat);
+ SuperLU_gsisx(&m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ m_info = info==0 ? Success : NumericalIssue;
+}
+#endif
+
+namespace internal {
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+ : solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+ typedef SuperLUBase<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+ : sparse_solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+ typedef SuperLUBase<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/Eigen/src/UmfPackSupport/CMakeLists.txt b/Eigen/src/UmfPackSupport/CMakeLists.txt
new file mode 100644
index 000000000..a57de0020
--- /dev/null
+++ b/Eigen/src/UmfPackSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_UmfPackSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_UmfPackSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/UmfPackSupport COMPONENT Devel
+ )
diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h
new file mode 100644
index 000000000..e41de8337
--- /dev/null
+++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -0,0 +1,406 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_UMFPACKSUPPORT_H
+#define EIGEN_UMFPACKSUPPORT_H
+
+/* TODO extract L, extract U, compute det, etc... */
+
+// generic double/complex<double> wrapper functions:
+
+inline void umfpack_free_numeric(void **Numeric, double)
+{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, double)
+{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
+{
+ return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
+ int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
+ int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ double& lx0_real = internal::real_ref(Lx[0]);
+ double& ux0_real = internal::real_ref(Ux[0]);
+ double& dx0_real = internal::real_ref(Dx[0]);
+ return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
+ Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ double& mx_real = internal::real_ref(*Mx);
+ return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
+}
+
+/** \brief A sparse LU factorization and solver based on UmfPack
+ *
+ * This class allows solving A.X = B sparse linear problems via an LU factorization
+ * using the UmfPack library. The sparse matrix A must be column-major, square, and full rank.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ *
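+ * A minimal illustrative example (assuming a column-major SparseMatrix<double> A and a
+ * dense right hand side VectorXd b):
+ * \code
+ * UmfPackLU<SparseMatrix<double> > lu(A);   // analyzePattern() + factorize()
+ * VectorXd x;
+ * if(lu.info()==Success)
+ *   x = lu.solve(b);
+ * \endcode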
+ */
+template<typename _MatrixType>
+class UmfPackLU
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+
+ public:
+
+ UmfPackLU() { init(); }
+
+ UmfPackLU(const MatrixType& matrix)
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~UmfPackLU()
+ {
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ }
+
+ inline Index rows() const { return m_matrixRef->rows(); }
+ inline Index cols() const { return m_matrixRef->cols(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+      * \returns \c Success if the computation was successful,
+      *          \c NumericalIssue if the factorization failed because of a numerical problem.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ inline const LUMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const LUMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+    /** Computes the sparse LU decomposition of \a matrix */
+ void compute(const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<UmfPackLU, Rhs> solve(const MatrixBase<Rhs>& b) const
+ {
+      eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+// template<typename Rhs>
+//     inline const internal::sparse_solve_retval<UmfPackLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+//     {
+//       eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+//       eigen_assert(rows()==b.rows()
+//                 && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+//       return internal::sparse_solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+//     }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ eigen_assert((MatrixType::Flags&RowMajorBit)==0 && "UmfPackLU: Row major matrices are not supported yet");
+
+ if(m_symbolic)
+ umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ int errorCode = 0;
+ errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), matrix._outerIndexPtr(), matrix._innerIndexPtr(), matrix._valuePtr(),
+ &m_symbolic, 0, 0);
+
+ m_isInitialized = true;
+ m_info = errorCode ? InvalidInput : Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
+ if(m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ m_matrixRef = &matrix;
+
+ int errorCode;
+ errorCode = umfpack_numeric(matrix._outerIndexPtr(), matrix._innerIndexPtr(), matrix._valuePtr(),
+ m_symbolic, &m_numeric, 0, 0);
+
+ m_info = errorCode ? NumericalIssue : Success;
+ m_factorizationIsOk = true;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename BDerived,typename XDerived>
+ bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
+ #endif
+
+ Scalar determinant() const;
+
+ void extractData() const;
+
+ protected:
+
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_numeric = 0;
+ m_symbolic = 0;
+ }
+
+ // cached data to reduce reallocation, etc.
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ const MatrixType* m_matrixRef;
+ void* m_numeric;
+ void* m_symbolic;
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+};
+
+
+template<typename MatrixType>
+void UmfPackLU<MatrixType>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,(std::min)(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize((std::min)(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract
+ umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(),
+ m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
+{
+ Scalar det;
+ umfpack_get_determinant(&det, 0, m_numeric, 0);
+ return det;
+}
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool UmfPackLU<MatrixType>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+ const int rhsCols = b.cols();
+ eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
+ eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
+
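+  // umfpack_*_solve handles a single right hand side at a time, so the columns of b are
+  // processed one by one.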
+ int errorCode;
+ for (int j=0; j<rhsCols; ++j)
+ {
+ errorCode = umfpack_solve(UMFPACK_A,
+ m_matrixRef->_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(),
+ &x.col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
+ if (errorCode!=0)
+ return false;
+ }
+
+ return true;
+}
+
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<UmfPackLU<_MatrixType>, Rhs>
+ : solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+ typedef UmfPackLU<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Rhs>
+struct sparse_solve_retval<UmfPackLU<_MatrixType>, Rhs>
+ : sparse_solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+ typedef UmfPackLU<_MatrixType> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+#endif // EIGEN_UMFPACKSUPPORT_H
diff --git a/Eigen/src/misc/SparseSolve.h b/Eigen/src/misc/SparseSolve.h
new file mode 100644
index 000000000..5b6c859ae
--- /dev/null
+++ b/Eigen/src/misc/SparseSolve.h
@@ -0,0 +1,122 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SOLVE_H
+#define EIGEN_SPARSE_SOLVE_H
+
+namespace internal {
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base;
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval;
+
+template<typename DecompositionType, typename Rhs>
+struct traits<sparse_solve_retval_base<DecompositionType, Rhs> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef SparseMatrix<typename Rhs::Scalar, Rhs::Options, typename Rhs::Index> ReturnType;
+};
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base
+ : public ReturnByValue<sparse_solve_retval_base<_DecompositionType, Rhs> >
+{
+ typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+ typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<sparse_solve_retval_base> Base;
+ typedef typename Base::Index Index;
+
+ sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
+ : m_dec(dec), m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
+ inline const DecompositionType& dec() const { return m_dec; }
+ inline const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ static_cast<const sparse_solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ const typename Rhs::Nested m_rhs;
+};
+
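+// A concrete decomposition Dec specializes sparse_solve_retval<Dec,Rhs> and uses this macro to
+// import the required typedefs and constructor, as done for SuperLUBase and UmfPackLU in their
+// respective support modules.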
+#define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \
+ typedef typename DecompositionType::MatrixType MatrixType; \
+ typedef typename MatrixType::Scalar Scalar; \
+ typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
+ typedef Eigen::internal::sparse_solve_retval_base<DecompositionType,Rhs> Base; \
+ using Base::dec; \
+ using Base::rhs; \
+ using Base::rows; \
+ using Base::cols; \
+ sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \
+ : Base(dec, rhs) {}
+
+
+
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess;
+
+template<typename DecompositionType, typename Rhs, typename Guess>
+struct traits<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef Matrix<typename Rhs::Scalar,
+ MatrixType::ColsAtCompileTime,
+ Rhs::ColsAtCompileTime,
+ Rhs::PlainObject::Options,
+ MatrixType::MaxColsAtCompileTime,
+ Rhs::MaxColsAtCompileTime> ReturnType;
+};
+
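+// Return type for solves that start from a user provided initial guess (e.g. iterative solvers):
+// the destination is first seeded with the guess, then refined through the decomposition's
+// _solveWithGuess() method.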
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess
+ : public ReturnByValue<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+ typedef typename DecompositionType::Index Index;
+
+ solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess)
+ : m_dec(dec), m_rhs(rhs), m_guess(guess)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ dst = m_guess;
+ m_dec._solveWithGuess(m_rhs,dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ const typename Rhs::Nested m_rhs;
+ const typename Guess::Nested m_guess;
+};
+
+} // namespace internal
+
+#endif // EIGEN_SPARSE_SOLVE_H