aboutsummaryrefslogtreecommitdiffhomepage
path: root/unsupported/Eigen
diff options
context:
space:
mode:
authorGravatar Gael Guennebaud <g.gael@free.fr>2010-06-18 11:28:30 +0200
committerGravatar Gael Guennebaud <g.gael@free.fr>2010-06-18 11:28:30 +0200
commitece48a645051a9984a78a3197027c9c861a0c702 (patch)
treec52c6400b2a839f62877c168c193fc024ccf68d6 /unsupported/Eigen
parent22d07ec2e3324d09c7dff36c9642f0ed74f3e994 (diff)
split the Sparse module into multiple ones, and move non stable parts to unsupported/
(see the ML for details)
Diffstat (limited to 'unsupported/Eigen')
-rw-r--r--unsupported/Eigen/CholmodSupport33
-rw-r--r--unsupported/Eigen/SparseExtra67
-rw-r--r--unsupported/Eigen/SuperLUSupport44
-rw-r--r--unsupported/Eigen/TaucsSupport49
-rw-r--r--unsupported/Eigen/UmfPackSupport28
-rw-r--r--unsupported/Eigen/src/CMakeLists.txt1
-rw-r--r--unsupported/Eigen/src/SparseExtra/CholmodSupport.h243
-rw-r--r--unsupported/Eigen/src/SparseExtra/RandomSetter.h340
-rw-r--r--unsupported/Eigen/src/SparseExtra/SparseLDLT.h348
-rw-r--r--unsupported/Eigen/src/SparseExtra/SparseLLT.h203
-rw-r--r--unsupported/Eigen/src/SparseExtra/SparseLU.h162
-rw-r--r--unsupported/Eigen/src/SparseExtra/SuperLUSupport.h651
-rw-r--r--unsupported/Eigen/src/SparseExtra/TaucsSupport.h219
-rw-r--r--unsupported/Eigen/src/SparseExtra/UmfPackSupport.h289
14 files changed, 2677 insertions, 0 deletions
diff --git a/unsupported/Eigen/CholmodSupport b/unsupported/Eigen/CholmodSupport
new file mode 100644
index 000000000..e1c50e536
--- /dev/null
+++ b/unsupported/Eigen/CholmodSupport
@@ -0,0 +1,33 @@
+#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
+#define EIGEN_CHOLMODSUPPORT_MODULE_H
+
+#include "SparseExtra"
+
+#include "src/Core/util/DisableMSVCWarnings.h"
+
+#ifdef EIGEN_CHOLMOD_SUPPORT
+ extern "C" {
+ #include <cholmod.h>
+ }
+#endif
+
+namespace Eigen {
+
+/** \defgroup CholmodSupport_Module Cholmod Support module
+ *
+ * \nonstableyet
+ *
+ *
+ * \code
+ * #include <Eigen/CholmodSupport>
+ * \endcode
+ */
+
+#include "src/SparseExtra/CholmodSupport.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/EnableMSVCWarnings.h"
+
+#endif // EIGEN_CHOLMODSUPPORT_MODULE_H
+
diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra
new file mode 100644
index 000000000..ba44a9c42
--- /dev/null
+++ b/unsupported/Eigen/SparseExtra
@@ -0,0 +1,67 @@
+#ifndef EIGEN_SPARSE_EXTRA_MODULE_H
+#define EIGEN_SPARSE_EXTRA_MODULE_H
+
+#include "../Eigen/Sparse"
+
+#include "src/Core/util/DisableMSVCWarnings.h"
+
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+
+#ifdef EIGEN_GOOGLEHASH_SUPPORT
+ #include <google/dense_hash_map>
+#endif
+
+namespace Eigen {
+
+/** \defgroup SparseExtra_Module SparseExtra module
+ *
+ * This module contains some experimental features extending the sparse module.
+ *
+ * \code
+ * #include <Eigen/SparseExtra>
+ * \endcode
+ */
+
+enum SparseBackend {
+ DefaultBackend,
+ Taucs,
+ Cholmod,
+ SuperLU,
+ UmfPack
+};
+
+// solver flags
+enum {
+ CompleteFactorization = 0x0000, // the default
+ IncompleteFactorization = 0x0001,
+ MemoryEfficient = 0x0002,
+
+ // For LLT Cholesky:
+ SupernodalMultifrontal = 0x0010,
+ SupernodalLeftLooking = 0x0020,
+
+ // Ordering methods:
+ NaturalOrdering = 0x0100, // the default
+ MinimumDegree_AT_PLUS_A = 0x0200,
+ MinimumDegree_ATA = 0x0300,
+ ColApproxMinimumDegree = 0x0400,
+ Metis = 0x0500,
+ Scotch = 0x0600,
+ Chaco = 0x0700,
+ OrderingMask = 0x0f00
+};
+
+#include "src/SparseExtra/RandomSetter.h"
+#include "src/SparseExtra/SparseLLT.h"
+#include "src/SparseExtra/SparseLDLT.h"
+#include "src/SparseExtra/SparseLU.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/EnableMSVCWarnings.h"
+
+#endif // EIGEN_SPARSE_EXTRA_MODULE_H
diff --git a/unsupported/Eigen/SuperLUSupport b/unsupported/Eigen/SuperLUSupport
new file mode 100644
index 000000000..6a2a3b15a
--- /dev/null
+++ b/unsupported/Eigen/SuperLUSupport
@@ -0,0 +1,44 @@
+#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H
+#define EIGEN_SUPERLUSUPPORT_MODULE_H
+
+#include "SparseExtra"
+
+#include "src/Core/util/DisableMSVCWarnings.h"
+
+typedef int int_t;
+#include <slu_Cnames.h>
+#include <supermatrix.h>
+#include <slu_util.h>
+
+namespace SuperLU_S {
+#include <slu_sdefs.h>
+}
+namespace SuperLU_D {
+#include <slu_ddefs.h>
+}
+namespace SuperLU_C {
+#include <slu_cdefs.h>
+}
+namespace SuperLU_Z {
+#include <slu_zdefs.h>
+}
+namespace Eigen { struct SluMatrix; }
+
+namespace Eigen {
+
+/** \defgroup SuperLUSupport_Module Super LU support
+ *
+ * \nonstableyet
+ *
+ * \code
+ * #include <Eigen/SuperLUSupport>
+ * \endcode
+ */
+
+#include "src/SparseExtra/SuperLUSupport.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/EnableMSVCWarnings.h"
+
+#endif // EIGEN_SUPERLUSUPPORT_MODULE_H
diff --git a/unsupported/Eigen/TaucsSupport b/unsupported/Eigen/TaucsSupport
new file mode 100644
index 000000000..93e4bd7aa
--- /dev/null
+++ b/unsupported/Eigen/TaucsSupport
@@ -0,0 +1,49 @@
+#ifndef EIGEN_TAUCSSUPPORT_MODULE_H
+#define EIGEN_TAUCSSUPPORT_MODULE_H
+
+#include "SparseExtra"
+
+#include "src/Core/util/DisableMSVCWarnings.h"
+
+// taucs.h declares a lot of mess
+#define isnan
+#define finite
+#define isinf
+extern "C" {
+ #include <taucs.h>
+}
+#undef isnan
+#undef finite
+#undef isinf
+
+#ifdef min
+ #undef min
+#endif
+#ifdef max
+ #undef max
+#endif
+#ifdef complex
+ #undef complex
+#endif
+
+namespace Eigen {
+
+/** \defgroup TaucsSupport_Module Taucs support module
+ *
+ * \nonstableyet
+ *
+ *
+ * \code
+ * #include <Eigen/TaucsSupport>
+ * \endcode
+ */
+
+#ifdef EIGEN_TAUCS_SUPPORT
+# include "src/SparseExtra/TaucsSupport.h"
+#endif
+
+} // namespace Eigen
+
+#include "src/Core/util/EnableMSVCWarnings.h"
+
+#endif // EIGEN_TAUCSSUPPORT_MODULE_H
diff --git a/unsupported/Eigen/UmfPackSupport b/unsupported/Eigen/UmfPackSupport
new file mode 100644
index 000000000..6fcf40ec0
--- /dev/null
+++ b/unsupported/Eigen/UmfPackSupport
@@ -0,0 +1,28 @@
+#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H
+#define EIGEN_UMFPACKSUPPORT_MODULE_H
+
+#include "SparseExtra"
+
+#include "src/Core/util/DisableMSVCWarnings.h"
+
+#include <umfpack.h>
+
+namespace Eigen {
+
+/** \defgroup Sparse_Module Sparse module
+ *
+ * \nonstableyet
+ *
+ *
+ * \code
+ * #include <Eigen/UmfPackSupport>
+ * \endcode
+ */
+
+#include "src/SparseExtra/UmfPackSupport.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/EnableMSVCWarnings.h"
+
+#endif // EIGEN_UMFPACKSUPPORT_MODULE_H
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
index 035a84ca8..4578c2e85 100644
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -6,3 +6,4 @@ ADD_SUBDIRECTORY(MoreVectorization)
# ADD_SUBDIRECTORY(Skyline)
ADD_SUBDIRECTORY(MatrixFunctions)
ADD_SUBDIRECTORY(Polynomials)
+ADD_SUBDIRECTORY(SparseExtra)
diff --git a/unsupported/Eigen/src/SparseExtra/CholmodSupport.h b/unsupported/Eigen/src/SparseExtra/CholmodSupport.h
new file mode 100644
index 000000000..92e79f2a6
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/CholmodSupport.h
@@ -0,0 +1,243 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+template<typename Scalar, typename CholmodType>
+void ei_cholmod_configure_matrix(CholmodType& mat)
+{
+ if (ei_is_same_type<Scalar,float>::ret)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (ei_is_same_type<Scalar,double>::ret)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else
+ {
+ ei_assert(false && "Scalar type not supported by CHOLMOD");
+ }
+}
+
+template<typename MatrixType>
+cholmod_sparse ei_asCholmodMatrix(MatrixType& mat)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ cholmod_sparse res;
+ res.nzmax = mat.nonZeros();
+ res.nrow = mat.rows();;
+ res.ncol = mat.cols();
+ res.p = mat._outerIndexPtr();
+ res.i = mat._innerIndexPtr();
+ res.x = mat._valuePtr();
+ res.xtype = CHOLMOD_REAL;
+ res.itype = CHOLMOD_INT;
+ res.sorted = 1;
+ res.packed = 1;
+ res.dtype = 0;
+ res.stype = -1;
+
+ ei_cholmod_configure_matrix<Scalar>(res);
+
+
+ if (MatrixType::Flags & SelfAdjoint)
+ {
+ if (MatrixType::Flags & Upper)
+ res.stype = 1;
+ else if (MatrixType::Flags & Lower)
+ res.stype = -1;
+ else
+ res.stype = 0;
+ }
+ else
+ res.stype = -1; // by default we consider the lower part
+
+ return res;
+}
+
+template<typename Derived>
+cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
+{
+ EIGEN_STATIC_ASSERT((ei_traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ typedef typename Derived::Scalar Scalar;
+
+ cholmod_dense res;
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ res.nzmax = res.nrow * res.ncol;
+ res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
+ res.x = mat.derived().data();
+ res.z = 0;
+
+ ei_cholmod_configure_matrix<Scalar>(res);
+
+ return res;
+}
+
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> ei_map_cholmod(cholmod_sparse& cm)
+{
+ return MappedSparseMatrix<Scalar,Flags,Index>
+ (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
+ reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
+}
+
+template<typename MatrixType>
+class SparseLLT<MatrixType,Cholmod> : public SparseLLT<MatrixType>
+{
+ protected:
+ typedef SparseLLT<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::CholMatrixType CholMatrixType;
+ typedef typename MatrixType::Index Index;
+ using Base::MatrixLIsDirty;
+ using Base::SupernodalFactorIsDirty;
+ using Base::m_flags;
+ using Base::m_matrix;
+ using Base::m_status;
+
+ public:
+
+ SparseLLT(int flags = 0)
+ : Base(flags), m_cholmodFactor(0)
+ {
+ cholmod_start(&m_cholmod);
+ }
+
+ SparseLLT(const MatrixType& matrix, int flags = 0)
+ : Base(flags), m_cholmodFactor(0)
+ {
+ cholmod_start(&m_cholmod);
+ compute(matrix);
+ }
+
+ ~SparseLLT()
+ {
+ if (m_cholmodFactor)
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ cholmod_finish(&m_cholmod);
+ }
+
+ inline const CholMatrixType& matrixL() const;
+
+ template<typename Derived>
+ bool solveInPlace(MatrixBase<Derived> &b) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+ mutable cholmod_common m_cholmod;
+ cholmod_factor* m_cholmodFactor;
+};
+
+template<typename MatrixType>
+void SparseLLT<MatrixType,Cholmod>::compute(const MatrixType& a)
+{
+ if (m_cholmodFactor)
+ {
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ m_cholmodFactor = 0;
+ }
+
+ cholmod_sparse A = ei_asCholmodMatrix(const_cast<MatrixType&>(a));
+// m_cholmod.supernodal = CHOLMOD_AUTO;
+ // TODO
+// if (m_flags&IncompleteFactorization)
+// {
+// m_cholmod.nmethods = 1;
+// m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
+// m_cholmod.postorder = 0;
+// }
+// else
+// {
+// m_cholmod.nmethods = 1;
+// m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
+// m_cholmod.postorder = 0;
+// }
+// m_cholmod.final_ll = 1;
+ m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+ cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+
+ m_status = (m_status & ~SupernodalFactorIsDirty) | MatrixLIsDirty;
+}
+
+template<typename MatrixType>
+inline const typename SparseLLT<MatrixType,Cholmod>::CholMatrixType&
+SparseLLT<MatrixType,Cholmod>::matrixL() const
+{
+ if (m_status & MatrixLIsDirty)
+ {
+ ei_assert(!(m_status & SupernodalFactorIsDirty));
+
+ cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod);
+ const_cast<typename Base::CholMatrixType&>(m_matrix) = ei_map_cholmod<Scalar,ColMajor,Index>(*cmRes);
+ free(cmRes);
+
+ m_status = (m_status & ~MatrixLIsDirty);
+ }
+ return m_matrix;
+}
+
+template<typename MatrixType>
+template<typename Derived>
+bool SparseLLT<MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ const Index size = m_cholmodFactor->n;
+ ei_assert(size==b.rows());
+
+ // this uses Eigen's triangular sparse solver
+// if (m_status & MatrixLIsDirty)
+// matrixL();
+// Base::solveInPlace(b);
+ // as long as our own triangular sparse solver is not fully optimal,
+ // let's use CHOLMOD's one:
+ cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
+ //cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, m_cholmodFactor, &cdb, &m_cholmod);
+ cholmod_dense* x = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &cdb, &m_cholmod);
+ if(!x)
+ {
+ std::cerr << "Eigen: cholmod_solve failed\n";
+ return false;
+ }
+ b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
+ cholmod_free_dense(&x, &m_cholmod);
+ return true;
+}
+
+#endif // EIGEN_CHOLMODSUPPORT_H
diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
new file mode 100644
index 000000000..18777e23d
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -0,0 +1,340 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_RANDOMSETTER_H
+#define EIGEN_RANDOMSETTER_H
+
+/** Represents a std::map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdMapTraits
+{
+ typedef int KeyType;
+ typedef std::map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 1
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+
+#ifdef EIGEN_UNORDERED_MAP_SUPPORT
+/** Represents a std::unordered_map
+ *
+ * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
+ * yourself making sure that unordered_map is defined in the std namespace.
+ *
+ * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do:
+ * \code
+ * #include <tr1/unordered_map>
+ * #define EIGEN_UNORDERED_MAP_SUPPORT
+ * namespace std {
+ * using std::tr1::unordered_map;
+ * }
+ * \endcode
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdUnorderedMapTraits
+{
+ typedef int KeyType;
+ typedef std::unordered_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif // EIGEN_UNORDERED_MAP_SUPPORT
+
+#ifdef _DENSE_HASH_MAP_H_
+/** Represents a google::dense_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleDenseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::dense_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type& map, const KeyType& k)
+ { map.set_empty_key(k); }
+};
+#endif
+
+#ifdef _SPARSE_HASH_MAP_H_
+/** Represents a google::sparse_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleSparseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::sparse_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif
+
+/** \class RandomSetter
+ *
+ * \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access
+ *
+ * \param SparseMatrixType the type of the sparse matrix we are updating
+ * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage.
+ * Its default value depends on the system.
+ * \param OuterPacketBits defines the number of rows (or columns) manage by a single map object
+ * as a power of two exponent.
+ *
+ * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
+ * efficient random access. The conversion from the compressed representation to a hash_map object is performed
+ * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
+ * suggest the use of nested blocks as in this example:
+ *
+ * \code
+ * SparseMatrix<double> m(rows,cols);
+ * {
+ * RandomSetter<SparseMatrix<double> > w(m);
+ * // don't use m but w instead with read/write random access to the coefficients:
+ * for(;;)
+ * w(rand(),rand()) = rand;
+ * }
+ * // when w is deleted, the data are copied back to m
+ * // and m is ready to use.
+ * \endcode
+ *
+ * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
+ * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
+ * use multiple hash_map, each representing 2^OuterPacketBits columns or rows according to the storage order.
+ * To reach optimal performance, this value should be adjusted according to the average number of nonzeros
+ * per rows/columns.
+ *
+ * The possible values for the template parameter MapTraits are:
+ * - \b StdMapTraits: corresponds to std::map. (does not perform very well)
+ * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
+ * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
+ * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
+ *
+ * The default map implementation depends on the availability, and the preferred order is:
+ * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
+ *
+ * For performance and memory consumption reasons it is highly recommended to use one of
+ * the Google's hash_map implementation. To enable the support for them, you have two options:
+ * - \#include <google/dense_hash_map> yourself \b before Eigen/Sparse header
+ * - define EIGEN_GOOGLEHASH_SUPPORT
+ * In the later case the inclusion of <google/dense_hash_map> is made for you.
+ *
+ * \see http://code.google.com/p/google-sparsehash/
+ */
+template<typename SparseMatrixType,
+ template <typename T> class MapTraits =
+#if defined _DENSE_HASH_MAP_H_
+ GoogleDenseHashMapTraits
+#elif defined _HASH_MAP
+ GnuHashMapTraits
+#else
+ StdMapTraits
+#endif
+ ,int OuterPacketBits = 6>
+class RandomSetter
+{
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
+
+ struct ScalarWrapper
+ {
+ ScalarWrapper() : value(0) {}
+ Scalar value;
+ };
+ typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
+ typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
+ static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
+ enum {
+ SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
+ TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
+ SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor,
+ IsUpper = SparseMatrixType::Flags & Upper,
+ IsLower = SparseMatrixType::Flags & Lower
+ };
+
+ public:
+
+ /** Constructs a random setter object from the sparse matrix \a target
+ *
+ * Note that the initial value of \a target are imported. If you want to re-set
+ * a sparse matrix from scratch, then you must set it to zero first using the
+ * setZero() function.
+ */
+ inline RandomSetter(SparseMatrixType& target)
+ : mp_target(&target)
+ {
+ const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
+ const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();
+ m_outerPackets = outerSize >> OuterPacketBits;
+ if (outerSize&OuterPacketMask)
+ m_outerPackets += 1;
+ m_hashmaps = new HashMapType[m_outerPackets];
+ // compute number of bits needed to store inner indices
+ Index aux = innerSize - 1;
+ m_keyBitsOffset = 0;
+ while (aux)
+ {
+ ++m_keyBitsOffset;
+ aux = aux >> 1;
+ }
+ KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
+ for (Index k=0; k<m_outerPackets; ++k)
+ MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
+
+ // insert current coeffs
+ for (Index j=0; j<mp_target->outerSize(); ++j)
+ for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
+ (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
+ }
+
+ /** Destructor updating back the sparse matrix target */
+ ~RandomSetter()
+ {
+ KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
+ if (!SwapStorage) // also means the map is sorted
+ {
+ mp_target->setZero();
+ mp_target->reserve(nonZeros());
+ Index prevOuter = -1;
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ const Index outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index outer = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index inner = it->first & keyBitsMask;
+ if (prevOuter!=outer)
+ {
+ for (Index j=prevOuter+1;j<=outer;++j)
+ mp_target->startVec(j);
+ prevOuter = outer;
+ }
+ mp_target->insertBackByOuterInner(outer, inner) = it->second.value;
+ }
+ }
+ mp_target->finalize();
+ }
+ else
+ {
+ VectorXi positions(mp_target->outerSize());
+ positions.setZero();
+ // pass 1
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index outer = it->first & keyBitsMask;
+ ++positions[outer];
+ }
+ }
+ // prefix sum
+ Index count = 0;
+ for (Index j=0; j<mp_target->outerSize(); ++j)
+ {
+ Index tmp = positions[j];
+ mp_target->_outerIndexPtr()[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ mp_target->_outerIndexPtr()[mp_target->outerSize()] = count;
+ mp_target->resizeNonZeros(count);
+ // pass 2
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ const Index outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index inner = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index outer = it->first & keyBitsMask;
+ // sorted insertion
+ // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
+ // moreover those 2^OuterPacketBits coeffs are likely to be sparse, an so only a
+ // small fraction of them have to be sorted, whence the following simple procedure:
+ Index posStart = mp_target->_outerIndexPtr()[outer];
+ Index i = (positions[outer]++) - 1;
+ while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) )
+ {
+ mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i];
+ mp_target->_innerIndexPtr()[i+1] = mp_target->_innerIndexPtr()[i];
+ --i;
+ }
+ mp_target->_innerIndexPtr()[i+1] = inner;
+ mp_target->_valuePtr()[i+1] = it->second.value;
+ }
+ }
+ }
+ delete[] m_hashmaps;
+ }
+
+ /** \returns a reference to the coefficient at given coordinates \a row, \a col */
+ Scalar& operator() (Index row, Index col)
+ {
+ ei_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix");
+ ei_assert(((!IsLower) || (col<=row)) && "Invalid access to an upper triangular matrix");
+ const Index outer = SetterRowMajor ? row : col;
+ const Index inner = SetterRowMajor ? col : row;
+ const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
+ const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
+ const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
+ return m_hashmaps[outerMajor][key].value;
+ }
+
+ /** \returns the number of non zero coefficients
+ *
+ * \note According to the underlying map/hash_map implementation,
+ * this function might be quite expensive.
+ */
+ Index nonZeros() const
+ {
+ Index nz = 0;
+ for (Index k=0; k<m_outerPackets; ++k)
+ nz += static_cast<Index>(m_hashmaps[k].size());
+ return nz;
+ }
+
+
+ protected:
+
+ HashMapType* m_hashmaps;
+ SparseMatrixType* mp_target;
+ Index m_outerPackets;
+ unsigned char m_keyBitsOffset;
+};
+
+#endif // EIGEN_RANDOMSETTER_H
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLDLT.h b/unsupported/Eigen/src/SparseExtra/SparseLDLT.h
new file mode 100644
index 000000000..a6785d0af
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/SparseLDLT.h
@@ -0,0 +1,348 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: the _symbolic and _numeric functions have been adapted from
+ the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+ */
+
+#ifndef EIGEN_SPARSELDLT_H
+#define EIGEN_SPARSELDLT_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLDLT
+ *
+ * \brief LDLT Cholesky decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition
+ *
+ * \warning the upper triangular part has to be specified. The rest of the matrix is not used. The input matrix must be column major.
+ *
+  * \sa class LLT, class LDLT
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLDLT
+{
+  protected:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef SparseMatrix<Scalar> CholMatrixType;
+    typedef Matrix<Scalar,MatrixType::ColsAtCompileTime,1> VectorType;
+
+    enum {
+      SupernodalFactorIsDirty = 0x10000,
+      MatrixLIsDirty = 0x20000
+    };
+
+  public:
+
+    /** Creates a dummy LDLT factorization object with flags \a flags. */
+    SparseLDLT(int flags = 0)
+      : m_flags(flags), m_status(0), m_succeeded(false) // m_succeeded must not be read uninitialized
+    {
+      ei_assert((MatrixType::Flags&RowMajorBit)==0);
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+    }
+
+    /** Creates a LDLT object and compute the respective factorization of \a matrix using
+      * flags \a flags. */
+    SparseLDLT(const MatrixType& matrix, int flags = 0)
+      : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0), m_succeeded(false)
+    {
+      ei_assert((MatrixType::Flags&RowMajorBit)==0);
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+      compute(matrix);
+    }
+
+    /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+      *
+      * Setting a value greater than zero speeds up computation, and yields to an incomplete
+      * factorization with fewer non zero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * \warning if precision is greater than zero, the LDLT factorization is not guaranteed to succeed
+      * even if the matrix is positive definite.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+      *
+      * \sa precision() */
+    void setPrecision(RealScalar v) { m_precision = v; }
+
+    /** \returns the current precision.
+      *
+      * \sa setPrecision() */
+    RealScalar precision() const { return m_precision; }
+
+    /** Sets the flags. Possible values are:
+      *  - CompleteFactorization
+      *  - IncompleteFactorization
+      *  - MemoryEfficient          (hint to use the memory most efficient method offered by the backend)
+      *  - SupernodalMultifrontal   (implies a complete factorization if supported by the backend,
+      *                              overloads the MemoryEfficient flags)
+      *  - SupernodalLeftLooking    (implies a complete factorization if supported by the backend,
+      *                              overloads the MemoryEfficient flags)
+      *
+      * \sa flags() */
+    void setFlags(int f) { m_flags = f; }
+    /** \deprecated misspelled synonym of setFlags(), kept for backward compatibility;
+      * SparseLLT and SparseLU name this method setFlags(). */
+    void settags(int f) { setFlags(f); }
+    /** \returns the current flags */
+    int flags() const { return m_flags; }
+
+    /** Computes/re-computes the LDLT factorization */
+    void compute(const MatrixType& matrix);
+
+    /** Perform a symbolic factorization */
+    void _symbolic(const MatrixType& matrix);
+    /** Perform the actual factorization using the previously
+      * computed symbolic factorization */
+    bool _numeric(const MatrixType& matrix);
+
+    /** \returns the lower triangular matrix L */
+    inline const CholMatrixType& matrixL(void) const { return m_matrix; }
+
+    /** \returns the coefficients of the diagonal matrix D */
+    inline VectorType vectorD(void) const { return m_diag; }
+
+    template<typename Derived>
+    bool solveInPlace(MatrixBase<Derived> &b) const;
+
+    /** \returns true if the factorization succeeded */
+    inline bool succeeded(void) const { return m_succeeded; }
+
+  protected:
+    CholMatrixType m_matrix;   // holds the unit lower triangular factor L
+    VectorType m_diag;         // the diagonal D
+    VectorXi m_parent;         // elimination tree
+    VectorXi m_nonZerosPerCol;
+//     VectorXi m_w; // workspace
+    RealScalar m_precision;
+    int m_flags;
+    mutable int m_status;
+    bool m_succeeded;
+};
+
+/** Computes / recomputes the LDLT decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLDLT<MatrixType,Backend>::compute(const MatrixType& a)
+{
+  // run the symbolic analysis first, then the numerical factorization,
+  // and record whether the latter succeeded
+  _symbolic(a);
+  const bool numericOk = _numeric(a);
+  m_succeeded = numericOk;
+}
+
+template<typename MatrixType, int Backend>
+void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
+{
+  // Computes the elimination tree (m_parent) and the nonzero count of each
+  // column of L, then sizes L's storage accordingly. Adapted from T. Davis'
+  // LDL package (see the license note at the top of this file).
+  assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  m_matrix.resize(size, size);
+  m_parent.resize(size);
+  m_nonZerosPerCol.resize(size);
+  // per-column visitation marks for the etree traversal
+  Index * tags = ei_aligned_stack_new(Index, size);
+
+  const Index* Ap = a._outerIndexPtr();
+  const Index* Ai = a._innerIndexPtr();
+  Index* Lp = m_matrix._outerIndexPtr();
+  // P/Pinv: optional fill-reducing permutation and its inverse.
+  // Both are currently always null (no ordering applied yet).
+  const Index* P = 0;
+  Index* Pinv = 0;
+
+  if (P)
+  {
+    /* If P is present then compute Pinv, the inverse of P */
+    for (Index k = 0; k < size; ++k)
+      Pinv[P[k]] = k;
+  }
+  for (Index k = 0; k < size; ++k)
+  {
+    /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+    m_parent[k] = -1; /* parent of k is not yet known */
+    tags[k] = k; /* mark node k as visited */
+    m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+    Index kk = P ? P[k] : k; /* kth original, or permuted, column */
+    Index p2 = Ap[kk+1];
+    for (Index p = Ap[kk]; p < p2; ++p)
+    {
+      /* A (i,k) is nonzero (original or permuted A) */
+      Index i = Pinv ? Pinv[Ai[p]] : Ai[p];
+      if (i < k)
+      {
+        /* follow path from i to root of etree, stop at flagged node */
+        for (; tags[i] != k; i = m_parent[i])
+        {
+          /* find parent of i if not yet determined */
+          if (m_parent[i] == -1)
+            m_parent[i] = k;
+          ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */
+          tags[i] = k; /* mark i as visited */
+        }
+      }
+    }
+  }
+  /* construct Lp index array from m_nonZerosPerCol column counts */
+  Lp[0] = 0;
+  for (Index k = 0; k < size; ++k)
+    Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
+
+  m_matrix.resizeNonZeros(Lp[size]);
+  ei_aligned_stack_delete(Index, tags, size);
+}
+
+template<typename MatrixType, int Backend>
+bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
+{
+  // Fills L's values and the diagonal D using the sparsity pattern computed
+  // by _symbolic(). Adapted from T. Davis' LDL package. Returns false as soon
+  // as a zero pivot D(k,k) is encountered.
+  assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  assert(m_parent.size()==size);
+  assert(m_nonZerosPerCol.size()==size);
+
+  const Index* Ap = a._outerIndexPtr();
+  const Index* Ai = a._innerIndexPtr();
+  const Scalar* Ax = a._valuePtr();
+  const Index* Lp = m_matrix._outerIndexPtr();
+  Index* Li = m_matrix._innerIndexPtr();
+  Scalar* Lx = m_matrix._valuePtr();
+  m_diag.resize(size);
+
+  // workspaces: y = dense scatter vector, pattern = stack of row indices,
+  // tags = per-column visitation marks
+  Scalar * y = ei_aligned_stack_new(Scalar, size);
+  Index * pattern = ei_aligned_stack_new(Index, size);
+  Index * tags = ei_aligned_stack_new(Index, size);
+
+  // optional permutation, currently always null (must match _symbolic)
+  const Index* P = 0;
+  const Index* Pinv = 0;
+  bool ok = true;
+
+  for (Index k = 0; k < size; ++k)
+  {
+    /* compute nonzero pattern of kth row of L, in topological order */
+    y[k] = 0.0; /* Y(0:k) is now all zero */
+    Index top = size; /* stack for pattern is empty */
+    tags[k] = k; /* mark node k as visited */
+    m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+    Index kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
+    Index p2 = Ap[kk+1];
+    for (Index p = Ap[kk]; p < p2; ++p)
+    {
+      Index i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
+      if (i <= k)
+      {
+        y[i] += ei_conj(Ax[p]); /* scatter A(i,k) into Y (sum duplicates) */
+        Index len;
+        for (len = 0; tags[i] != k; i = m_parent[i])
+        {
+          pattern[len++] = i; /* L(k,i) is nonzero */
+          tags[i] = k; /* mark i as visited */
+        }
+        while (len > 0)
+          pattern[--top] = pattern[--len];
+      }
+    }
+
+    /* compute numerical values kth row of L (a sparse triangular solve) */
+    m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */
+    y[k] = 0.0;
+    for (; top < size; ++top)
+    {
+      Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+      Scalar yi = (y[i]); /* get and clear Y(i) */
+      y[i] = 0.0;
+      Index p2 = Lp[i] + m_nonZerosPerCol[i];
+      Index p;
+      for (p = Lp[i]; p < p2; ++p)
+        y[Li[p]] -= ei_conj(Lx[p]) * (yi);
+      Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
+      m_diag[k] -= l_ki * ei_conj(yi);
+      Li[p] = k; /* store L(k,i) in column form of L */
+      Lx[p] = (l_ki);
+      ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
+    }
+    if (m_diag[k] == 0.0)
+    {
+      ok = false; /* failure, D(k,k) is zero */
+      break;
+    }
+  }
+
+  ei_aligned_stack_delete(Scalar, y, size);
+  ei_aligned_stack_delete(Index, pattern, size);
+  ei_aligned_stack_delete(Index, tags, size);
+
+  return ok; /* success, diagonal of D is all nonzero */
+}
+
+/** Computes b = L^-T D^-1 L^-1 b */
+template<typename MatrixType, int Backend>
+template<typename Derived>
+bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
+{
+  const Index size = m_matrix.rows();
+  ei_assert(size==b.rows());
+  if (!m_succeeded)
+    return false;
+
+  // when L stores no explicit entries it is the identity, so both
+  // triangular solves can be skipped
+  const bool hasL = m_matrix.nonZeros()>0;
+  if (hasL)
+    m_matrix.template triangularView<UnitLower>().solveInPlace(b);
+  b = b.cwiseQuotient(m_diag);
+  if (hasL)
+    m_matrix.adjoint().template triangularView<UnitUpper>().solveInPlace(b);
+
+  return true;
+}
+
+#endif // EIGEN_SPARSELDLT_H
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLLT.h b/unsupported/Eigen/src/SparseExtra/SparseLLT.h
new file mode 100644
index 000000000..47d58f8e6
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/SparseLLT.h
@@ -0,0 +1,203 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSELLT_H
+#define EIGEN_SPARSELLT_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLLT
+ *
+ * \brief LLT Cholesky decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LLT Cholesky decomposition
+ *
+ * \sa class LLT, class LDLT
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLLT
+{
+  protected:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef SparseMatrix<Scalar> CholMatrixType;
+
+    enum {
+      SupernodalFactorIsDirty = 0x10000,
+      MatrixLIsDirty = 0x20000
+    };
+
+  public:
+
+    /** Creates a dummy LLT factorization object with flags \a flags. */
+    SparseLLT(int flags = 0)
+      : m_flags(flags), m_status(0), m_succeeded(false) // m_succeeded must not be read uninitialized
+    {
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+    }
+
+    /** Creates a LLT object and compute the respective factorization of \a matrix using
+      * flags \a flags. */
+    SparseLLT(const MatrixType& matrix, int flags = 0)
+      : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0), m_succeeded(false)
+    {
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+      compute(matrix);
+    }
+
+    /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+      *
+      * Setting a value greater than zero speeds up computation, and yields to an incomplete
+      * factorization with fewer non zero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * \warning if precision is greater than zero, the LLT factorization is not guaranteed to succeed
+      * even if the matrix is positive definite.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+      *
+      * \sa precision() */
+    void setPrecision(RealScalar v) { m_precision = v; }
+
+    /** \returns the current precision.
+      *
+      * \sa setPrecision() */
+    RealScalar precision() const { return m_precision; }
+
+    /** Sets the flags. Possible values are:
+      *  - CompleteFactorization
+      *  - IncompleteFactorization
+      *  - MemoryEfficient          (hint to use the memory most efficient method offered by the backend)
+      *  - SupernodalMultifrontal   (implies a complete factorization if supported by the backend,
+      *                              overloads the MemoryEfficient flags)
+      *  - SupernodalLeftLooking    (implies a complete factorization if supported by the backend,
+      *                              overloads the MemoryEfficient flags)
+      *
+      * \sa flags() */
+    void setFlags(int f) { m_flags = f; }
+    /** \returns the current flags */
+    int flags() const { return m_flags; }
+
+    /** Computes/re-computes the LLT factorization */
+    void compute(const MatrixType& matrix);
+
+    /** \returns the lower triangular matrix L */
+    inline const CholMatrixType& matrixL(void) const { return m_matrix; }
+
+    template<typename Derived>
+    bool solveInPlace(MatrixBase<Derived> &b) const;
+
+    /** \returns true if the factorization succeeded
+      * \note NOTE(review): the default (backend-less) compute() implementation
+      * does not update m_succeeded yet — confirm against backend specializations. */
+    inline bool succeeded(void) const { return m_succeeded; }
+
+  protected:
+    CholMatrixType m_matrix;  // holds the lower triangular factor L
+    RealScalar m_precision;
+    int m_flags;
+    mutable int m_status;
+    bool m_succeeded;
+};
+
+/** Computes / recomputes the LLT decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
+{
+  // Left-looking column-by-column Cholesky: for each column j, accumulate the
+  // below-diagonal entries in a temporary AmbiVector, subtract the
+  // contributions of the already computed columns, then scale by 1/L(j,j).
+  // \warning assumes 'a' stores non-zero diagonal entries and only its lower
+  // triangular part (see the assert below).
+  assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  m_matrix.resize(size, size);
+
+  // allocate a temporary vector for accumulations
+  AmbiVector<Scalar,Index> tempVector(size);
+  RealScalar density = a.nonZeros()/RealScalar(size*size);
+
+  // TODO estimate the number of non zeros
+  m_matrix.setZero();
+  m_matrix.reserve(a.nonZeros()*2);
+  for (Index j = 0; j < size; ++j)
+  {
+    // x accumulates the squared diagonal entry L(j,j)^2 before the sqrt
+    Scalar x = ei_real(a.coeff(j,j));
+
+    // TODO better estimate of the density !
+    tempVector.init(density>0.001? IsDense : IsSparse);
+    tempVector.setBounds(j+1,size);
+    tempVector.setZero();
+    // init with current matrix a
+    {
+      typename MatrixType::InnerIterator it(a,j);
+      ei_assert(it.index()==j &&
+        "matrix must has non zero diagonal entries and only the lower triangular part must be stored");
+      ++it; // skip diagonal element
+      for (; it; ++it)
+        tempVector.coeffRef(it.index()) = it.value();
+    }
+    // subtract the contributions of all previously computed columns k<=j
+    // that have a nonzero in row j
+    for (Index k=0; k<j+1; ++k)
+    {
+      typename CholMatrixType::InnerIterator it(m_matrix, k);
+      while (it && it.index()<j)
+        ++it;
+      if (it && it.index()==j)
+      {
+        Scalar y = it.value();
+        x -= ei_abs2(y);
+        ++it; // skip j-th element, and process remaining column coefficients
+        tempVector.restart();
+        for (; it; ++it)
+        {
+          tempVector.coeffRef(it.index()) -= it.value() * y;
+        }
+      }
+    }
+    // copy the temporary vector to the respective m_matrix.col()
+    // while scaling the result by 1/real(x)
+    RealScalar rx = ei_sqrt(ei_real(x));
+    m_matrix.insert(j,j) = rx; // FIXME use insertBack
+    Scalar y = Scalar(1)/rx;
+    // entries with magnitude below m_precision*rx are pruned by the iterator
+    for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector, m_precision*rx); it; ++it)
+    {
+      // FIXME use insertBack
+      m_matrix.insert(it.index(), j) = it.value() * y;
+    }
+  }
+  m_matrix.finalize();
+}
+
+/** Computes b = L^-T L^-1 b */
+template<typename MatrixType, int Backend>
+template<typename Derived>
+bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
+{
+  const Index size = m_matrix.rows();
+  ei_assert(size==b.rows());
+  // be consistent with SparseLDLT::solveInPlace(): do not attempt to solve
+  // with a factor that was not successfully computed
+  if (!m_succeeded)
+    return false;
+
+  // b <- L^-1 b, then b <- L^-T b
+  m_matrix.template triangularView<Lower>().solveInPlace(b);
+  m_matrix.adjoint().template triangularView<Upper>().solveInPlace(b);
+
+  return true;
+}
+
+#endif // EIGEN_SPARSELLT_H
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLU.h b/unsupported/Eigen/src/SparseExtra/SparseLU.h
new file mode 100644
index 000000000..0211b78e2
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/SparseLU.h
@@ -0,0 +1,162 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSELU_H
+#define EIGEN_SPARSELU_H
+
+// transposition flags for SparseLU::solve(): solve with A, A^T or A^H
+enum {
+    SvNoTrans = 0,
+    SvTranspose = 1,
+    SvAdjoint = 2
+};
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLU
+ *
+ * \brief LU decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LU factorization
+ *
+ * \sa class FullPivLU, class SparseLLT
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLU
+{
+  protected:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef SparseMatrix<Scalar> LUMatrixType;
+
+    enum {
+      MatrixLUIsDirty = 0x10000
+    };
+
+  public:
+
+    /** Creates a dummy LU factorization object with flags \a flags. */
+    SparseLU(int flags = 0)
+      : m_flags(flags), m_status(0), m_succeeded(false) // m_succeeded must not be read uninitialized
+    {
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+    }
+
+    /** Creates a LU object and compute the respective factorization of \a matrix using
+      * flags \a flags. */
+    SparseLU(const MatrixType& matrix, int flags = 0)
+      : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0), m_succeeded(false)
+    {
+      m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
+      compute(matrix);
+    }
+
+    /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+      *
+      * Setting a value greater than zero speeds up computation, and yields to an incomplete
+      * factorization with fewer non zero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+      *
+      * \sa precision() */
+    void setPrecision(RealScalar v) { m_precision = v; }
+
+    /** \returns the current precision.
+      *
+      * \sa setPrecision() */
+    RealScalar precision() const { return m_precision; }
+
+    /** Sets the flags. Possible values are:
+      *  - CompleteFactorization
+      *  - IncompleteFactorization
+      *  - MemoryEfficient
+      *  - one of the ordering methods
+      *  - etc...
+      *
+      * \sa flags() */
+    void setFlags(int f) { m_flags = f; }
+    /** \returns the current flags */
+    int flags() const { return m_flags; }
+
+    /** Selects the ordering method \a m, leaving all non-ordering flag bits untouched. */
+    void setOrderingMethod(int m)
+    {
+      ei_assert( (m&~OrderingMask) == 0 && m!=0 && "invalid ordering method");
+      // explicit parentheses: '&' binds tighter than '|'; make the grouping
+      // obvious and silence compiler precedence warnings
+      m_flags = (m_flags&~OrderingMask) | (m&OrderingMask);
+    }
+
+    /** \returns the ordering-method bits of the current flags */
+    int orderingMethod() const
+    {
+      return m_flags&OrderingMask;
+    }
+
+    /** Computes/re-computes the LU factorization */
+    void compute(const MatrixType& matrix);
+
+    /** \returns the lower triangular matrix L */
+    //inline const MatrixType& matrixL() const { return m_matrixL; }
+
+    /** \returns the upper triangular matrix U */
+    //inline const MatrixType& matrixU() const { return m_matrixU; }
+
+    template<typename BDerived, typename XDerived>
+    bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x,
+               const int transposed = SvNoTrans) const;
+
+    /** \returns true if the factorization succeeded */
+    inline bool succeeded(void) const { return m_succeeded; }
+
+  protected:
+    RealScalar m_precision;
+    int m_flags;
+    mutable int m_status;
+    bool m_succeeded;
+};
+
+/** Computes / recomputes the LU decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLU<MatrixType,Backend>::compute(const MatrixType& )
+{
+  // no default (backend-independent) sparse LU factorization is available;
+  // only backend specializations (e.g. SuperLU, UmfPack) implement compute()
+  ei_assert(false && "not implemented yet");
+}
+
+/** Computes *x = U^-1 L^-1 b
+ *
+ * If \a transpose is set to SvTranspose or SvAdjoint, the solution
+ * of the transposed/adjoint system is computed instead.
+ *
+ * Not all backends implement the solution of the transposed or
+ * adjoint system.
+ */
+template<typename MatrixType, int Backend>
+template<typename BDerived, typename XDerived>
+bool SparseLU<MatrixType,Backend>::solve(const MatrixBase<BDerived> &, MatrixBase<XDerived>* , const int ) const
+{
+  // no default (backend-independent) solver; only backend specializations
+  // implement solve(), hence the unconditional failure here
+  ei_assert(false && "not implemented yet");
+  return false;
+}
+
+#endif // EIGEN_SPARSELU_H
diff --git a/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h b/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h
new file mode 100644
index 000000000..9766d8fa2
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h
@@ -0,0 +1,651 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+// declaration of gssvx taken from GMM++
+#define DECL_GSSVX(NAMESPACE,FNAME,FLOATTYPE,KEYTYPE) \
+    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,  \
+         int *perm_c, int *perm_r, int *etree, char *equed,  \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,  \
+         SuperMatrix *U, void *work, int lwork,  \
+         SuperMatrix *B, SuperMatrix *X,  \
+         FLOATTYPE *recip_pivot_growth,  \
+         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,  \
+         SuperLUStat_t *stats, int *info, KEYTYPE) { \
+    using namespace NAMESPACE; \
+    mem_usage_t mem_usage; \
+    NAMESPACE::FNAME(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+         U, work, lwork, B, X, recip_pivot_growth, rcond, \
+         ferr, berr, &mem_usage, stats, info); \
+    return mem_usage.for_lu; /* bytes used by the factor storage */ \
+  }
+
+// instantiate one SuperLU_gssvx overload per scalar type supported by
+// SuperLU; the trailing KEYTYPE argument selects the overload
+DECL_GSSVX(SuperLU_S,sgssvx,float,float)
+DECL_GSSVX(SuperLU_C,cgssvx,float,std::complex<float>)
+DECL_GSSVX(SuperLU_D,dgssvx,double,double)
+DECL_GSSVX(SuperLU_Z,zgssvx,double,std::complex<double>)
+
+#ifdef MILU_ALPHA
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+// similarly for the incomplete factorization using gsisx
+#define DECL_GSISX(NAMESPACE,FNAME,FLOATTYPE,KEYTYPE) \
+    inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A,  \
+         int *perm_c, int *perm_r, int *etree, char *equed,  \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,  \
+         SuperMatrix *U, void *work, int lwork,  \
+         SuperMatrix *B, SuperMatrix *X,  \
+         FLOATTYPE *recip_pivot_growth,  \
+         FLOATTYPE *rcond,  \
+         SuperLUStat_t *stats, int *info, KEYTYPE) { \
+    using namespace NAMESPACE; \
+    mem_usage_t mem_usage; \
+    NAMESPACE::FNAME(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+         U, work, lwork, B, X, recip_pivot_growth, rcond, \
+         &mem_usage, stats, info); \
+    return mem_usage.for_lu; /* bytes used by the factor storage */ \
+  }
+
+// instantiate one SuperLU_gsisx overload per supported scalar type
+DECL_GSISX(SuperLU_S,sgsisx,float,float)
+DECL_GSISX(SuperLU_C,cgsisx,float,std::complex<float>)
+DECL_GSISX(SuperLU_D,dgsisx,double,double)
+DECL_GSISX(SuperLU_Z,zgsisx,double,std::complex<double>)
+
+#endif
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+ *
+ * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+ * and dense matrices. Supernodal and other fancy format are not supported by this wrapper.
+ *
+ * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure.
+ */
+struct SluMatrix : SuperMatrix
+{
+  SluMatrix()
+  {
+    Store = &storage;
+  }
+
+  SluMatrix(const SluMatrix& other)
+    : SuperMatrix(other)
+  {
+    // Store must always point into *this* object's storage member,
+    // never into the copied-from object's
+    Store = &storage;
+    storage = other.storage;
+  }
+
+  SluMatrix& operator=(const SluMatrix& other)
+  {
+    SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+    Store = &storage;
+    storage = other.storage;
+    return *this;
+  }
+
+  // in-object storage descriptor; layout mirrors SuperLU's NCformat/NRformat
+  // (sparse) and DNformat (dense) Store structs, avoiding a heap allocation
+  struct
+  {
+    union {int nnz;int lda;};
+    void *values;
+    int *innerInd;
+    int *outerInd;
+  } storage;
+
+  void setStorageType(Stype_t t)
+  {
+    Stype = t;
+    if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+      Store = &storage;
+    else
+    {
+      ei_assert(false && "storage type not supported");
+      Store = 0;
+    }
+  }
+
+  template<typename Scalar>
+  void setScalarType()
+  {
+    // map the C++ scalar type to the SuperLU Dtype tag
+    if (ei_is_same_type<Scalar,float>::ret)
+      Dtype = SLU_S;
+    else if (ei_is_same_type<Scalar,double>::ret)
+      Dtype = SLU_D;
+    else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+      Dtype = SLU_C;
+    else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+      Dtype = SLU_Z;
+    else
+    {
+      ei_assert(false && "Scalar type not supported by SuperLU");
+    }
+  }
+
+  // view a dense Eigen matrix as a SuperLU dense (SLU_DN) matrix; no copy,
+  // the returned SluMatrix borrows mat's storage
+  template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+  static SluMatrix Map(Matrix<Scalar,Rows,Cols,Options,MRows,MCols>& mat)
+  {
+    typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+    ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
+    SluMatrix res;
+    res.setStorageType(SLU_DN);
+    res.setScalarType<Scalar>();
+    res.Mtype = SLU_GE;
+
+    res.nrow = mat.rows();
+    res.ncol = mat.cols();
+
+    res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
+    res.storage.values = mat.data();
+    return res;
+  }
+
+  // view a sparse Eigen matrix as a SuperLU compressed matrix; a row-major
+  // input maps to SLU_NR (note the swapped nrow/ncol), column-major to SLU_NC
+  template<typename MatrixType>
+  static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+  {
+    SluMatrix res;
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      res.setStorageType(SLU_NR);
+      res.nrow = mat.cols();
+      res.ncol = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow = mat.rows();
+      res.ncol = mat.cols();
+    }
+
+    res.Mtype = SLU_GE;
+
+    res.storage.nnz = mat.nonZeros();
+    res.storage.values = mat.derived()._valuePtr();
+    res.storage.innerInd = mat.derived()._innerIndexPtr();
+    res.storage.outerInd = mat.derived()._outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+    if (MatrixType::Flags & SelfAdjoint)
+      ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+    return res;
+  }
+};
+
+// fills \a res so that it becomes a SuperLU dense (SLU_DN) view of the dense
+// Eigen matrix \a mat; no copy, res borrows mat's storage
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+  typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
+    res.setStorageType(SLU_DN);
+    res.setScalarType<Scalar>();
+    res.Mtype = SLU_GE;
+
+    res.nrow = mat.rows();
+    res.ncol = mat.cols();
+
+    res.storage.lda = mat.outerStride();
+    res.storage.values = mat.data();
+  }
+};
+
+// fills \a res so that it becomes a SuperLU compressed view of the sparse
+// Eigen matrix \a mat (SLU_NR for row-major input, SLU_NC for column-major);
+// no copy, res borrows mat's index and value arrays
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+  typedef Derived MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      // SuperLU's NR format stores rows, so the dimensions are swapped
+      res.setStorageType(SLU_NR);
+      res.nrow = mat.cols();
+      res.ncol = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow = mat.rows();
+      res.ncol = mat.cols();
+    }
+
+    res.Mtype = SLU_GE;
+
+    res.storage.nnz = mat.nonZeros();
+    res.storage.values = mat._valuePtr();
+    res.storage.innerInd = mat._innerIndexPtr();
+    res.storage.outerInd = mat._outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+    if (MatrixType::Flags & SelfAdjoint)
+      ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+  }
+};
+
+// Convenience wrapper: view any supported Eigen matrix as a SluMatrix
+// (delegates to SluMatrix::Map; the result aliases mat's storage).
+template<typename MatrixType>
+SluMatrix ei_asSluMatrix(MatrixType& mat)
+{
+ return SluMatrix::Map(mat);
+}
+
+/** View a Super LU matrix as an Eigen expression.
+  * The returned map aliases sluMat's arrays (no copy); the requested Eigen
+  * storage order must match the SuperLU storage kind (row-major <-> SLU_NR,
+  * column-major <-> SLU_NC) -- no conversion is performed. */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> ei_map_superlu(SluMatrix& sluMat)
+{
+  // parenthesized to make the &&/|| grouping explicit (was relying on
+  // operator precedence, which triggers -Wparentheses)
+  ei_assert(  ((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+           || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
+
+  // number of outer vectors; outerInd[outerSize] is the total nnz count
+  Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
+
+  return MappedSparseMatrix<Scalar,Flags,Index>(
+    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
+    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
+}
+
+/** \brief SuperLU backend specialization of SparseLU.
+  *
+  * Factorizes a sparse matrix with SuperLU (complete LU via gssvx, or
+  * incomplete ILU via gsisx when EIGEN_SUPERLU_HAS_ILU is defined) and
+  * lazily exposes the L/U factors and row/column permutations. */
+template<typename MatrixType>
+class SparseLU<MatrixType,SuperLU> : public SparseLU<MatrixType>
+{
+ protected:
+ typedef SparseLU<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar,Lower|UnitDiag> LMatrixType;
+ typedef SparseMatrix<Scalar,Upper> UMatrixType;
+ using Base::m_flags;
+ using Base::m_status;
+
+ public:
+
+ SparseLU(int flags = NaturalOrdering)
+ : Base(flags)
+ {
+ }
+
+ // factorizes \a matrix immediately
+ SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
+ : Base(flags)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLU()
+ {
+ // NOTE(review): m_sluL/m_sluU are only zeroed inside compute(); if
+ // compute() was never called, these Destroy_* calls read
+ // uninitialized SuperMatrix structs -- verify intended usage.
+ Destroy_SuperNode_Matrix(&m_sluL);
+ Destroy_CompCol_Matrix(&m_sluU);
+ }
+
+ // lower factor (unit diagonal); lazily extracted from SuperLU's storage
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ // upper factor; lazily extracted
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ // row permutation such that P*A*Q = L*U
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ // column permutation
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ // solves A x = b (or transposed/adjoint variants); returns true on success
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x, const int transposed = SvNoTrans) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+
+ // converts SuperLU's internal L/U representation into m_l/m_u/m_p/m_q
+ void extractData() const;
+
+ protected:
+ // cached data to reduce reallocation, etc.
+ mutable LMatrixType m_l;
+ mutable UMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ // copy of the input matrix (SuperLU needs compressed column storage)
+ mutable SparseMatrix<Scalar> m_matrix;
+ mutable SluMatrix m_sluA;
+ mutable SuperMatrix m_sluL, m_sluU;
+ mutable SluMatrix m_sluB, m_sluX;
+ mutable SuperLUStat_t m_sluStat;
+ mutable superlu_options_t m_sluOptions;
+ mutable std::vector<int> m_sluEtree;
+ mutable std::vector<RealScalar> m_sluRscale, m_sluCscale;
+ mutable std::vector<RealScalar> m_sluFerr, m_sluBerr;
+ mutable char m_sluEqued;
+ mutable bool m_extractedDataAreDirty;
+};
+
+// Factorizes \a a with SuperLU. The matrix is first copied into m_matrix
+// (compressed column storage), then handed to gssvx (complete LU) or
+// gsisx (incomplete LU, SuperLU >= 4 only). Success is recorded in
+// Base::m_succeeded; the factors are extracted lazily later.
+template<typename MatrixType>
+void SparseLU<MatrixType,SuperLU>::compute(const MatrixType& a)
+{
+ const int size = a.rows();
+ m_matrix = a;
+
+ set_default_options(&m_sluOptions);
+ m_sluOptions.ColPerm = NATURAL;
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ // m_sluOptions.Equil = NO;
+
+ // map Eigen's ordering choice onto SuperLU's column permutation options
+ switch (Base::orderingMethod())
+ {
+ case NaturalOrdering : m_sluOptions.ColPerm = NATURAL; break;
+ case MinimumDegree_AT_PLUS_A : m_sluOptions.ColPerm = MMD_AT_PLUS_A; break;
+ case MinimumDegree_ATA : m_sluOptions.ColPerm = MMD_ATA; break;
+ case ColApproxMinimumDegree : m_sluOptions.ColPerm = COLAMD; break;
+ default:
+ //std::cerr << "Eigen: ordering method \"" << Base::orderingMethod() << "\" not supported by the SuperLU backend\n";
+ m_sluOptions.ColPerm = NATURAL;
+ };
+
+ m_sluA = ei_asSluMatrix(m_matrix);
+ // zero the output structs so the destructor's Destroy_* calls are safe
+ memset(&m_sluL,0,sizeof m_sluL);
+ memset(&m_sluU,0,sizeof m_sluU);
+ //m_sluEqued = 'B';
+ int info = 0;
+
+ m_p.resize(size);
+ m_q.resize(size);
+ m_sluRscale.resize(size);
+ m_sluCscale.resize(size);
+ m_sluEtree.resize(size);
+
+ RealScalar recip_pivot_gross, rcond;
+ RealScalar ferr, berr;
+
+ // set empty B and X: factorize only, do not solve any right-hand side yet
+ m_sluB.setStorageType(SLU_DN);
+ m_sluB.setScalarType<Scalar>();
+ m_sluB.Mtype = SLU_GE;
+ m_sluB.storage.values = 0;
+ m_sluB.nrow = m_sluB.ncol = 0;
+ m_sluB.storage.lda = size;
+ m_sluX = m_sluB;
+
+ StatInit(&m_sluStat);
+ if (m_flags&IncompleteFactorization)
+ {
+ #ifdef EIGEN_SUPERLU_HAS_ILU
+ ilu_set_default_options(&m_sluOptions);
+
+ // no attempt to preserve column sum
+ m_sluOptions.ILU_MILU = SILU;
+
+ // only basic ILU(k) support -- no direct control over memory consumption
+ // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
+ // and set ILU_FillFactor to max memory growth
+ m_sluOptions.ILU_DropRule = DROP_BASIC;
+ m_sluOptions.ILU_DropTol = Base::m_precision;
+
+ SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &m_sluStat, &info, Scalar());
+ #else
+ //std::cerr << "Incomplete factorization is only available in SuperLU v4\n";
+ Base::m_succeeded = false;
+ return;
+ #endif
+ }
+ else
+ {
+ SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &ferr, &berr,
+ &m_sluStat, &info, Scalar());
+ }
+ StatFree(&m_sluStat);
+
+ // the cached m_l/m_u/m_p/m_q no longer match the new factorization
+ m_extractedDataAreDirty = true;
+
+ // FIXME how to better check for errors ???
+ Base::m_succeeded = (info == 0);
+}
+
+// Solves A x = b (or A^T / A^* x = b depending on \a transposed) reusing
+// the factorization computed by compute(). Returns true on success.
+// Fix: the no-ILU fallback branch used to return without releasing the
+// SuperLUStat_t allocated by StatInit, leaking its internal arrays.
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool SparseLU<MatrixType,SuperLU>::solve(const MatrixBase<BDerived> &b,
+ MatrixBase<XDerived> *x, const int transposed) const
+{
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ ei_assert(size==b.rows());
+
+ switch (transposed) {
+ case SvNoTrans : m_sluOptions.Trans = NOTRANS; break;
+ case SvTranspose : m_sluOptions.Trans = TRANS; break;
+ case SvAdjoint : m_sluOptions.Trans = CONJ; break;
+ default:
+ //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the SuperLU backend\n";
+ m_sluOptions.Trans = NOTRANS;
+ }
+
+ // reuse the existing factorization; skip iterative refinement
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ // wrap b and x as dense SuperLU matrices (no copy)
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x->derived());
+
+ StatInit(&m_sluStat);
+ int info = 0;
+ RealScalar recip_pivot_gross, rcond;
+
+ if (m_flags&IncompleteFactorization)
+ {
+ #ifdef EIGEN_SUPERLU_HAS_ILU
+ SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &m_sluStat, &info, Scalar());
+ #else
+ //std::cerr << "Incomplete factorization is only available in SuperLU v4\n";
+ // release the stat structure allocated by StatInit above before bailing out
+ StatFree(&m_sluStat);
+ return false;
+ #endif
+ }
+ else
+ {
+ SuperLU_gssvx(
+ &m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &m_sluFerr[0], &m_sluBerr[0],
+ &m_sluStat, &info, Scalar());
+ }
+ StatFree(&m_sluStat);
+
+ // reset to previous state
+ m_sluOptions.Trans = NOTRANS;
+ return info==0;
+}
+
+//
+// the code of this extractData() function has been adapted from the SuperLU's Matlab support code,
+//
+// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+//
+// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+//
+// Converts SuperLU's internal supernodal L (SCformat) and column-wise U
+// (NCformat) into plain compressed sparse matrices m_l / m_u, dropping
+// explicit zeros and giving L a unit diagonal.
+// Fix: the second initialization was a duplicated "Ucol[0] = 0;", leaving
+// Lcol[0] (start of L's first column) uninitialized.
+template<typename MatrixType>
+void SparseLU<MatrixType,SuperLU>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ int upper;
+ int fsupc, istart, nsupr;
+ int lastl = 0, lastu = 0;
+ SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
+ NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
+ Scalar *SNptr;
+
+ // allocate upper bounds; the matrices are squeezed to the real nnz below
+ const int size = m_matrix.rows();
+ m_l.resize(size,size);
+ m_l.resizeNonZeros(Lstore->nnz);
+ m_u.resize(size,size);
+ m_u.resizeNonZeros(Ustore->nnz);
+
+ int* Lcol = m_l._outerIndexPtr();
+ int* Lrow = m_l._innerIndexPtr();
+ Scalar* Lval = m_l._valuePtr();
+
+ int* Ucol = m_u._outerIndexPtr();
+ int* Urow = m_u._innerIndexPtr();
+ Scalar* Uval = m_u._valuePtr();
+
+ Ucol[0] = 0;
+ Lcol[0] = 0;
+
+ /* for each supernode */
+ for (int k = 0; k <= Lstore->nsuper; ++k)
+ {
+ fsupc = L_FST_SUPC(k);
+ istart = L_SUB_START(fsupc);
+ nsupr = L_SUB_START(fsupc+1) - istart;
+ upper = 1;
+
+ /* for each column in the supernode */
+ for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+ {
+ SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+ /* Extract U */
+ for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+ {
+ Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = U_SUB(i);
+ }
+ for (int i = 0; i < upper; ++i)
+ {
+ /* upper triangle in the supernode */
+ Uval[lastu] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = L_SUB(istart+i);
+ }
+ Ucol[j+1] = lastu;
+
+ /* Extract L */
+ Lval[lastl] = 1.0; /* unit diagonal */
+ Lrow[lastl++] = L_SUB(istart + upper - 1);
+ for (int i = upper; i < nsupr; ++i)
+ {
+ Lval[lastl] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Lval[lastl] != 0.0)
+ Lrow[lastl++] = L_SUB(istart+i);
+ }
+ Lcol[j+1] = lastl;
+
+ ++upper;
+ } /* for j ... */
+
+ } /* for k ... */
+
+ // squeeze the matrices :
+ m_l.resizeNonZeros(lastl);
+ m_u.resizeNonZeros(lastu);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+// Determinant of the factored matrix, computed as the product of U's
+// diagonal entries (L has a unit diagonal, so it contributes 1).
+template<typename MatrixType>
+typename SparseLU<MatrixType,SuperLU>::Scalar SparseLU<MatrixType,SuperLU>::determinant() const
+{
+ if (m_extractedDataAreDirty)
+ extractData();
+
+ // TODO this code could be moved to the default/base backend
+ // FIXME perhaps we have to take into account the scale factors m_sluRscale and m_sluCscale ???
+ Scalar det = Scalar(1);
+ for (int j=0; j<m_u.cols(); ++j)
+ {
+ if (m_u._outerIndexPtr()[j+1]-m_u._outerIndexPtr()[j] > 0)
+ {
+ // the diagonal entry, if stored, is the last one of column j
+ int lastId = m_u._outerIndexPtr()[j+1]-1;
+ ei_assert(m_u._innerIndexPtr()[lastId]<=j);
+ if (m_u._innerIndexPtr()[lastId]==j)
+ {
+ det *= m_u._valuePtr()[lastId];
+ }
+ // NOTE(review): when column j has no stored diagonal entry, U is
+ // singular and det should arguably become 0; the column is
+ // currently just skipped -- verify intended semantics.
+ }
+ // std::cout << m_sluRscale[j] << " " << m_sluCscale[j] << " ";
+ }
+ return det;
+}
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/unsupported/Eigen/src/SparseExtra/TaucsSupport.h b/unsupported/Eigen/src/SparseExtra/TaucsSupport.h
new file mode 100644
index 000000000..1e7adfef4
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/TaucsSupport.h
@@ -0,0 +1,219 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TAUCSSUPPORT_H
+#define EIGEN_TAUCSSUPPORT_H
+
+// Views an Eigen compressed-column sparse matrix as a taucs_ccs_matrix
+// (no copy): the returned struct aliases mat's arrays, so mat must
+// outlive it. The TAUCS scalar-type flag is selected from Scalar.
+template<typename MatrixType>
+taucs_ccs_matrix ei_asTaucsMatrix(MatrixType& mat)
+{
+ typedef typename MatrixType::Scalar Scalar;
+enum { Flags = MatrixType::Flags };
+ taucs_ccs_matrix res;
+ res.n = mat.cols();
+ res.m = mat.rows();
+ res.flags = 0;
+ // share mat's CCS arrays
+ res.colptr = mat._outerIndexPtr();
+ res.rowind = mat._innerIndexPtr();
+ res.values.v = mat._valuePtr();
+ if (ei_is_same_type<Scalar,int>::ret)
+ res.flags |= TAUCS_INT;
+ else if (ei_is_same_type<Scalar,float>::ret)
+ res.flags |= TAUCS_SINGLE;
+ else if (ei_is_same_type<Scalar,double>::ret)
+ res.flags |= TAUCS_DOUBLE;
+ else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ res.flags |= TAUCS_SCOMPLEX;
+ else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ res.flags |= TAUCS_DCOMPLEX;
+ else
+ {
+ ei_assert(false && "Scalar type not supported by TAUCS");
+ }
+
+ // FIXME 1) shapes are not in the Flags and 2) it seems Taucs ignores these flags anyway and only accept lower symmetric matrices
+ if (Flags & Upper)
+ res.flags |= TAUCS_UPPER;
+ if (Flags & Lower)
+ res.flags |= TAUCS_LOWER;
+ if (Flags & SelfAdjoint)
+ res.flags |= (NumTraits<Scalar>::IsComplex ? TAUCS_HERMITIAN : TAUCS_SYMMETRIC);
+ else if ((Flags & Upper) || (Flags & Lower))
+ res.flags |= TAUCS_TRIANGULAR;
+
+ return res;
+}
+
+// Views a taucs_ccs_matrix as an Eigen sparse map (no copy); the map
+// aliases taucsMat's arrays. colptr[n] gives the total nnz count.
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> ei_map_taucs(taucs_ccs_matrix& taucsMat)
+{
+ return MappedSparseMatrix<Scalar,Flags,Index>
+ (taucsMat.m, taucsMat.n, taucsMat.colptr[taucsMat.n],
+ taucsMat.colptr, taucsMat.rowind, reinterpret_cast<Scalar*>(taucsMat.values.v));
+}
+
+/** \brief TAUCS backend specialization of SparseLLT.
+  *
+  * Computes a sparse Cholesky (LL^T) factorization through TAUCS, either
+  * complete (supernodal, left-looking or multifrontal) or incomplete.
+  * The supernodal factor is kept in TAUCS' opaque format and converted
+  * to a sparse matrix lazily in matrixL(). */
+template<typename MatrixType>
+class SparseLLT<MatrixType,Taucs> : public SparseLLT<MatrixType>
+{
+ protected:
+ typedef SparseLLT<MatrixType> Base;
+ typedef typename Base::Index Index;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::CholMatrixType CholMatrixType;
+ using Base::MatrixLIsDirty;
+ using Base::SupernodalFactorIsDirty;
+ using Base::m_flags;
+ using Base::m_matrix;
+ using Base::m_status;
+ using Base::m_succeeded;
+
+ public:
+
+ SparseLLT(int flags = SupernodalMultifrontal)
+ : Base(flags), m_taucsSupernodalFactor(0)
+ {
+ }
+
+ // factorizes \a matrix immediately
+ SparseLLT(const MatrixType& matrix, int flags = SupernodalMultifrontal)
+ : Base(flags), m_taucsSupernodalFactor(0)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLLT()
+ {
+ if (m_taucsSupernodalFactor)
+ taucs_supernodal_factor_free(m_taucsSupernodalFactor);
+ }
+
+ // lower Cholesky factor L; lazily converted from the supernodal factor
+ inline const CholMatrixType& matrixL() const;
+
+ // solves L L^T x = b in place (one column at a time)
+ template<typename Derived>
+ void solveInPlace(MatrixBase<Derived> &b) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+ // opaque TAUCS supernodal factor (owned; freed in the destructor)
+ void* m_taucsSupernodalFactor;
+};
+
+// Factorizes \a a with TAUCS: incomplete Cholesky when requested,
+// otherwise a complete supernodal factorization (left-looking or the
+// faster multifrontal variant). Any previous supernodal factor is freed.
+template<typename MatrixType>
+void SparseLLT<MatrixType,Taucs>::compute(const MatrixType& a)
+{
+ if (m_taucsSupernodalFactor)
+ {
+ taucs_supernodal_factor_free(m_taucsSupernodalFactor);
+ m_taucsSupernodalFactor = 0;
+ }
+
+ // alias a (no copy); const_cast is safe because TAUCS only reads it
+ taucs_ccs_matrix taucsMatA = ei_asTaucsMatrix(const_cast<MatrixType&>(a));
+
+ if (m_flags & IncompleteFactorization)
+ {
+ taucs_ccs_matrix* taucsRes = taucs_ccs_factor_llt(&taucsMatA, Base::m_precision, 0);
+ if(!taucsRes)
+ {
+ m_succeeded = false;
+ return;
+ }
+ // the matrix returned by Taucs is not necessarily sorted,
+ // so let's copy it in two steps
+ DynamicSparseMatrix<Scalar,RowMajor> tmp = ei_map_taucs<Scalar,ColMajor,Index>(*taucsRes);
+ m_matrix = tmp;
+ // NOTE(review): plain free() releases only the struct; a matrix
+ // allocated by TAUCS should presumably be released with
+ // taucs_ccs_free() so its internal arrays are freed too -- verify.
+ free(taucsRes);
+ m_status = (m_status & ~(CompleteFactorization|MatrixLIsDirty))
+ | IncompleteFactorization
+ | SupernodalFactorIsDirty;
+ }
+ else
+ {
+ if ( (m_flags & SupernodalLeftLooking)
+ || ((!(m_flags & SupernodalMultifrontal)) && (m_flags & MemoryEfficient)) )
+ {
+ m_taucsSupernodalFactor = taucs_ccs_factor_llt_ll(&taucsMatA);
+ }
+ else
+ {
+ // use the faster Multifrontal routine
+ m_taucsSupernodalFactor = taucs_ccs_factor_llt_mf(&taucsMatA);
+ }
+ m_status = (m_status & ~IncompleteFactorization) | CompleteFactorization | MatrixLIsDirty;
+ }
+ m_succeeded = true;
+}
+
+// Returns the lower Cholesky factor L, converting TAUCS' supernodal
+// factor into compressed column storage on first access (cached in
+// m_matrix until the next factorization).
+template<typename MatrixType>
+inline const typename SparseLLT<MatrixType,Taucs>::CholMatrixType&
+SparseLLT<MatrixType,Taucs>::matrixL() const
+{
+ if (m_status & MatrixLIsDirty)
+ {
+ ei_assert(!(m_status & SupernodalFactorIsDirty));
+
+ taucs_ccs_matrix* taucsL = taucs_supernodal_factor_to_ccs(m_taucsSupernodalFactor);
+
+ // the matrix returned by Taucs is not necessarily sorted,
+ // so let's copy it in two steps
+ DynamicSparseMatrix<Scalar,RowMajor> tmp = ei_map_taucs<Scalar,ColMajor,Index>(*taucsL);
+ const_cast<typename Base::CholMatrixType&>(m_matrix) = tmp;
+ // NOTE(review): as in compute(), taucs_ccs_free() is presumably the
+ // matching deallocator; free() may leak the internal arrays -- verify.
+ free(taucsL);
+ m_status = (m_status & ~MatrixLIsDirty);
+ }
+ return m_matrix;
+}
+
+// Solves L L^T x = b in place, column by column. TAUCS needs plain
+// column-major storage with unit inner stride; otherwise we fall back to
+// the generic Eigen triangular solves of the base class.
+template<typename MatrixType>
+template<typename Derived>
+void SparseLLT<MatrixType,Taucs>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ bool inputIsCompatibleWithTaucs = (Derived::Flags&RowMajorBit)==0;
+
+ if (!inputIsCompatibleWithTaucs)
+ {
+ // make sure L is available in sparse form, then use the generic path
+ matrixL();
+ Base::solveInPlace(b);
+ }
+ else if (m_flags & IncompleteFactorization)
+ {
+ // the incomplete factor is stored as a plain sparse matrix
+ taucs_ccs_matrix taucsLLT = ei_asTaucsMatrix(const_cast<typename Base::CholMatrixType&>(m_matrix));
+ typename ei_plain_matrix_type<Derived>::type x(b.rows());
+ for (int j=0; j<b.cols(); ++j)
+ {
+ taucs_ccs_solve_llt(&taucsLLT,x.data(),&b.col(j).coeffRef(0));
+ b.col(j) = x;
+ }
+ }
+ else
+ {
+ // complete factorization: solve directly with the supernodal factor
+ typename ei_plain_matrix_type<Derived>::type x(b.rows());
+ for (int j=0; j<b.cols(); ++j)
+ {
+ taucs_supernodal_solve_llt(m_taucsSupernodalFactor,x.data(),&b.col(j).coeffRef(0));
+ b.col(j) = x;
+ }
+ }
+}
+
+#endif // EIGEN_TAUCSSUPPORT_H
diff --git a/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
new file mode 100644
index 000000000..950624758
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
@@ -0,0 +1,289 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_UMFPACKSUPPORT_H
+#define EIGEN_UMFPACKSUPPORT_H
+
+/* TODO extract L, extract U, compute det, etc... */
+
+// generic double/complex<double> wrapper functions:
+
+// Overloaded C++ wrappers around UMFPACK's di_ (double) and zi_
+// (complex<double>) entry points; the dummy trailing scalar argument
+// selects the overload.
+inline void umfpack_free_numeric(void **Numeric, double)
+{ umfpack_di_free_numeric(Numeric); }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+{ umfpack_zi_free_numeric(Numeric); }
+
+inline void umfpack_free_symbolic(void **Symbolic, double)
+{ umfpack_di_free_symbolic(Symbolic); }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+{ umfpack_zi_free_symbolic(Symbolic); }
+
+// Symbolic (ordering) phase wrappers. For the complex overload, the array
+// is passed as packed reals with a null imaginary pointer (UMFPACK's
+// "packed complex" convention).
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ // NOTE(review): &Ax[0].real() relies on std::complex::real() returning
+ // an lvalue reference -- not valid standard C++ since C++11.
+ return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&Ax[0].real(),0,Symbolic,Control,Info);
+}
+
+// Numeric factorization wrappers (consume the Symbolic object, produce
+// the Numeric object). Same packed-complex convention as above.
+inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_numeric(Ap,Ai,&Ax[0].real(),0,Symbolic,Numeric,Control,Info);
+}
+
+// Triangular-solve wrappers: solve the system selected by \a sys
+// (e.g. UMFPACK_A for A x = b) for a single right-hand side.
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zi_solve(sys,Ap,Ai,&Ax[0].real(),0,&X[0].real(),0,&B[0].real(),0,Numeric,Control,Info);
+}
+
+// Queries the sizes of the L/U factors held in \a Numeric (nnz of L and
+// U, dimensions, and number of nonzeros on U's diagonal).
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
+{
+ return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+// Copies the L/U factors, permutations and scale factors out of the
+// Numeric object. In the complex overload each output pointer is
+// null-guarded: a null pointer tells UMFPACK to skip that output.
+inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
+ int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
+ int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_zi_get_numeric(Lp,Lj,Lx?&Lx[0].real():0,0,Up,Ui,Ux?&Ux[0].real():0,0,P,Q,
+ Dx?&Dx[0].real():0,0,do_recip,Rs,Numeric);
+}
+
+// Determinant wrappers: Mx receives the mantissa, Ex (may be null) the
+// base-10 exponent computed by UMFPACK.
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_zi_get_determinant(&Mx->real(),0,Ex,NumericHandle,User_Info);
+}
+
+
+/** \brief UmfPack backend specialization of SparseLU.
+  *
+  * Factorizes a column-major sparse matrix with UMFPACK and lazily
+  * exposes the L/U factors and permutations. Only a POINTER to the input
+  * matrix is kept (m_matrixRef): the matrix passed to compute() must
+  * outlive this object and stay unmodified until after solve(). */
+template<typename MatrixType>
+class SparseLU<MatrixType,UmfPack> : public SparseLU<MatrixType>
+{
+ protected:
+ typedef SparseLU<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar,Lower|UnitDiag> LMatrixType;
+ typedef SparseMatrix<Scalar,Upper> UMatrixType;
+ using Base::m_flags;
+ using Base::m_status;
+
+ public:
+
+ SparseLU(int flags = NaturalOrdering)
+ : Base(flags), m_numeric(0)
+ {
+ }
+
+ // factorizes \a matrix immediately
+ SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
+ : Base(flags), m_numeric(0)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLU()
+ {
+ // release UMFPACK's Numeric object, if any
+ if (m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+ }
+
+ // lower factor (unit diagonal); lazily extracted from m_numeric
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ // upper factor; lazily extracted
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ // row permutation
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ // column permutation
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ // solves A x = b column by column; returns true on success
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+
+ // copies L/U/P/Q out of UMFPACK's Numeric object into m_l/m_u/m_p/m_q
+ void extractData() const;
+
+ protected:
+ // cached data:
+ // opaque UMFPACK Numeric object (owned; freed in the destructor)
+ void* m_numeric;
+ // non-owning pointer to the matrix passed to compute() -- see class doc
+ const MatrixType* m_matrixRef;
+ mutable LMatrixType m_l;
+ mutable UMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+ mutable bool m_extractedDataAreDirty;
+};
+
+// Factorizes \a a with UMFPACK: symbolic analysis followed by numeric
+// factorization. Only a pointer to \a a is retained (used later by
+// solve()), so \a a must outlive this object. Success is recorded in
+// Base::m_succeeded.
+template<typename MatrixType>
+void SparseLU<MatrixType,UmfPack>::compute(const MatrixType& a)
+{
+ const int rows = a.rows();
+ const int cols = a.cols();
+ // UMFPACK expects compressed column storage
+ ei_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet");
+
+ m_matrixRef = &a;
+
+ // drop any previous factorization
+ if (m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ void* symbolic;
+ int errorCode = 0;
+ errorCode = umfpack_symbolic(rows, cols, a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
+ &symbolic, 0, 0);
+ if (errorCode==0)
+ errorCode = umfpack_numeric(a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
+ symbolic, &m_numeric, 0, 0);
+
+ // the symbolic object is only needed to build the numeric one
+ umfpack_free_symbolic(&symbolic,Scalar());
+
+ m_extractedDataAreDirty = true;
+
+ Base::m_succeeded = (errorCode==0);
+}
+
+// Copies the L/U factors and the P/Q permutations out of UMFPACK's
+// Numeric object into the cached Eigen matrices/vectors.
+// NOTE(review): umfpack_get_numeric returns L row-wise (Lp/Lj); the
+// indices are stored into m_l as-is -- confirm the intended
+// interpretation against the UMFPACK user guide.
+template<typename MatrixType>
+void SparseLU<MatrixType,UmfPack>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,std::min(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize(std::min(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract (null pointers: skip the scale-factor outputs)
+ umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(),
+ m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+// Determinant of the factored matrix, delegated to UMFPACK
+// (null Ex: the full value is returned in det, not mantissa/exponent).
+// NOTE(review): det is left uninitialized if the UMFPACK call fails.
+template<typename MatrixType>
+typename SparseLU<MatrixType,UmfPack>::Scalar SparseLU<MatrixType,UmfPack>::determinant() const
+{
+ Scalar det;
+ umfpack_get_determinant(&det, 0, m_numeric, 0);
+ return det;
+}
+
+// Solves A x = b one column at a time via umfpack_solve(UMFPACK_A, ...),
+// reading the matrix through m_matrixRef (must still be alive and
+// unmodified since compute()). Returns false on the first failing column.
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool SparseLU<MatrixType,UmfPack>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> *x) const
+{
+ //const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+// ei_assert(size==b.rows());
+ // UMFPACK needs contiguous column vectors for both b and x
+ ei_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
+ ei_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
+
+ int errorCode;
+ for (int j=0; j<rhsCols; ++j)
+ {
+ errorCode = umfpack_solve(UMFPACK_A,
+ m_matrixRef->_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(),
+ &x->col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
+ if (errorCode!=0)
+ return false;
+ }
+// errorCode = umfpack_di_solve(UMFPACK_A,
+// m_matrixRef._outerIndexPtr(), m_matrixRef._innerIndexPtr(), m_matrixRef._valuePtr(),
+// x->derived().data(), b.derived().data(), m_numeric, 0, 0);
+
+ return true;
+}
+
+#endif // EIGEN_UMFPACKSUPPORT_H