author    Gael Guennebaud <g.gael@free.fr>  2011-06-06 10:17:28 +0200
committer Gael Guennebaud <g.gael@free.fr>  2011-06-06 10:17:28 +0200
commit    421ece38e1995ec4df12213d6fd567fa18222cca (patch)
tree      6966bc7910a6a91f6970e16532a50aab284b8c3e
parent    7a61a564ef7d5403bcf3eef0c84252cc8bf73705 (diff)
Sparse: fix long int as index type in simplicial cholesky and other decompositions
-rw-r--r--  Eigen/src/Sparse/SparseSelfAdjointView.h                 | 12
-rw-r--r--  test/sparse.h                                            | 12
-rw-r--r--  unsupported/Eigen/src/SparseExtra/Amd.h                  | 10
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h   |  8
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h     |  9
-rw-r--r--  unsupported/test/sparse_ldlt.cpp                         | 40
-rw-r--r--  unsupported/test/sparse_llt.cpp                          | 40
7 files changed, 68 insertions, 63 deletions
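For context, a minimal usage sketch of what this commit enables: factorizing a SparseMatrix whose Index type is long int with the unsupported SimplicialCholesky, which previously mixed badly with the hard-coded int-indexed PermutationMatrix<Dynamic> members fixed below. This sketch is not part of the patch; the matrix fill pattern and file name are illustrative, and it assumes the Eigen headers as of this commit.

    // sketch, assuming Eigen at this revision (SimplicialCholesky still lives in unsupported/SparseExtra)
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <unsupported/Eigen/SparseExtra>
    #include <iostream>

    int main()
    {
      typedef long int Index;  // non-default index type, the case this commit fixes
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, Index> SpMat;

      // Build a small SPD tridiagonal matrix (2 on the diagonal, -1 off it)
      // using the low-level column-major fill API of this era.
      const Index n = 8;
      SpMat A(n, n);
      A.reserve(3 * n);
      for (Index j = 0; j < n; ++j)
      {
        A.startVec(j);
        if (j > 0)     A.insertBack(j - 1, j) = -1.0;
        A.insertBack(j, j) = 2.0;
        if (j + 1 < n) A.insertBack(j + 1, j) = -1.0;
      }
      A.finalize();

      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      // Before this commit, SimplicialCholesky stored its permutations as
      // PermutationMatrix<Dynamic> (int-indexed), so a long-int-indexed
      // SparseMatrix could not be used here.
      Eigen::SimplicialCholesky<SpMat, Eigen::Lower> chol(A);
      Eigen::VectorXd x = chol.solve(b);

      Eigen::VectorXd Ax = A * x;
      std::cout << "residual: " << (Ax - b).norm() << std::endl;
      return 0;
    }

The test changes at the bottom of this patch exercise the same combination (sparse_ldlt<double,long int> and sparse_llt<double,long int>) through the regression suite.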
diff --git a/Eigen/src/Sparse/SparseSelfAdjointView.h b/Eigen/src/Sparse/SparseSelfAdjointView.h
index 651daaa4d..a69682997 100644
--- a/Eigen/src/Sparse/SparseSelfAdjointView.h
+++ b/Eigen/src/Sparse/SparseSelfAdjointView.h
@@ -116,21 +116,21 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
/** \internal triggered by sparse_matrix = SparseSelfadjointView; */
- template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar,ColMajor,Index>& _dest) const
{
internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
}
- template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar>& _dest) const
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
{
// TODO directly evaluate into _dest;
- SparseMatrix<DestScalar> tmp(_dest.rows(),_dest.cols());
+ SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
_dest = tmp;
}
/** \returns an expression of P^-1 H P */
- SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic>& perm) const
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
{
return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
}
@@ -419,10 +419,12 @@ template<typename MatrixType,int UpLo>
class SparseSymmetricPermutationProduct
: public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
{
- typedef PermutationMatrix<Dynamic> Perm;
public:
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
+ protected:
+ typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+ public:
typedef Matrix<Index,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
diff --git a/test/sparse.h b/test/sparse.h
index 530ae30bc..9944a2934 100644
--- a/test/sparse.h
+++ b/test/sparse.h
@@ -58,15 +58,15 @@ enum {
* \param zeroCoords and nonzeroCoords allows to get the coordinate lists of the non zero,
* and zero coefficients respectively.
*/
-template<typename Scalar,int Opt1,int Opt2> void
+template<typename Scalar,int Opt1,int Opt2,typename Index> void
initSparse(double density,
Matrix<Scalar,Dynamic,Dynamic,Opt1>& refMat,
- SparseMatrix<Scalar,Opt2>& sparseMat,
+ SparseMatrix<Scalar,Opt2,Index>& sparseMat,
int flags = 0,
std::vector<Vector2i>* zeroCoords = 0,
std::vector<Vector2i>* nonzeroCoords = 0)
{
- enum { IsRowMajor = SparseMatrix<Scalar,Opt2>::IsRowMajor };
+ enum { IsRowMajor = SparseMatrix<Scalar,Opt2,Index>::IsRowMajor };
sparseMat.setZero();
sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
@@ -108,15 +108,15 @@ initSparse(double density,
sparseMat.finalize();
}
-template<typename Scalar,int Opt1,int Opt2> void
+template<typename Scalar,int Opt1,int Opt2,typename Index> void
initSparse(double density,
Matrix<Scalar,Dynamic,Dynamic, Opt1>& refMat,
- DynamicSparseMatrix<Scalar, Opt2>& sparseMat,
+ DynamicSparseMatrix<Scalar, Opt2, Index>& sparseMat,
int flags = 0,
std::vector<Vector2i>* zeroCoords = 0,
std::vector<Vector2i>* nonzeroCoords = 0)
{
- enum { IsRowMajor = DynamicSparseMatrix<Scalar,Opt2>::IsRowMajor };
+ enum { IsRowMajor = DynamicSparseMatrix<Scalar,Opt2,Index>::IsRowMajor };
sparseMat.setZero();
sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
for(int j=0; j<sparseMat.outerSize(); j++)
diff --git a/unsupported/Eigen/src/SparseExtra/Amd.h b/unsupported/Eigen/src/SparseExtra/Amd.h
index 52fd56bc4..3cf8bd1e1 100644
--- a/unsupported/Eigen/src/SparseExtra/Amd.h
+++ b/unsupported/Eigen/src/SparseExtra/Amd.h
@@ -103,7 +103,7 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind
* The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
* On exit the values of C are destroyed */
template<typename Scalar, typename Index>
-void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic>& perm)
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
{
typedef SparseMatrix<Scalar,ColMajor,Index> CCS;
@@ -151,7 +151,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
elen[i] = 0; // Ek of node i is empty
degree[i] = len[i]; // degree of node i
}
- mark = cs_wclear (0, 0, w, n); /* clear w */
+ mark = cs_wclear<Index>(0, 0, w, n); /* clear w */
elen[n] = -2; /* n is a dead element */
Cp[n] = -1; /* n is a root of assembly tree */
w[n] = 0; /* n is a dead element */
@@ -266,7 +266,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
elen[k] = -2; /* k is now an element */
/* --- Find set differences ----------------------------------------- */
- mark = cs_wclear (mark, lemax, w, n); /* clear w if necessary */
+ mark = cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
{
i = Ci[pk];
@@ -349,7 +349,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
} /* scan2 is done */
degree[k] = dk; /* finalize |Lk| */
lemax = std::max<Index>(lemax, dk);
- mark = cs_wclear (mark+lemax, lemax, w, n); /* clear w */
+ mark = cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
/* --- Supernode detection ------------------------------------------ */
for(pk = pk1; pk < pk2; pk++)
@@ -435,7 +435,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
{
- if(Cp[i] == -1) k = cs_tdfs (i, k, head, next, perm.indices().data(), w);
+ if(Cp[i] == -1) k = cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
}
perm.indices().conservativeResize(n);
diff --git a/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h b/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h
index 6af6407c7..dd13dc714 100644
--- a/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h
+++ b/unsupported/Eigen/src/SparseExtra/SimplicialCholesky.h
@@ -193,12 +193,12 @@ class SimplicialCholesky
/** \returns the permutation P
* \sa permutationPinv() */
- const PermutationMatrix<Dynamic>& permutationP() const
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationP() const
{ return m_P; }
/** \returns the inverse P^-1 of the permutation P
* \sa permutationP() */
- const PermutationMatrix<Dynamic>& permutationPinv() const
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationPinv() const
{ return m_Pinv; }
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -282,8 +282,8 @@ class SimplicialCholesky
VectorType m_diag; // the diagonal coefficients in case of a LDLt decomposition
VectorXi m_parent; // elimination tree
VectorXi m_nonZerosPerCol;
- PermutationMatrix<Dynamic> m_P; // the permutation
- PermutationMatrix<Dynamic> m_Pinv; // the inverse permutation
+ PermutationMatrix<Dynamic,Dynamic,Index> m_P; // the permutation
+ PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // the inverse permutation
};
template<typename _MatrixType, int _UpLo>
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h b/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h
index 14283c117..6b240f169 100644
--- a/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h
+++ b/unsupported/Eigen/src/SparseExtra/SparseLDLTLegacy.h
@@ -90,10 +90,9 @@ class SparseLDLT
};
public:
- typedef SparseMatrix<Scalar> CholMatrixType;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Index Index;
-
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
/** Creates a dummy LDLT factorization object with flags \a flags. */
SparseLDLT(int flags = 0)
@@ -187,8 +186,8 @@ class SparseLDLT
VectorXi m_parent; // elimination tree
VectorXi m_nonZerosPerCol;
// VectorXi m_w; // workspace
- PermutationMatrix<Dynamic> m_P;
- PermutationMatrix<Dynamic> m_Pinv;
+ PermutationMatrix<Dynamic,Dynamic,Index> m_P;
+ PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;
RealScalar m_precision;
int m_flags;
mutable int m_status;
@@ -257,7 +256,7 @@ void SparseLDLT<_MatrixType,Backend>::_symbolic(const _MatrixType& a)
if(P)
{
- m_P.indices() = VectorXi::Map(P,size);
+ m_P.indices() = Map<const Matrix<Index,Dynamic,1> >(P,size);
m_Pinv = m_P.inverse();
Pinv = m_Pinv.indices().data();
}
diff --git a/unsupported/test/sparse_ldlt.cpp b/unsupported/test/sparse_ldlt.cpp
index 4ceda3188..43ff2682f 100644
--- a/unsupported/test/sparse_ldlt.cpp
+++ b/unsupported/test/sparse_ldlt.cpp
@@ -29,15 +29,16 @@
#include <Eigen/CholmodSupport>
#endif
-template<typename Scalar> void sparse_ldlt(int rows, int cols)
+template<typename Scalar,typename Index> void sparse_ldlt(int rows, int cols)
{
static bool odd = true;
odd = !odd;
double density = std::max(8./(rows*cols), 0.01);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
-
- SparseMatrix<Scalar> m2(rows, cols);
+ typedef SparseMatrix<Scalar,ColMajor,Index> SparseMatrixType;
+
+ SparseMatrixType m2(rows, cols);
DenseMatrix refMat2(rows, cols);
DenseVector b = DenseVector::Random(cols);
@@ -45,11 +46,11 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, 0, 0);
- SparseMatrix<Scalar> m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
+ SparseMatrixType m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
DenseMatrix refMat3 = refMat2 * refMat2.adjoint();
refX = refMat3.template selfadjointView<Upper>().ldlt().solve(b);
- typedef SparseMatrix<Scalar,Upper|SelfAdjoint> SparseSelfAdjointMatrix;
+ typedef SparseMatrix<Scalar,Upper|SelfAdjoint,Index> SparseSelfAdjointMatrix;
x = b;
SparseLDLT<SparseSelfAdjointMatrix> ldlt(m3);
if (ldlt.succeeded())
@@ -84,7 +85,7 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
// new API
{
- SparseMatrix<Scalar> m2(rows, cols);
+ SparseMatrixType m2(rows, cols);
DenseMatrix refMat2(rows, cols);
DenseVector b = DenseVector::Random(cols);
@@ -98,7 +99,7 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
m2.coeffRef(i,i) = refMat2(i,i) = internal::abs(internal::real(refMat2(i,i)));
- SparseMatrix<Scalar> m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
+ SparseMatrixType m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
DenseMatrix refMat3 = refMat2 * refMat2.adjoint();
m3_lo.template selfadjointView<Lower>().rankUpdate(m2,0);
@@ -107,40 +108,40 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
// with a single vector as the rhs
ref_x = refMat3.template selfadjointView<Lower>().llt().solve(b);
- x = SimplicialCholesky<SparseMatrix<Scalar>, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
+ x = SimplicialCholesky<SparseMatrixType, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, lower, single dense rhs");
- x = SimplicialCholesky<SparseMatrix<Scalar>, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
+ x = SimplicialCholesky<SparseMatrixType, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, upper, single dense rhs");
- x = SimplicialCholesky<SparseMatrix<Scalar>, Lower>(m3_lo).solve(b);
+ x = SimplicialCholesky<SparseMatrixType, Lower>(m3_lo).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, lower only, single dense rhs");
- x = SimplicialCholesky<SparseMatrix<Scalar>, Upper>(m3_up).solve(b);
+ x = SimplicialCholesky<SparseMatrixType, Upper>(m3_up).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "SimplicialCholesky: solve, upper only, single dense rhs");
// with multiple rhs
ref_X = refMat3.template selfadjointView<Lower>().llt().solve(B);
- X = SimplicialCholesky<SparseMatrix<Scalar>, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
+ X = SimplicialCholesky<SparseMatrixType, Lower>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, lower, multiple dense rhs");
- X = SimplicialCholesky<SparseMatrix<Scalar>, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
+ X = SimplicialCholesky<SparseMatrixType, Upper>().setMode(odd ? SimplicialCholeskyLLt : SimplicialCholeskyLDLt).compute(m3).solve(B);
VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "SimplicialCholesky: solve, full storage, upper, multiple dense rhs");
// with a sparse rhs
-// SparseMatrix<Scalar> spB(rows,cols), spX(rows,cols);
+// SparseMatrixType spB(rows,cols), spX(rows,cols);
// B.diagonal().array() += 1;
// spB = B.sparseView(0.5,1);
//
// ref_X = refMat3.template selfadjointView<Lower>().llt().solve(DenseMatrix(spB));
//
-// spX = SimplicialCholesky<SparseMatrix<Scalar>, Lower>(m3).solve(spB);
+// spX = SimplicialCholesky<SparseMatrixType, Lower>(m3).solve(spB);
// VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
//
-// spX = SimplicialCholesky<SparseMatrix<Scalar>, Upper>(m3).solve(spB);
+// spX = SimplicialCholesky<SparseMatrixType, Upper>(m3).solve(spB);
// VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
}
@@ -167,9 +168,10 @@ template<typename Scalar> void sparse_ldlt(int rows, int cols)
void test_sparse_ldlt()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1(sparse_ldlt<double>(8, 8) );
+ CALL_SUBTEST_1( (sparse_ldlt<double,int>(8, 8)) );
+ CALL_SUBTEST_1( (sparse_ldlt<double,long int>(8, 8)) );
int s = internal::random<int>(1,300);
- CALL_SUBTEST_2(sparse_ldlt<std::complex<double> >(s,s) );
- CALL_SUBTEST_1(sparse_ldlt<double>(s,s) );
+ CALL_SUBTEST_2( (sparse_ldlt<std::complex<double>,int>(s,s)) );
+ CALL_SUBTEST_1( (sparse_ldlt<double,int>(s,s)) );
}
}
diff --git a/unsupported/test/sparse_llt.cpp b/unsupported/test/sparse_llt.cpp
index df198cd52..a997deb82 100644
--- a/unsupported/test/sparse_llt.cpp
+++ b/unsupported/test/sparse_llt.cpp
@@ -29,14 +29,15 @@
#include <Eigen/CholmodSupport>
#endif
-template<typename Scalar> void sparse_llt(int rows, int cols)
+template<typename Scalar,typename Index> void sparse_llt(int rows, int cols)
{
double density = std::max(8./(rows*cols), 0.01);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ typedef SparseMatrix<Scalar,ColMajor,Index> SparseMatrixType;
// TODO fix the issue with complex (see SparseLLT::solveInPlace)
- SparseMatrix<Scalar> m2(rows, cols);
+ SparseMatrixType m2(rows, cols);
DenseMatrix refMat2(rows, cols);
DenseVector b = DenseVector::Random(cols);
@@ -53,7 +54,7 @@ template<typename Scalar> void sparse_llt(int rows, int cols)
if (!NumTraits<Scalar>::IsComplex)
{
x = b;
- SparseLLT<SparseMatrix<Scalar> > (m2).solveInPlace(x);
+ SparseLLT<SparseMatrixType > (m2).solveInPlace(x);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT: default");
}
@@ -61,23 +62,23 @@ template<typename Scalar> void sparse_llt(int rows, int cols)
// legacy API
{
// Cholmod, as configured in CholmodSupport.h, only supports self-adjoint matrices
- SparseMatrix<Scalar> m3 = m2.adjoint()*m2;
+ SparseMatrixType m3 = m2.adjoint()*m2;
DenseMatrix refMat3 = refMat2.adjoint()*refMat2;
ref_x = refMat3.template selfadjointView<Lower>().llt().solve(b);
x = b;
- SparseLLT<SparseMatrix<Scalar>, Cholmod>(m3).solveInPlace(x);
+ SparseLLT<SparseMatrixType, Cholmod>(m3).solveInPlace(x);
VERIFY((m3*x).isApprox(b,test_precision<Scalar>()) && "LLT legacy: cholmod solveInPlace");
- x = SparseLLT<SparseMatrix<Scalar>, Cholmod>(m3).solve(b);
+ x = SparseLLT<SparseMatrixType, Cholmod>(m3).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT legacy: cholmod solve");
}
// new API
{
// Cholmod, as configured in CholmodSupport.h, only supports self-adjoint matrices
- SparseMatrix<Scalar> m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
+ SparseMatrixType m3 = m2 * m2.adjoint(), m3_lo(rows,rows), m3_up(rows,rows);
DenseMatrix refMat3 = refMat2 * refMat2.adjoint();
m3_lo.template selfadjointView<Lower>().rankUpdate(m2,0);
@@ -86,16 +87,16 @@ template<typename Scalar> void sparse_llt(int rows, int cols)
// with a single vector as the rhs
ref_x = refMat3.template selfadjointView<Lower>().llt().solve(b);
- x = CholmodDecomposition<SparseMatrix<Scalar>, Lower>(m3).solve(b);
+ x = CholmodDecomposition<SparseMatrixType, Lower>(m3).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod solve, single dense rhs");
- x = CholmodDecomposition<SparseMatrix<Scalar>, Upper>(m3).solve(b);
+ x = CholmodDecomposition<SparseMatrixType, Upper>(m3).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod solve, single dense rhs");
- x = CholmodDecomposition<SparseMatrix<Scalar>, Lower>(m3_lo).solve(b);
+ x = CholmodDecomposition<SparseMatrixType, Lower>(m3_lo).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod solve, single dense rhs");
- x = CholmodDecomposition<SparseMatrix<Scalar>, Upper>(m3_up).solve(b);
+ x = CholmodDecomposition<SparseMatrixType, Upper>(m3_up).solve(b);
VERIFY(ref_x.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod solve, single dense rhs");
@@ -104,25 +105,25 @@ template<typename Scalar> void sparse_llt(int rows, int cols)
#ifndef EIGEN_DEFAULT_TO_ROW_MAJOR
// TODO make sure the API is properly documented about this fact
- X = CholmodDecomposition<SparseMatrix<Scalar>, Lower>(m3).solve(B);
+ X = CholmodDecomposition<SparseMatrixType, Lower>(m3).solve(B);
VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "LLT: cholmod solve, multiple dense rhs");
- X = CholmodDecomposition<SparseMatrix<Scalar>, Upper>(m3).solve(B);
+ X = CholmodDecomposition<SparseMatrixType, Upper>(m3).solve(B);
VERIFY(ref_X.isApprox(X,test_precision<Scalar>()) && "LLT: cholmod solve, multiple dense rhs");
#endif
// with a sparse rhs
- SparseMatrix<Scalar> spB(rows,cols), spX(rows,cols);
+ SparseMatrixType spB(rows,cols), spX(rows,cols);
B.diagonal().array() += 1;
spB = B.sparseView(0.5,1);
ref_X = refMat3.template selfadjointView<Lower>().llt().solve(DenseMatrix(spB));
- spX = CholmodDecomposition<SparseMatrix<Scalar>, Lower>(m3).solve(spB);
+ spX = CholmodDecomposition<SparseMatrixType, Lower>(m3).solve(spB);
VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
- spX = CholmodDecomposition<SparseMatrix<Scalar>, Upper>(m3).solve(spB);
+ spX = CholmodDecomposition<SparseMatrixType, Upper>(m3).solve(spB);
VERIFY(ref_X.isApprox(spX.toDense(),test_precision<Scalar>()) && "LLT: cholmod solve, multiple sparse rhs");
}
#endif
@@ -132,9 +133,10 @@ template<typename Scalar> void sparse_llt(int rows, int cols)
void test_sparse_llt()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1(sparse_llt<double>(8, 8) );
+ CALL_SUBTEST_1( (sparse_llt<double,int>(8, 8)) );
int s = internal::random<int>(1,300);
- CALL_SUBTEST_2(sparse_llt<std::complex<double> >(s,s) );
- CALL_SUBTEST_1(sparse_llt<double>(s,s) );
+ CALL_SUBTEST_2( (sparse_llt<std::complex<double>,int>(s,s)) );
+ CALL_SUBTEST_1( (sparse_llt<double,int>(s,s)) );
+ CALL_SUBTEST_1( (sparse_llt<double,long int>(s,s)) );
}
}