Diffstat (limited to 'unsupported/Eigen/src/SparseExtra')
-rw-r--r--  unsupported/Eigen/src/SparseExtra/CholmodSupport.h   75
-rw-r--r--  unsupported/Eigen/src/SparseExtra/RandomSetter.h      4
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SparseLDLT.h       30
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SparseLLT.h        28
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SparseLU.h          6
-rw-r--r--  unsupported/Eigen/src/SparseExtra/SuperLUSupport.h   36
-rw-r--r--  unsupported/Eigen/src/SparseExtra/UmfPackSupport.h   29
7 files changed, 105 insertions, 103 deletions
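Every hunk below applies the same Eigen 3 namespace migration: free helpers lose their ei_ prefix and move into namespace internal, ei_assert becomes eigen_assert, and the ei_solve_retval / ei_solve_retval_base return types become internal::solve_retval / internal::solve_retval_base. A minimal sketch of the pattern, with hypothetical names (configure_thing, Thing) that do not appear in the patch:

```cpp
// Toy sketch of the renaming pattern applied throughout this diff: an
// ei_-prefixed free function moves into namespace internal and drops the
// prefix, and ei_assert becomes eigen_assert.
#include <cassert>

// Stand-in for Eigen's assertion macro; in Eigen it defaults to assert().
#ifndef eigen_assert
#define eigen_assert(x) assert(x)
#endif

namespace internal {

// Before the patch this would have been a global `ei_configure_thing()`.
template<typename T>
void configure_thing(T& t)
{
  eigen_assert(t.size() > 0 && "empty object not supported");
  t.setup();
}

} // end namespace internal

// Hypothetical caller-side type, only here to make the sketch compile.
struct Thing
{
  int size() const { return 1; }
  void setup() {}
};

int main()
{
  Thing t;
  internal::configure_thing(t); // call sites now qualify with internal::
  return 0;
}
```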
diff --git a/unsupported/Eigen/src/SparseExtra/CholmodSupport.h b/unsupported/Eigen/src/SparseExtra/CholmodSupport.h
index 8b500062b..aee4ae00a 100644
--- a/unsupported/Eigen/src/SparseExtra/CholmodSupport.h
+++ b/unsupported/Eigen/src/SparseExtra/CholmodSupport.h
@@ -25,38 +25,39 @@
#ifndef EIGEN_CHOLMODSUPPORT_H
#define EIGEN_CHOLMODSUPPORT_H
+namespace internal {
template<typename Scalar, typename CholmodType>
-void ei_cholmod_configure_matrix(CholmodType& mat)
+void cholmod_configure_matrix(CholmodType& mat)
{
- if (ei_is_same_type<Scalar,float>::ret)
+ if (is_same_type<Scalar,float>::ret)
{
mat.xtype = CHOLMOD_REAL;
mat.dtype = CHOLMOD_SINGLE;
}
- else if (ei_is_same_type<Scalar,double>::ret)
+ else if (is_same_type<Scalar,double>::ret)
{
mat.xtype = CHOLMOD_REAL;
mat.dtype = CHOLMOD_DOUBLE;
}
- else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ else if (is_same_type<Scalar,std::complex<float> >::ret)
{
mat.xtype = CHOLMOD_COMPLEX;
mat.dtype = CHOLMOD_SINGLE;
}
- else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ else if (is_same_type<Scalar,std::complex<double> >::ret)
{
mat.xtype = CHOLMOD_COMPLEX;
mat.dtype = CHOLMOD_DOUBLE;
}
else
{
- ei_assert(false && "Scalar type not supported by CHOLMOD");
+ eigen_assert(false && "Scalar type not supported by CHOLMOD");
}
}
template<typename _MatrixType>
-cholmod_sparse ei_cholmod_map_eigen_to_sparse(_MatrixType& mat)
+cholmod_sparse cholmod_map_eigen_to_sparse(_MatrixType& mat)
{
typedef typename _MatrixType::Scalar Scalar;
cholmod_sparse res;
@@ -73,7 +74,7 @@ cholmod_sparse ei_cholmod_map_eigen_to_sparse(_MatrixType& mat)
res.dtype = 0;
res.stype = -1;
- ei_cholmod_configure_matrix<Scalar>(res);
+ cholmod_configure_matrix<Scalar>(res);
if (_MatrixType::Flags & SelfAdjoint)
@@ -92,9 +93,9 @@ cholmod_sparse ei_cholmod_map_eigen_to_sparse(_MatrixType& mat)
}
template<typename Derived>
-cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
+cholmod_dense cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
{
- EIGEN_STATIC_ASSERT((ei_traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ EIGEN_STATIC_ASSERT((traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Derived::Scalar Scalar;
cholmod_dense res;
@@ -105,20 +106,20 @@ cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
res.x = mat.derived().data();
res.z = 0;
- ei_cholmod_configure_matrix<Scalar>(res);
+ cholmod_configure_matrix<Scalar>(res);
return res;
}
template<typename Scalar, int Flags, typename Index>
-MappedSparseMatrix<Scalar,Flags,Index> ei_map_cholmod_sparse_to_eigen(cholmod_sparse& cm)
+MappedSparseMatrix<Scalar,Flags,Index> map_cholmod_sparse_to_eigen(cholmod_sparse& cm)
{
return MappedSparseMatrix<Scalar,Flags,Index>
(cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
}
-
+} // end namespace internal
template<typename _MatrixType>
class SparseLLT<_MatrixType, Cholmod> : public SparseLLT<_MatrixType>
@@ -164,11 +165,11 @@ class SparseLLT<_MatrixType, Cholmod> : public SparseLLT<_MatrixType>
bool solveInPlace(MatrixBase<Derived> &b) const;
template<typename Rhs>
- inline const ei_solve_retval<SparseLLT<MatrixType, Cholmod>, Rhs>
+ inline const internal::solve_retval<SparseLLT<MatrixType, Cholmod>, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- ei_assert(true && "SparseLLT is not initialized.");
- return ei_solve_retval<SparseLLT<MatrixType, Cholmod>, Rhs>(*this, b.derived());
+ eigen_assert(true && "SparseLLT is not initialized.");
+ return internal::solve_retval<SparseLLT<MatrixType, Cholmod>, Rhs>(*this, b.derived());
}
void compute(const MatrixType& matrix);
@@ -192,8 +193,8 @@ class SparseLLT<_MatrixType, Cholmod> : public SparseLLT<_MatrixType>
template<typename _MatrixType, typename Rhs>
- struct ei_solve_retval<SparseLLT<_MatrixType, Cholmod>, Rhs>
- : ei_solve_retval_base<SparseLLT<_MatrixType, Cholmod>, Rhs>
+ struct internal::solve_retval<SparseLLT<_MatrixType, Cholmod>, Rhs>
+ : internal::solve_retval_base<SparseLLT<_MatrixType, Cholmod>, Rhs>
{
typedef SparseLLT<_MatrixType, Cholmod> SpLLTDecType;
EIGEN_MAKE_SOLVE_HELPERS(SpLLTDecType,Rhs)
@@ -201,7 +202,7 @@ template<typename _MatrixType, typename Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
//Index size = dec().cholmodFactor()->n;
- ei_assert((Index)dec().cholmodFactor()->n==rhs().rows());
+ eigen_assert((Index)dec().cholmodFactor()->n==rhs().rows());
cholmod_factor* cholmodFactor = const_cast<cholmod_factor*>(dec().cholmodFactor());
cholmod_common* cholmodCommon = const_cast<cholmod_common*>(dec().cholmodCommon());
@@ -211,7 +212,7 @@ template<typename _MatrixType, typename Rhs>
// Base::solveInPlace(b);
// as long as our own triangular sparse solver is not fully optimal,
// let's use CHOLMOD's one:
- cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(rhs().const_cast_derived());
+ cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(rhs().const_cast_derived());
cholmod_dense* x = cholmod_solve(CHOLMOD_A, cholmodFactor, &cdb, cholmodCommon);
dst = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x), rhs().rows());
@@ -235,7 +236,7 @@ void SparseLLT<_MatrixType,Cholmod>::compute(const _MatrixType& a)
m_cholmodFactor = 0;
}
- cholmod_sparse A = ei_cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a));
+ cholmod_sparse A = internal::cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a));
// m_cholmod.supernodal = CHOLMOD_AUTO;
// TODO
// if (m_flags&IncompleteFactorization)
@@ -271,11 +272,11 @@ SparseLLT<_MatrixType,Cholmod>::matrixL() const
{
if (m_status & MatrixLIsDirty)
{
- ei_assert(!(m_status & SupernodalFactorIsDirty));
+ eigen_assert(!(m_status & SupernodalFactorIsDirty));
cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod);
const_cast<typename Base::CholMatrixType&>(m_matrix) =
- ei_map_cholmod_sparse_to_eigen<Scalar,ColMajor,Index>(*cmRes);
+ internal::map_cholmod_sparse_to_eigen<Scalar,ColMajor,Index>(*cmRes);
free(cmRes);
m_status = (m_status & ~MatrixLIsDirty);
@@ -291,7 +292,7 @@ template<typename Derived>
bool SparseLLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
{
//Index size = m_cholmodFactor->n;
- ei_assert((Index)m_cholmodFactor->n==b.rows());
+ eigen_assert((Index)m_cholmodFactor->n==b.rows());
// this uses Eigen's triangular sparse solver
// if (m_status & MatrixLIsDirty)
@@ -299,10 +300,10 @@ bool SparseLLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
// Base::solveInPlace(b);
// as long as our own triangular sparse solver is not fully optimal,
// let's use CHOLMOD's one:
- cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
+ cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(b);
cholmod_dense* x = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &cdb, &m_cholmod);
- ei_assert(x && "Eigen: cholmod_solve failed.");
+ eigen_assert(x && "Eigen: cholmod_solve failed.");
b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
cholmod_free_dense(&x, &m_cholmod);
@@ -362,11 +363,11 @@ class SparseLDLT<_MatrixType,Cholmod> : public SparseLDLT<_MatrixType>
void solveInPlace(MatrixBase<Derived> &b) const;
template<typename Rhs>
- inline const ei_solve_retval<SparseLDLT<MatrixType, Cholmod>, Rhs>
+ inline const internal::solve_retval<SparseLDLT<MatrixType, Cholmod>, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- ei_assert(true && "SparseLDLT is not initialized.");
- return ei_solve_retval<SparseLDLT<MatrixType, Cholmod>, Rhs>(*this, b.derived());
+ eigen_assert(true && "SparseLDLT is not initialized.");
+ return internal::solve_retval<SparseLDLT<MatrixType, Cholmod>, Rhs>(*this, b.derived());
}
void compute(const _MatrixType& matrix);
@@ -392,8 +393,8 @@ class SparseLDLT<_MatrixType,Cholmod> : public SparseLDLT<_MatrixType>
template<typename _MatrixType, typename Rhs>
- struct ei_solve_retval<SparseLDLT<_MatrixType, Cholmod>, Rhs>
- : ei_solve_retval_base<SparseLDLT<_MatrixType, Cholmod>, Rhs>
+ struct internal::solve_retval<SparseLDLT<_MatrixType, Cholmod>, Rhs>
+ : internal::solve_retval_base<SparseLDLT<_MatrixType, Cholmod>, Rhs>
{
typedef SparseLDLT<_MatrixType, Cholmod> SpLDLTDecType;
EIGEN_MAKE_SOLVE_HELPERS(SpLDLTDecType,Rhs)
@@ -401,7 +402,7 @@ template<typename _MatrixType, typename Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
//Index size = dec().cholmodFactor()->n;
- ei_assert((Index)dec().cholmodFactor()->n==rhs().rows());
+ eigen_assert((Index)dec().cholmodFactor()->n==rhs().rows());
cholmod_factor* cholmodFactor = const_cast<cholmod_factor*>(dec().cholmodFactor());
cholmod_common* cholmodCommon = const_cast<cholmod_common*>(dec().cholmodCommon());
@@ -411,7 +412,7 @@ template<typename _MatrixType, typename Rhs>
// Base::solveInPlace(b);
// as long as our own triangular sparse solver is not fully optimal,
// let's use CHOLMOD's one:
- cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(rhs().const_cast_derived());
+ cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(rhs().const_cast_derived());
cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, cholmodFactor, &cdb, cholmodCommon);
dst = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x), rhs().rows());
@@ -434,7 +435,7 @@ void SparseLDLT<_MatrixType,Cholmod>::compute(const _MatrixType& a)
m_cholmodFactor = 0;
}
- cholmod_sparse A = ei_cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a));
+ cholmod_sparse A = internal::cholmod_map_eigen_to_sparse(const_cast<_MatrixType&>(a));
//m_cholmod.supernodal = CHOLMOD_AUTO;
m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
@@ -473,7 +474,7 @@ SparseLDLT<_MatrixType,Cholmod>::matrixL() const
{
if (m_status & MatrixLIsDirty)
{
- ei_assert(!(m_status & SupernodalFactorIsDirty));
+ eigen_assert(!(m_status & SupernodalFactorIsDirty));
cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod);
const_cast<typename Base::CholMatrixType&>(m_matrix) = MappedSparseMatrix<Scalar>(*cmRes);
@@ -494,7 +495,7 @@ template<typename Derived>
void SparseLDLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
{
//Index size = m_cholmodFactor->n;
- ei_assert((Index)m_cholmodFactor->n == b.rows());
+ eigen_assert((Index)m_cholmodFactor->n == b.rows());
// this uses Eigen's triangular sparse solver
// if (m_status & MatrixLIsDirty)
@@ -502,7 +503,7 @@ void SparseLDLT<_MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
// Base::solveInPlace(b);
// as long as our own triangular sparse solver is not fully optimal,
// let's use CHOLMOD's one:
- cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
+ cholmod_dense cdb = internal::cholmod_map_eigen_to_dense(b);
cholmod_dense* x = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &cdb, &m_cholmod);
b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
cholmod_free_dense(&x, &m_cholmod);
diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
index f81eb693d..4ea41af85 100644
--- a/unsupported/Eigen/src/SparseExtra/RandomSetter.h
+++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -305,8 +305,8 @@ class RandomSetter
/** \returns a reference to the coefficient at given coordinates \a row, \a col */
Scalar& operator() (Index row, Index col)
{
- ei_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix");
- ei_assert(((!IsLower) || (col<=row)) && "Invalid access to an upper triangular matrix");
+ eigen_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix");
+ eigen_assert(((!IsLower) || (col<=row)) && "Invalid access to an upper triangular matrix");
const Index outer = SetterRowMajor ? row : col;
const Index inner = SetterRowMajor ? col : row;
const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLDLT.h b/unsupported/Eigen/src/SparseExtra/SparseLDLT.h
index a852f2b0f..837d70295 100644
--- a/unsupported/Eigen/src/SparseExtra/SparseLDLT.h
+++ b/unsupported/Eigen/src/SparseExtra/SparseLDLT.h
@@ -99,7 +99,7 @@ class SparseLDLT
SparseLDLT(int flags = 0)
: m_flags(flags), m_status(0)
{
- ei_assert((MatrixType::Flags&RowMajorBit)==0);
+ eigen_assert((MatrixType::Flags&RowMajorBit)==0);
m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
}
@@ -108,7 +108,7 @@ class SparseLDLT
SparseLDLT(const MatrixType& matrix, int flags = 0)
: m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
{
- ei_assert((MatrixType::Flags&RowMajorBit)==0);
+ eigen_assert((MatrixType::Flags&RowMajorBit)==0);
m_precision = RealScalar(0.1) * Eigen::NumTraits<RealScalar>::dummy_precision();
compute(matrix);
}
@@ -166,11 +166,11 @@ class SparseLDLT
bool solveInPlace(MatrixBase<Derived> &b) const;
template<typename Rhs>
- inline const ei_solve_retval<SparseLDLT<MatrixType>, Rhs>
+ inline const internal::solve_retval<SparseLDLT<MatrixType>, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- ei_assert(true && "SparseLDLT is not initialized.");
- return ei_solve_retval<SparseLDLT<MatrixType>, Rhs>(*this, b.derived());
+ eigen_assert(true && "SparseLDLT is not initialized.");
+ return internal::solve_retval<SparseLDLT<MatrixType>, Rhs>(*this, b.derived());
}
inline Index cols() const { return m_matrix.cols(); }
@@ -193,13 +193,11 @@ class SparseLDLT
bool m_succeeded;
};
-
-
-
+namespace internal {
template<typename _MatrixType, typename Rhs>
-struct ei_solve_retval<SparseLDLT<_MatrixType>, Rhs>
- : ei_solve_retval_base<SparseLDLT<_MatrixType>, Rhs>
+struct solve_retval<SparseLDLT<_MatrixType>, Rhs>
+ : solve_retval_base<SparseLDLT<_MatrixType>, Rhs>
{
typedef SparseLDLT<_MatrixType> SpLDLTDecType;
EIGEN_MAKE_SOLVE_HELPERS(SpLDLTDecType,Rhs)
@@ -207,7 +205,7 @@ struct ei_solve_retval<SparseLDLT<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
//Index size = dec().matrixL().rows();
- ei_assert(dec().matrixL().rows()==rhs().rows());
+ eigen_assert(dec().matrixL().rows()==rhs().rows());
Rhs b(rhs().rows(), rhs().cols());
b = rhs();
@@ -225,7 +223,7 @@ struct ei_solve_retval<SparseLDLT<_MatrixType>, Rhs>
};
-
+} // end namespace internal
/** Computes / recomputes the LDLT decomposition of matrix \a a
* using the default algorithm.
@@ -332,7 +330,7 @@ bool SparseLDLT<_MatrixType,Backend>::_numeric(const _MatrixType& a)
Index i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
if (i <= k)
{
- y[i] += ei_conj(Ax[p]); /* scatter A(i,k) into Y (sum duplicates) */
+ y[i] += internal::conj(Ax[p]); /* scatter A(i,k) into Y (sum duplicates) */
Index len;
for (len = 0; tags[i] != k; i = m_parent[i])
{
@@ -355,9 +353,9 @@ bool SparseLDLT<_MatrixType,Backend>::_numeric(const _MatrixType& a)
Index p2 = Lp[i] + m_nonZerosPerCol[i];
Index p;
for (p = Lp[i]; p < p2; ++p)
- y[Li[p]] -= ei_conj(Lx[p]) * (yi);
+ y[Li[p]] -= internal::conj(Lx[p]) * (yi);
Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
- m_diag[k] -= l_ki * ei_conj(yi);
+ m_diag[k] -= l_ki * internal::conj(yi);
Li[p] = k; /* store L(k,i) in column form of L */
Lx[p] = (l_ki);
++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
@@ -382,7 +380,7 @@ template<typename Derived>
bool SparseLDLT<_MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
{
//Index size = m_matrix.rows();
- ei_assert(m_matrix.rows()==b.rows());
+ eigen_assert(m_matrix.rows()==b.rows());
if (!m_succeeded)
return false;
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLLT.h b/unsupported/Eigen/src/SparseExtra/SparseLLT.h
index 5be914b6a..ac042217b 100644
--- a/unsupported/Eigen/src/SparseExtra/SparseLLT.h
+++ b/unsupported/Eigen/src/SparseExtra/SparseLLT.h
@@ -112,11 +112,11 @@ class SparseLLT
bool solveInPlace(MatrixBase<Derived> &b) const;
template<typename Rhs>
- inline const ei_solve_retval<SparseLLT<MatrixType>, Rhs>
+ inline const internal::solve_retval<SparseLLT<MatrixType>, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- ei_assert(true && "SparseLLT is not initialized.");
- return ei_solve_retval<SparseLLT<MatrixType>, Rhs>(*this, b.derived());
+ eigen_assert(true && "SparseLLT is not initialized.");
+ return internal::solve_retval<SparseLLT<MatrixType>, Rhs>(*this, b.derived());
}
inline Index cols() const { return m_matrix.cols(); }
@@ -134,13 +134,11 @@ class SparseLLT
};
-
-
-
+namespace internal {
template<typename _MatrixType, typename Rhs>
-struct ei_solve_retval<SparseLLT<_MatrixType>, Rhs>
- : ei_solve_retval_base<SparseLLT<_MatrixType>, Rhs>
+struct solve_retval<SparseLLT<_MatrixType>, Rhs>
+ : solve_retval_base<SparseLLT<_MatrixType>, Rhs>
{
typedef SparseLLT<_MatrixType> SpLLTDecType;
EIGEN_MAKE_SOLVE_HELPERS(SpLLTDecType,Rhs)
@@ -148,7 +146,7 @@ struct ei_solve_retval<SparseLLT<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
const Index size = dec().matrixL().rows();
- ei_assert(size==rhs().rows());
+ eigen_assert(size==rhs().rows());
Rhs b(rhs().rows(), rhs().cols());
b = rhs();
@@ -162,7 +160,7 @@ struct ei_solve_retval<SparseLLT<_MatrixType>, Rhs>
};
-
+} // end namespace internal
/** Computes / recomputes the LLT decomposition of matrix \a a
@@ -184,7 +182,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
m_matrix.reserve(a.nonZeros()*2);
for (Index j = 0; j < size; ++j)
{
- Scalar x = ei_real(a.coeff(j,j));
+ Scalar x = internal::real(a.coeff(j,j));
// TODO better estimate of the density !
tempVector.init(density>0.001? IsDense : IsSparse);
@@ -193,7 +191,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
// init with current matrix a
{
typename _MatrixType::InnerIterator it(a,j);
- ei_assert(it.index()==j &&
+ eigen_assert(it.index()==j &&
"matrix must has non zero diagonal entries and only the lower triangular part must be stored");
++it; // skip diagonal element
for (; it; ++it)
@@ -207,7 +205,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
if (it && it.index()==j)
{
Scalar y = it.value();
- x -= ei_abs2(y);
+ x -= internal::abs2(y);
++it; // skip j-th element, and process remaining column coefficients
tempVector.restart();
for (; it; ++it)
@@ -218,7 +216,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
}
// copy the temporary vector to the respective m_matrix.col()
// while scaling the result by 1/real(x)
- RealScalar rx = ei_sqrt(ei_real(x));
+ RealScalar rx = internal::sqrt(internal::real(x));
m_matrix.insert(j,j) = rx; // FIXME use insertBack
Scalar y = Scalar(1)/rx;
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector, m_precision*rx); it; ++it)
@@ -236,7 +234,7 @@ template<typename Derived>
bool SparseLLT<_MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
{
const Index size = m_matrix.rows();
- ei_assert(size==b.rows());
+ eigen_assert(size==b.rows());
m_matrix.template triangularView<Lower>().solveInPlace(b);
m_matrix.adjoint().template triangularView<Upper>().solveInPlace(b);
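The SparseLLT and SparseLDLT hunks above (and the UmfPack ones below) all specialize the solve_retval expression that solve() now returns from namespace internal. A self-contained toy sketch of that return-value idiom, assuming nothing from Eigen (DiagonalDec and solveInPlaceCopy are invented stand-ins for the real decompositions and the EIGEN_MAKE_SOLVE_HELPERS machinery):

```cpp
// Toy version of the internal::solve_retval pattern: solve() returns a
// lightweight expression holding the decomposition and the rhs, and the
// actual solve happens when evalTo() writes into the destination.
#include <cassert>
#include <cstddef>
#include <vector>

namespace internal {

template<typename Dec, typename Rhs>
struct solve_retval
{
  solve_retval(const Dec& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {}

  // Lazily evaluate the solution into dst.
  template<typename Dest> void evalTo(Dest& dst) const
  { m_dec.solveInPlaceCopy(m_rhs, dst); }

  const Dec& m_dec;
  const Rhs& m_rhs;
};

} // end namespace internal

// Trivial "decomposition" of a diagonal matrix, standing in for SparseLLT & co.
struct DiagonalDec
{
  std::vector<double> d;

  template<typename Rhs>
  internal::solve_retval<DiagonalDec, Rhs> solve(const Rhs& b) const
  { return internal::solve_retval<DiagonalDec, Rhs>(*this, b); }

  template<typename Rhs, typename Dest>
  void solveInPlaceCopy(const Rhs& b, Dest& x) const
  {
    assert(b.size() == d.size());
    x = b;
    for (std::size_t i = 0; i < d.size(); ++i)
      x[i] /= d[i];                      // diagonal solve: x = D^-1 b
  }
};

int main()
{
  DiagonalDec dec;
  dec.d = {2.0, 4.0};
  std::vector<double> b = {2.0, 8.0}, x;
  dec.solve(b).evalTo(x);                // x == {1.0, 2.0}
  return 0;
}
```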
diff --git a/unsupported/Eigen/src/SparseExtra/SparseLU.h b/unsupported/Eigen/src/SparseExtra/SparseLU.h
index f6ced52c9..3d10dbbee 100644
--- a/unsupported/Eigen/src/SparseExtra/SparseLU.h
+++ b/unsupported/Eigen/src/SparseExtra/SparseLU.h
@@ -103,7 +103,7 @@ class SparseLU
void setOrderingMethod(int m)
{
- ei_assert( (m&~OrderingMask) == 0 && m!=0 && "invalid ordering method");
+ eigen_assert( (m&~OrderingMask) == 0 && m!=0 && "invalid ordering method");
m_flags = m_flags&~OrderingMask | m&OrderingMask;
}
@@ -141,7 +141,7 @@ class SparseLU
template<typename _MatrixType, typename Backend>
void SparseLU<_MatrixType,Backend>::compute(const _MatrixType& )
{
- ei_assert(false && "not implemented yet");
+ eigen_assert(false && "not implemented yet");
}
/** Computes *x = U^-1 L^-1 b
@@ -156,7 +156,7 @@ template<typename _MatrixType, typename Backend>
template<typename BDerived, typename XDerived>
bool SparseLU<_MatrixType,Backend>::solve(const MatrixBase<BDerived> &, MatrixBase<XDerived>* , const int ) const
{
- ei_assert(false && "not implemented yet");
+ eigen_assert(false && "not implemented yet");
return false;
}
diff --git a/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h b/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h
index 9453aabce..31a0ee509 100644
--- a/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h
+++ b/unsupported/Eigen/src/SparseExtra/SuperLUSupport.h
@@ -126,7 +126,7 @@ struct SluMatrix : SuperMatrix
Store = &storage;
else
{
- ei_assert(false && "storage type not supported");
+ eigen_assert(false && "storage type not supported");
Store = 0;
}
}
@@ -134,17 +134,17 @@ struct SluMatrix : SuperMatrix
template<typename Scalar>
void setScalarType()
{
- if (ei_is_same_type<Scalar,float>::ret)
+ if (internal::is_same_type<Scalar,float>::ret)
Dtype = SLU_S;
- else if (ei_is_same_type<Scalar,double>::ret)
+ else if (internal::is_same_type<Scalar,double>::ret)
Dtype = SLU_D;
- else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ else if (internal::is_same_type<Scalar,std::complex<float> >::ret)
Dtype = SLU_C;
- else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ else if (internal::is_same_type<Scalar,std::complex<double> >::ret)
Dtype = SLU_Z;
else
{
- ei_assert(false && "Scalar type not supported by SuperLU");
+ eigen_assert(false && "Scalar type not supported by SuperLU");
}
}
@@ -152,7 +152,7 @@ struct SluMatrix : SuperMatrix
static SluMatrix Map(Matrix<Scalar,Rows,Cols,Options,MRows,MCols>& mat)
{
typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
- ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
+ eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
SluMatrix res;
res.setStorageType(SLU_DN);
res.setScalarType<Scalar>();
@@ -198,7 +198,7 @@ struct SluMatrix : SuperMatrix
if (MatrixType::Flags & Lower)
res.Mtype = SLU_TRL;
if (MatrixType::Flags & SelfAdjoint)
- ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+ eigen_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
return res;
}
};
@@ -209,7 +209,7 @@ struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
static void run(MatrixType& mat, SluMatrix& res)
{
- ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
+ eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
res.setStorageType(SLU_DN);
res.setScalarType<Scalar>();
res.Mtype = SLU_GE;
@@ -256,21 +256,23 @@ struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
if (MatrixType::Flags & Lower)
res.Mtype = SLU_TRL;
if (MatrixType::Flags & SelfAdjoint)
- ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+ eigen_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
}
};
+namespace internal {
+
template<typename MatrixType>
-SluMatrix ei_asSluMatrix(MatrixType& mat)
+SluMatrix asSluMatrix(MatrixType& mat)
{
return SluMatrix::Map(mat);
}
/** View a Super LU matrix as an Eigen expression */
template<typename Scalar, int Flags, typename Index>
-MappedSparseMatrix<Scalar,Flags,Index> ei_map_superlu(SluMatrix& sluMat)
+MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
{
- ei_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
+ eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
|| (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);
Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
@@ -280,6 +282,8 @@ MappedSparseMatrix<Scalar,Flags,Index> ei_map_superlu(SluMatrix& sluMat)
sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
}
+} // end namespace internal
+
template<typename MatrixType>
class SparseLU<MatrixType,SuperLU> : public SparseLU<MatrixType>
{
@@ -393,7 +397,7 @@ void SparseLU<MatrixType,SuperLU>::compute(const MatrixType& a)
m_sluOptions.ColPerm = NATURAL;
};
- m_sluA = ei_asSluMatrix(m_matrix);
+ m_sluA = internal::asSluMatrix(m_matrix);
memset(&m_sluL,0,sizeof m_sluL);
memset(&m_sluU,0,sizeof m_sluU);
//m_sluEqued = 'B';
@@ -471,7 +475,7 @@ bool SparseLU<MatrixType,SuperLU>::solve(const MatrixBase<BDerived> &b,
{
const int size = m_matrix.rows();
const int rhsCols = b.cols();
- ei_assert(size==b.rows());
+ eigen_assert(size==b.rows());
switch (transposed) {
case SvNoTrans : m_sluOptions.Trans = NOTRANS; break;
@@ -637,7 +641,7 @@ typename SparseLU<MatrixType,SuperLU>::Scalar SparseLU<MatrixType,SuperLU>::dete
if (m_u._outerIndexPtr()[j+1]-m_u._outerIndexPtr()[j] > 0)
{
int lastId = m_u._outerIndexPtr()[j+1]-1;
- ei_assert(m_u._innerIndexPtr()[lastId]<=j);
+ eigen_assert(m_u._innerIndexPtr()[lastId]<=j);
if (m_u._innerIndexPtr()[lastId]==j)
{
det *= m_u._valuePtr()[lastId];
diff --git a/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
index 9d7e3e96e..4be1aca62 100644
--- a/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
+++ b/unsupported/Eigen/src/SparseExtra/UmfPackSupport.h
@@ -183,11 +183,11 @@ class SparseLU<_MatrixType,UmfPack> : public SparseLU<_MatrixType>
bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
template<typename Rhs>
- inline const ei_solve_retval<SparseLU<MatrixType, UmfPack>, Rhs>
+ inline const internal::solve_retval<SparseLU<MatrixType, UmfPack>, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
- ei_assert(true && "SparseLU is not initialized.");
- return ei_solve_retval<SparseLU<MatrixType, UmfPack>, Rhs>(*this, b.derived());
+ eigen_assert(true && "SparseLU is not initialized.");
+ return internal::solve_retval<SparseLU<MatrixType, UmfPack>, Rhs>(*this, b.derived());
}
void compute(const MatrixType& matrix);
@@ -197,7 +197,7 @@ class SparseLU<_MatrixType,UmfPack> : public SparseLU<_MatrixType>
inline const MatrixType& matrixLU() const
{
- //ei_assert(m_isInitialized && "LU is not initialized.");
+ //eigen_assert(m_isInitialized && "LU is not initialized.");
return *m_matrixRef;
}
@@ -221,10 +221,11 @@ class SparseLU<_MatrixType,UmfPack> : public SparseLU<_MatrixType>
mutable bool m_extractedDataAreDirty;
};
+namespace internal {
template<typename _MatrixType, typename Rhs>
- struct ei_solve_retval<SparseLU<_MatrixType, UmfPack>, Rhs>
- : ei_solve_retval_base<SparseLU<_MatrixType, UmfPack>, Rhs>
+ struct solve_retval<SparseLU<_MatrixType, UmfPack>, Rhs>
+ : solve_retval_base<SparseLU<_MatrixType, UmfPack>, Rhs>
{
typedef SparseLU<_MatrixType, UmfPack> SpLUDecType;
EIGEN_MAKE_SOLVE_HELPERS(SpLUDecType,Rhs)
@@ -233,8 +234,8 @@ template<typename _MatrixType, typename Rhs>
{
const int rhsCols = rhs().cols();
- ei_assert((Rhs::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
- ei_assert((Dest::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
+ eigen_assert((Rhs::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
+ eigen_assert((Dest::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
void* numeric = const_cast<void*>(dec().numeric());
@@ -244,13 +245,13 @@ template<typename _MatrixType, typename Rhs>
errorCode = umfpack_solve(UMFPACK_A,
dec().matrixLU()._outerIndexPtr(), dec().matrixLU()._innerIndexPtr(), dec().matrixLU()._valuePtr(),
&dst.col(j).coeffRef(0), &rhs().const_cast_derived().col(j).coeffRef(0), numeric, 0, 0);
- ei_assert(!errorCode && "UmfPack could not solve the system.");
+ eigen_assert(!errorCode && "UmfPack could not solve the system.");
}
}
};
-
+} // end namespace internal
template<typename MatrixType>
void SparseLU<MatrixType,UmfPack>::compute(const MatrixType& a)
@@ -258,7 +259,7 @@ void SparseLU<MatrixType,UmfPack>::compute(const MatrixType& a)
typedef typename MatrixType::Index Index;
const Index rows = a.rows();
const Index cols = a.cols();
- ei_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet");
+ eigen_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet");
m_matrixRef = &a;
@@ -322,9 +323,9 @@ bool SparseLU<MatrixType,UmfPack>::solve(const MatrixBase<BDerived> &b, MatrixBa
{
//const int size = m_matrix.rows();
const int rhsCols = b.cols();
-// ei_assert(size==b.rows());
- ei_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
- ei_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
+// eigen_assert(size==b.rows());
+ eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
+ eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
int errorCode;
for (int j=0; j<rhsCols; ++j)