author    Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-02-18 18:51:24 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-02-18 18:51:24 -0800
commit    cbd7e98174127a52d4d76bec4394316548bb31e5 (patch)
tree      cc14b314fc0b32591eeb88845d259591b1b37129 /Eigen/src
parent    7ed9441ea472031bb9357b5bec80151cae7ed2cb (diff)
parent    37a1d736bfd6fdaa0e1a93aee585d1861ab4004a (diff)
Merged the latest version of the code from eigen/eigen
Diffstat (limited to 'Eigen/src')
 Eigen/src/Cholesky/LDLT.h                               |  52
 Eigen/src/Core/EigenBase.h                              |  30
 Eigen/src/Core/MatrixBase.h                             |  45
 Eigen/src/Core/StableNorm.h                             |  27
 Eigen/src/Core/util/Memory.h                            |  18
 Eigen/src/Geometry/Transform.h                          |  14
 Eigen/src/SparseCore/ConservativeSparseSparseProduct.h  |  34
 Eigen/src/SparseCore/SparseBlock.h                      | 136
 Eigen/src/SparseCore/SparseDenseProduct.h               |   4
 Eigen/src/SparseCore/SparseMatrix.h                     |  10
 Eigen/src/SparseCore/SparseMatrixBase.h                 |   4
 Eigen/src/SparseCore/SparseProduct.h                    |   5
 Eigen/src/SparseCore/SparseSparseProductWithPruning.h   |  13
 Eigen/src/SparseLU/SparseLU_Memory.h                    |   4
 14 files changed, 269 insertions(+), 127 deletions(-)
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 4faedd257..f34d26465 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -16,7 +16,10 @@
namespace Eigen {
namespace internal {
-template<typename MatrixType, int UpLo> struct LDLT_Traits;
+ template<typename MatrixType, int UpLo> struct LDLT_Traits;
+
+ // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+ enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
}
/** \ingroup Cholesky_Module
@@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
- LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+ LDLT()
+ : m_matrix(),
+ m_transpositions(),
+ m_sign(internal::ZeroSign),
+ m_isInitialized(false)
+ {}
/** \brief Default Constructor with memory preallocation
*
@@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(size, size),
m_transpositions(size),
m_temporary(size),
+ m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
@@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(matrix.rows(), matrix.cols()),
m_transpositions(matrix.rows()),
m_temporary(matrix.rows()),
+ m_sign(internal::ZeroSign),
m_isInitialized(false)
{
compute(matrix);
@@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isPositive() const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == 1;
+ return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
}
#ifdef EIGEN2_SUPPORT
@@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isNegative(void) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == -1;
+ return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@@ -235,7 +245,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType m_matrix;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
- int m_sign;
+ internal::SignMatrix m_sign;
bool m_isInitialized;
};
@@ -246,7 +256,7 @@ template<int UpLo> struct ldlt_inplace;
template<> struct ldlt_inplace<Lower>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
- static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
using std::abs;
typedef typename MatrixType::Scalar Scalar;
@@ -258,8 +268,9 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
- if(sign)
- *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
+ if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
+ else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+ else sign = ZeroSign;
return true;
}
@@ -284,7 +295,6 @@ template<> struct ldlt_inplace<Lower>
if(biggest_in_corner < cutoff)
{
for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
- if(sign) *sign = 0;
break;
}
@@ -325,15 +335,15 @@ template<> struct ldlt_inplace<Lower>
}
if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
A21 /= mat.coeffRef(k,k);
-
- if(sign)
- {
- // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
- int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
- if(k == 0)
- *sign = newSign;
- else if(*sign != newSign)
- *sign = 0;
+
+ RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+ if (sign == PositiveSemiDef) {
+ if (realAkk < 0) sign = Indefinite;
+ } else if (sign == NegativeSemiDef) {
+ if (realAkk > 0) sign = Indefinite;
+ } else if (sign == ZeroSign) {
+ if (realAkk > 0) sign = PositiveSemiDef;
+ else if (realAkk < 0) sign = NegativeSemiDef;
}
}
@@ -399,7 +409,7 @@ template<> struct ldlt_inplace<Lower>
template<> struct ldlt_inplace<Upper>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
- static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
Transpose<MatrixType> matt(mat);
return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
@@ -445,7 +455,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_isInitialized = false;
m_temporary.resize(size);
- internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+ internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
m_isInitialized = true;
return *this;
@@ -473,7 +483,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
for (Index i = 0; i < size; i++)
m_transpositions.coeffRef(i) = i;
m_temporary.resize(size);
- m_sign = sigma>=0 ? 1 : -1;
+ m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
m_isInitialized = true;
}
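
Note on the LDLT.h change: the old int-based sign tracking (1, -1, 0) is replaced by the internal::SignMatrix enum, and a zero pivot no longer erases what is known about semi-definiteness; isPositive() and isNegative() both accept ZeroSign, so semi-definite matrices with rank deficiency are reported correctly. A minimal sketch of the user-visible behavior (matrix values are illustrative, not from this patch):

#include <Eigen/Cholesky>
#include <iostream>

int main()
{
  // Rank-1 positive semi-definite matrix: one pivot is positive, the
  // other is zero, so the tracked sign stays PositiveSemiDef.
  Eigen::Matrix2d A;
  A << 1, 1,
       1, 1;
  Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
  std::cout << ldlt.isPositive() << "\n"; // expected: 1
  std::cout << ldlt.isNegative() << "\n"; // expected: 0
}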
diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h
index a25e823ab..1a577c2dc 100644
--- a/Eigen/src/Core/EigenBase.h
+++ b/Eigen/src/Core/EigenBase.h
@@ -141,36 +141,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
return derived();
}
-/** replaces \c *this by \c *this * \a other.
- *
- * \returns a reference to \c *this
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline Derived&
-MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheRight(derived());
- return derived();
-}
-
-/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheRight(derived());
-}
-
-/** replaces \c *this by \c *this * \a other. */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheLeft(derived());
-}
-
} // end namespace Eigen
#endif // EIGEN_EIGENBASE_H
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 61798317e..49b69f873 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -564,6 +564,51 @@ template<typename Derived> class MatrixBase
{EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** replaces \c *this by \c *this * \a other.
+ *
+ * \returns a reference to \c *this
+ *
+ * Example: \include MatrixBase_applyOnTheRight.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived&
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+ return derived();
+}
+
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
+ *
+ * Example: \include MatrixBase_applyOnTheRight.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+}
+
+/** replaces \c *this by \a other * \c *this.
+ *
+ * Example: \include MatrixBase_applyOnTheLeft.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheLeft.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheLeft(derived());
+}
+
} // end namespace Eigen
#endif // EIGEN_MATRIXBASE_H
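
Note: the three operators removed from EigenBase.h above reappear here essentially unchanged; the differences are documentation (the \include examples, and applyOnTheLeft() now correctly documented as "\a other * \c *this"). A short usage sketch with illustrative values:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m = Eigen::Matrix2d::Identity();
  Eigen::Matrix2d p;
  p << 0, 1,
       1, 0;
  m.applyOnTheRight(p); // m = m * p
  m.applyOnTheLeft(p);  // m = p * m
  m *= p;               // equivalent to applyOnTheRight(p)
  std::cout << m << "\n";
}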
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index c219e2f53..525620bad 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -17,16 +17,29 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
- Scalar max = bl.cwiseAbs().maxCoeff();
- if (max>scale)
+ using std::max;
+ Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
+
+ if (maxCoeff>scale)
{
- ssq = ssq * numext::abs2(scale/max);
- scale = max;
- invScale = Scalar(1)/scale;
+ ssq = ssq * numext::abs2(scale/maxCoeff);
+ Scalar tmp = Scalar(1)/maxCoeff;
+ if(tmp > NumTraits<Scalar>::highest())
+ {
+ invScale = NumTraits<Scalar>::highest();
+ scale = Scalar(1)/invScale;
+ }
+ else
+ {
+ scale = maxCoeff;
+ invScale = tmp;
+ }
}
- // TODO if the max is much much smaller than the current scale,
+
+ // TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
- ssq += (bl*invScale).squaredNorm();
+ if(scale>Scalar(0)) // if scale==0, then bl is 0
+ ssq += (bl*invScale).squaredNorm();
}
template<typename Derived>
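
Note: two behavioral fixes in stable_norm_kernel(). If maxCoeff is so small that Scalar(1)/maxCoeff overflows, invScale is clamped to NumTraits<Scalar>::highest() and scale recomputed from it; and the accumulation is skipped while scale is still zero, i.e. everything seen so far is exactly zero. A sketch of what this protects against (assumes IEEE doubles; not part of the patch):

#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main()
{
  // All entries denormal: 1/maxCoeff would overflow to +inf, which
  // previously poisoned the accumulator; the clamp keeps it finite.
  Eigen::VectorXd tiny =
      Eigen::VectorXd::Constant(4, std::numeric_limits<double>::denorm_min());
  std::cout << tiny.stableNorm() << "\n"; // tiny but finite, not inf/nan

  // All-zero input: scale stays 0 and the squaredNorm term is skipped.
  std::cout << Eigen::VectorXd::Zero(4).stableNorm() << "\n"; // 0
}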
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 2f2398bbf..88cf1c30d 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -608,7 +608,7 @@ template<typename T> class aligned_stack_memory_handler
*/
#ifdef EIGEN_ALLOCA
// The native alloca() that comes with llvm aligns buffer on 16 bytes even when AVX is enabled.
- #if defined(__arm__) || EIGEN_ALIGN_BYTES > 16
+#if defined(__arm__) || defined(_WIN32) || EIGEN_ALIGN_BYTES > 16
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+EIGEN_ALIGN_BYTES)) & ~(size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES)
#else
#define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
@@ -761,11 +761,27 @@ public:
::new( p ) T( value );
}
+#if (__cplusplus >= 201103L)
+ template <typename U, typename... Args>
+ void construct( U* u, Args&&... args)
+ {
+ ::new( static_cast<void*>(u) ) U( std::forward<Args>( args )... );
+ }
+#endif
+
void destroy( pointer p )
{
p->~T();
}
+#if (__cplusplus >= 201103L)
+ template <typename U>
+ void destroy( U* u )
+ {
+ u->~U();
+ }
+#endif
+
void deallocate( pointer p, size_type /*num*/ )
{
internal::aligned_free( p );
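
Note: C++11 allocators may be asked to construct and destroy objects of types other than value_type (std::map, for instance, builds pair<const Key,T> nodes), which the two guarded variadic overloads now support in aligned_allocator. A usage sketch, assuming a C++11-capable compiler; the names Node and poses are ours, for illustration only:

#include <Eigen/Core>
#include <map>
#include <utility>

int main()
{
  // The map's node type differs from the allocator's value_type, so the
  // container goes through the templated construct()/destroy() overloads.
  typedef std::pair<const int, Eigen::Vector4f> Node;
  std::map<int, Eigen::Vector4f, std::less<int>,
           Eigen::aligned_allocator<Node> > poses;
  poses.insert(Node(0, Eigen::Vector4f::Ones()));
  return poses.size() == 1 ? 0 : 1;
}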
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index 419f90148..0a5012cfe 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -530,9 +530,9 @@ public:
inline Transform& operator=(const UniformScaling<Scalar>& t);
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
- inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
+ inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator*(const UniformScaling<Scalar>& s) const
{
- Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
+ Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode),Options> res = *this;
res.scale(s.factor());
return res;
}
@@ -699,9 +699,13 @@ template<typename Scalar, int Dim, int Mode,int Options>
Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
{
EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
- m_matrix << other.m11(), other.m21(), other.dx(),
- other.m12(), other.m22(), other.dy(),
- 0, 0, 1;
+ if (Mode == int(AffineCompact))
+ m_matrix << other.m11(), other.m21(), other.dx(),
+ other.m12(), other.m22(), other.dy();
+ else
+ m_matrix << other.m11(), other.m21(), other.dx(),
+ other.m12(), other.m22(), other.dy(),
+ 0, 0, 1;
return *this;
}
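
Note: two Transform fixes. First, Transform * UniformScaling used to force the result mode to Isometry; it now preserves Mode (Isometry itself still decays to Affine, since scaling breaks isometry). Second, assigning a QMatrix to an AffineCompact transform no longer writes a third row that the compact storage does not have. A sketch of the first fix; the CompactXf name is ours and the values are illustrative:

#include <Eigen/Geometry>

int main()
{
  typedef Eigen::Transform<double, 3, Eigen::AffineCompact> CompactXf;
  CompactXf t = CompactXf::Identity();
  // The result mode is now AffineCompact as well, not Isometry.
  CompactXf t2 = t * Eigen::UniformScaling<double>(2.0);
  return t2.matrix().rows() == 3 ? 0 : 1; // compact storage is 3x4
}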
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 4b13f08d4..5c320e2d2 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
}
// unordered insertion
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
@@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
#if 0
// alternative ordered insertion code:
- int t200 = rows/(log2(200)*1.39);
- int t = (rows*100)/139;
+ Index t200 = rows/(log2(200)*1.39);
+ Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement fast sort algorithms for very small nnz
@@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
if(true)
{
if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
@@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
else
{
// dense path
- for(int i=0; i<rows; ++i)
+ for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
@@ -134,8 +134,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
// sort the non zeros:
@@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
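
Note: throughout this selector, loop counters and the RowMajorMatrix/ColMajorMatrix temporaries now carry ResultType::Index instead of defaulting to int, so products of matrices with a non-default index type no longer round-trip through int-indexed temporaries. A minimal sketch, assuming long indices:

#include <Eigen/Sparse>

int main()
{
  // Matrices indexed by long instead of the default int; the product's
  // internal temporaries now use the same Index type.
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> SpMat;
  SpMat A(100, 100), B(100, 100);
  A.insert(0, 0) = 1.0;
  B.insert(0, 0) = 2.0;
  SpMat C = A * B; // conservative sparse*sparse product
  return C.nonZeros() == 1 ? 0 : 1;
}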
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index d0436aa7c..6f615e250 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -335,6 +335,14 @@ const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::inner
}
+namespace internal {
+
+template< typename XprType, int BlockRows, int BlockCols, bool InnerPanel,
+ bool OuterVector = (BlockCols==1 && XprType::IsRowMajor) || (BlockRows==1 && !XprType::IsRowMajor)>
+class GenericSparseBlockInnerIteratorImpl;
+
+}
+
/** Generic implementation of sparse Block expression.
* Real-only.
*/
@@ -342,11 +350,12 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
- typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
/** Column or Row constructor
*/
@@ -354,8 +363,8 @@ public:
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(xpr.rows()),
- m_blockCols(xpr.cols())
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
@@ -394,29 +403,8 @@ public:
inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
- class InnerIterator : public _MatrixTypeNested::InnerIterator
- {
- typedef typename _MatrixTypeNested::InnerIterator Base;
- const BlockType& m_block;
- Index m_end;
- public:
-
- EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
- : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
- m_block(block),
- m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
- {
- while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
- Base::operator++();
- }
-
- inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
- inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
- inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
- inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
-
- inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
- };
+ typedef internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel> InnerIterator;
+
class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
{
typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
@@ -441,7 +429,7 @@ public:
inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
};
protected:
- friend class InnerIterator;
+ friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend class ReverseInnerIterator;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@@ -454,6 +442,100 @@ public:
};
+namespace internal {
+ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+ class GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel,false> : public Block<XprType, BlockRows, BlockCols, InnerPanel>::_MatrixTypeNested::InnerIterator
+ {
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+ enum {
+ IsRowMajor = BlockType::IsRowMajor
+ };
+ typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename BlockType::Index Index;
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ const BlockType& m_block;
+ Index m_end;
+ public:
+
+ EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer)
+ : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+ m_block(block),
+ m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+ {
+ while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
+ Base::operator++();
+ }
+
+ inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+ inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+ inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
+ inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
+
+ inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
+ };
+
+ // Row vector of a column-major sparse matrix or column of a row-major one.
+ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+ class GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel,true>
+ {
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+ enum {
+ IsRowMajor = BlockType::IsRowMajor
+ };
+ typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename BlockType::Index Index;
+ typedef typename BlockType::Scalar Scalar;
+ const BlockType& m_block;
+ Index m_outerPos;
+ Index m_innerIndex;
+ Scalar m_value;
+ Index m_end;
+ public:
+
+ EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0)
+ :
+ m_block(block),
+ m_outerPos( (IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - 1), // -1 so that operator++ finds the first non-zero entry
+ m_innerIndex(IsRowMajor ? block.m_startRow.value() : block.m_startCol.value()),
+ m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+
+ ++(*this);
+ }
+
+ inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+ inline Index outer() const { return 0; }
+ inline Index row() const { return IsRowMajor ? 0 : index(); }
+ inline Index col() const { return IsRowMajor ? index() : 0; }
+
+ inline Scalar value() const { return m_value; }
+
+ inline GenericSparseBlockInnerIteratorImpl& operator++()
+ {
+ // search next non-zero entry
+ while(m_outerPos<m_end)
+ {
+ m_outerPos++;
+ typename XprType::InnerIterator it(m_block.m_matrix, m_outerPos);
+ // search for the key m_innerIndex in the current outer-vector
+ while(it && it.index() < m_innerIndex) ++it;
+ if(it && it.index()==m_innerIndex)
+ {
+ m_value = it.value();
+ break;
+ }
+ }
+ return *this;
+ }
+
+ inline operator bool() const { return m_outerPos < m_end; }
+ };
+
+} // end namespace internal
+
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
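
Note: the InnerIterator that used to be hard-wired into BlockImpl is factored out as internal::GenericSparseBlockInnerIteratorImpl, and a second specialization covers the outer-vector case that previously had no iterator at all: a single row of a column-major matrix (or a column of a row-major one), implemented by probing each outer vector for the fixed inner index. A sketch, assuming row() routes to this sparse BlockImpl; the SparseRow typedef spells out what we believe row() returns in this tree:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3); // column-major by default
  A.insert(1, 0) = 4.0;
  A.insert(1, 2) = 5.0;
  A.makeCompressed();

  // A row of a column-major matrix is an outer-vector block: each
  // column is searched for an entry with row index 1.
  typedef Eigen::Block<Eigen::SparseMatrix<double>, 1, Eigen::Dynamic, false> SparseRow;
  SparseRow r = A.row(1);
  for (SparseRow::InnerIterator it(r, 0); it; ++it)
    std::cout << "col " << it.col() << ": " << it.value() << "\n";
  // expected output: col 0: 4, col 2: 5
}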
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 6132a4880..610833f3b 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -125,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
inline Scalar value() const { return Base::value() * m_factor; }
protected:
- int m_outer;
+ Index m_outer;
Scalar m_factor;
};
@@ -156,7 +156,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
{
for(Index c=0; c<rhs.cols(); ++c)
{
- int n = lhs.outerSize();
+ Index n = lhs.outerSize();
for(Index j=0; j<n; ++j)
{
typename Res::Scalar tmp(0);
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index f2e234eee..0e7e760fb 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -402,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
- eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
@@ -480,7 +480,7 @@ class SparseMatrix
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
- for (int i = 0; i < m_outerSize; i++)
+ for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
@@ -752,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
- int p = m.m_outerIndex[i];
- int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@@ -1022,7 +1022,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
wi.fill(-1);
Index count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
- for(int j=0; j<outerSize(); ++j)
+ for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 706f699b8..bbcf7fb1c 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -302,8 +302,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
}
else
{
- SparseMatrix<Scalar, RowMajorBit> trans = m;
- s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
+ SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
+ s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
}
}
return s;
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index 70b6480ef..cf7663070 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -16,6 +16,7 @@ template<typename Lhs, typename Rhs>
struct SparseSparseProductReturnType
{
typedef typename internal::traits<Lhs>::Scalar Scalar;
+ typedef typename internal::traits<Lhs>::Index Index;
enum {
LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
@@ -24,11 +25,11 @@ struct SparseSparseProductReturnType
};
typedef typename internal::conditional<TransposeLhs,
- SparseMatrix<Scalar,0>,
+ SparseMatrix<Scalar,0,Index>,
typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
typedef typename internal::conditional<TransposeRhs,
- SparseMatrix<Scalar,0>,
+ SparseMatrix<Scalar,0,Index>,
typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
typedef SparseSparseProduct<LhsNested, RhsNested> Type;
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 70857c7b6..fcc18f5c9 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
- //int size = lhs.outerSize();
+ //Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
@@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
- typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@@ -126,10 +126,11 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
- ColMajorMatrix colLhs(lhs);
- ColMajorMatrix colRhs(rhs);
- internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
+ ColMajorMatrixLhs colLhs(lhs);
+ ColMajorMatrixRhs colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
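
Note: the same Index-propagation fix applied to the pruning path, and the row-major/row-major case now gives each column-major temporary its own typedef (both currently keyed on Lhs::Index). A sketch of how this code path is reached; tolerance and sizes are illustrative:

#include <Eigen/Sparse>

int main()
{
  typedef Eigen::SparseMatrix<double, Eigen::RowMajor, long> SpMatR;
  SpMatR A(10, 10), B(10, 10);
  A.insert(0, 0) = 1e-12;
  B.insert(0, 0) = 1.0;
  // Row-major * row-major with pruning: the operands are converted to
  // column-major temporaries, which now keep the long Index type.
  SpMatR C = (A * B).pruned(1e-6); // drops entries negligible vs. the reference
  return 0;
}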
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
index d068d62df..1ffa7d54e 100644
--- a/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -70,7 +70,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
if(num_expansions == 0 || keep_prev)
new_len = length ; // First time allocate requested
else
- new_len = Index(alpha * length);
+ new_len = (std::max)(length+1,Index(alpha * length));
VectorType old_vec; // Temporary vector to hold the previous values
if (nbElts > 0 )
@@ -107,7 +107,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
do
{
alpha = (alpha + 1)/2;
- new_len = Index(alpha * length);
+ new_len = (std::max)(length+1,Index(alpha * length));
#ifdef EIGEN_EXCEPTIONS
try
#endif
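
Note: the expansion rule here previously computed new_len = Index(alpha * length), which truncates back to length for small lengths (alpha starts at 1.5 and is repeatedly moved toward 1 by alpha = (alpha + 1)/2), so the buffer could fail to grow and the retry loop could spin. Taking the max with length+1 guarantees progress. A standalone illustration of the arithmetic (not Eigen code):

#include <algorithm>

typedef long Index; // stands in for SparseLU's Index type

Index new_length(float alpha, Index length)
{
  // Old rule: Index(alpha * length); for alpha = 1.5 and length = 1
  // this truncates to 1, i.e. no growth at all.
  return (std::max)(length + 1, Index(alpha * length));
}

int main()
{
  return new_length(1.5f, 1) == 2 ? 0 : 1;
}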