From e8cdbedefb1913b5a0e2f2b7d38470f081cb8d29 Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Thu, 4 Dec 2014 22:48:53 +0100
Subject: bug #877, bug #572: Introduce a global Index typedef. Rename
 Sparse*::Index to StorageIndex, make Dense*::StorageIndex an alias to
 DenseIndex. Overall this commit gets rid of all Index conversion warnings.

---
 Eigen/src/Cholesky/LDLT.h | 1 +
 Eigen/src/Cholesky/LLT.h | 1 +
 Eigen/src/CholmodSupport/CholmodSupport.h | 24 +-
 Eigen/src/Core/ArrayBase.h | 1 -
 Eigen/src/Core/AssignEvaluator.h | 52 +++---
 Eigen/src/Core/BandMatrix.h | 12 +-
 Eigen/src/Core/Block.h | 14 +-
 Eigen/src/Core/CommaInitializer.h | 2 +-
 Eigen/src/Core/CoreEvaluators.h | 30 ++--
 Eigen/src/Core/CoreIterators.h | 18 +-
 Eigen/src/Core/CwiseBinaryOp.h | 8 +-
 Eigen/src/Core/CwiseUnaryOp.h | 4 +-
 Eigen/src/Core/DenseBase.h | 14 +-
 Eigen/src/Core/DenseCoeffsBase.h | 4 -
 Eigen/src/Core/Diagonal.h | 24 +-
 Eigen/src/Core/DiagonalMatrix.h | 8 +-
 Eigen/src/Core/EigenBase.h | 8 +-
 Eigen/src/Core/MapBase.h | 1 -
 Eigen/src/Core/Matrix.h | 2 +-
 Eigen/src/Core/MatrixBase.h | 3 +-
 Eigen/src/Core/PermutationMatrix.h | 16 +-
 Eigen/src/Core/PlainObjectBase.h | 3 -
 Eigen/src/Core/Product.h | 10 +-
 Eigen/src/Core/ProductEvaluators.h | 22 ++-
 Eigen/src/Core/ReturnByValue.h | 4 +-
 Eigen/src/Core/SelfAdjointView.h | 4 +-
 Eigen/src/Core/Solve.h | 7 +-
 Eigen/src/Core/StableNorm.h | 1 -
 Eigen/src/Core/Swap.h | 2 +-
 Eigen/src/Core/Transpose.h | 10 +-
 Eigen/src/Core/TriangularMatrix.h | 18 +-
 Eigen/src/Core/util/Macros.h | 22 +--
 Eigen/src/Core/util/XprHelper.h | 17 ++
 Eigen/src/Geometry/Transform.h | 3 +-
 Eigen/src/Householder/HouseholderSequence.h | 8 +-
 .../IterativeLinearSolvers/BasicPreconditioners.h | 3 +-
 Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 1 -
 .../src/IterativeLinearSolvers/ConjugateGradient.h | 1 -
 Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 64 +++----
 .../IterativeLinearSolvers/IterativeSolverBase.h | 6 +-
 Eigen/src/LU/FullPivLU.h | 1 +
 Eigen/src/LU/PartialPivLU.h | 1 +
 Eigen/src/OrderingMethods/Amd.h | 15 +-
 Eigen/src/PaStiXSupport/PaStiXSupport.h | 1 +
 Eigen/src/PardisoSupport/PardisoSupport.h | 2 +-
 Eigen/src/QR/ColPivHouseholderQR.h | 3 +-
 Eigen/src/QR/FullPivHouseholderQR.h | 1 +
 Eigen/src/QR/HouseholderQR.h | 1 +
 Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 20 +--
 Eigen/src/SVD/SVDBase.h | 1 +
 Eigen/src/SparseCholesky/SimplicialCholesky.h | 36 ++--
 Eigen/src/SparseCholesky/SimplicialCholesky_impl.h | 30 ++--
 Eigen/src/SparseCore/AmbiVector.h | 92 +++++-----
 Eigen/src/SparseCore/CompressedStorage.h | 26 +--
 .../SparseCore/ConservativeSparseSparseProduct.h | 21 ++-
 Eigen/src/SparseCore/MappedSparseMatrix.h | 84 ++++-----
 Eigen/src/SparseCore/SparseAssign.h | 3 -
 Eigen/src/SparseCore/SparseBlock.h | 173 +++++++++----------
 Eigen/src/SparseCore/SparseColEtree.h | 10 +-
 Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 36 ++--
 Eigen/src/SparseCore/SparseCwiseUnaryOp.h | 4 +-
 Eigen/src/SparseCore/SparseDenseProduct.h | 16 +-
 Eigen/src/SparseCore/SparseDiagonalProduct.h | 16 +-
 Eigen/src/SparseCore/SparseMatrix.h | 188 ++++++++++-----------
 Eigen/src/SparseCore/SparseMatrixBase.h | 28 +--
 Eigen/src/SparseCore/SparsePermutation.h | 18 +-
 Eigen/src/SparseCore/SparseSelfAdjointView.h | 75 ++++----
 .../SparseCore/SparseSparseProductWithPruning.h | 26 +--
 Eigen/src/SparseCore/SparseTranspose.h | 16 +-
 Eigen/src/SparseCore/SparseTriangularView.h | 22 +--
 Eigen/src/SparseCore/SparseUtil.h | 41 ++---
 Eigen/src/SparseCore/SparseVector.h | 80 ++++-----
 Eigen/src/SparseCore/SparseView.h | 26 +-
Eigen/src/SparseCore/TriangularSolver.h | 16 +- Eigen/src/SparseLU/SparseLU.h | 55 +++--- Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h | 66 ++++---- Eigen/src/SparseQR/SparseQR.h | 46 ++--- Eigen/src/SuperLUSupport/SuperLUSupport.h | 18 +- Eigen/src/UmfPackSupport/UmfPackSupport.h | 10 +- 79 files changed, 891 insertions(+), 886 deletions(-) (limited to 'Eigen') diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 5acbf4651..e2a6ca2b2 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -60,6 +60,7 @@ template class LDLT typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix TmpMatrixType; typedef Transpositions TranspositionType; diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 90194e64d..5e0cf6c88 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -60,6 +60,7 @@ template class LLT typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { PacketSize = internal::packet_traits::size, diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h index 3eadb83a0..3ce3e99d3 100644 --- a/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/Eigen/src/CholmodSupport/CholmodSupport.h @@ -48,8 +48,8 @@ void cholmod_configure_matrix(CholmodType& mat) /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. * Note that the data are shared. */ -template -cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) +template +cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_StorageIndex>& mat) { cholmod_sparse res; res.nzmax = mat.nonZeros(); @@ -74,11 +74,11 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) res.dtype = 0; res.stype = -1; - if (internal::is_same<_Index,int>::value) + if (internal::is_same<_StorageIndex,int>::value) { res.itype = CHOLMOD_INT; } - else if (internal::is_same<_Index,UF_long>::value) + else if (internal::is_same<_StorageIndex,UF_long>::value) { res.itype = CHOLMOD_LONG; } @@ -138,12 +138,12 @@ cholmod_dense viewAsCholmod(MatrixBase& mat) /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. * The data are not copied but shared. 
*/ -template -MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) +template +MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) { - return MappedSparseMatrix - (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], - static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); + return MappedSparseMatrix + (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], + static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); } enum CholmodMode { @@ -169,7 +169,7 @@ class CholmodBase : public SparseSolverBase typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef MatrixType CholMatrixType; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; public: @@ -195,8 +195,8 @@ class CholmodBase : public SparseSolverBase cholmod_finish(&m_cholmod); } - inline Index cols() const { return m_cholmodFactor->n; } - inline Index rows() const { return m_cholmodFactor->n; } + inline StorageIndex cols() const { return internal::convert_index(m_cholmodFactor->n); } + inline StorageIndex rows() const { return internal::convert_index(m_cholmodFactor->n); } /** \brief Reports whether previous computation was successful. * diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h index d42693d4b..82c12076e 100644 --- a/Eigen/src/Core/ArrayBase.h +++ b/Eigen/src/Core/ArrayBase.h @@ -50,7 +50,6 @@ template class ArrayBase typename NumTraits::Scalar>::Real>::operator*; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h index 4db10e697..506bace69 100644 --- a/Eigen/src/Core/AssignEvaluator.h +++ b/Eigen/src/Core/AssignEvaluator.h @@ -179,20 +179,20 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer) + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { - kernel.assignCoeffByOuterInner(outer, Index); - copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + kernel.assignCoeffByOuterInner(outer, Index_); + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); } }; template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index) { } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { } }; /*********************** @@ -246,13 +246,13 @@ struct copy_using_evaluator_innervec_CompleteUnrolling EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; -template +template struct copy_using_evaluator_innervec_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer) + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { - kernel.template assignPacketByOuterInner(outer, Index); - enum { NextIndex = Index + packet_traits::size }; + kernel.template assignPacketByOuterInner(outer, Index_); + enum { NextIndex = Index_ + packet_traits::size }; copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); } }; @@ -260,7 +260,7 @@ struct copy_using_evaluator_innervec_InnerUnrolling 
template struct copy_using_evaluator_innervec_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, typename Kernel::Index) { } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { } }; /*************************************************************************** @@ -283,8 +283,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - for(Index outer = 0; outer < kernel.outerSize(); ++outer) { for(Index inner = 0; inner < kernel.innerSize(); ++inner) { kernel.assignCoeffByOuterInner(outer, inner); @@ -306,7 +304,7 @@ struct dense_assignment_loop template struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -330,7 +328,7 @@ struct unaligned_dense_assignment_loop { // if IsAligned = true, then do nothing template - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index, typename Kernel::Index) {} + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {} }; template <> @@ -342,16 +340,16 @@ struct unaligned_dense_assignment_loop #if EIGEN_COMP_MSVC template static EIGEN_DONT_INLINE void run(Kernel &kernel, - typename Kernel::Index start, - typename Kernel::Index end) + Index start, + Index end) #else template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, - typename Kernel::Index start, - typename Kernel::Index end) + Index start, + Index end) #endif { - for (typename Kernel::Index index = start; index < end; ++index) + for (Index index = start; index < end; ++index) kernel.assignCoeff(index); } }; @@ -361,8 +359,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - const Index size = kernel.size(); typedef packet_traits PacketTraits; enum { @@ -386,7 +382,7 @@ struct dense_assignment_loop template struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -409,8 +405,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - const Index innerSize = kernel.innerSize(); const Index outerSize = kernel.outerSize(); const Index packetSize = packet_traits::size; @@ -433,7 +427,7 @@ struct dense_assignment_loop struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -452,7 +446,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; const Index size = kernel.size(); for(Index i = 0; i < size; ++i) kernel.assignCoeff(i); @@ -478,7 +471,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; typedef packet_traits PacketTraits; enum { packetSize = PacketTraits::size, @@ -533,7 +525,7 @@ public: typedef DstEvaluatorTypeT DstEvaluatorType; typedef SrcEvaluatorTypeT 
SrcEvaluatorType; typedef typename DstEvaluatorType::Scalar Scalar; - typedef typename DstEvaluatorType::Index Index; + typedef typename DstEvaluatorType::StorageIndex StorageIndex; typedef copy_using_evaluator_traits AssignmentTraits; @@ -731,8 +723,8 @@ EIGEN_DEVICE_FUNC void call_assignment_no_alias(Dst& dst, const Src& src, const && int(Dst::SizeAtCompileTime) != 1 }; - typename Dst::Index dstRows = NeedToTranspose ? src.cols() : src.rows(); - typename Dst::Index dstCols = NeedToTranspose ? src.rows() : src.cols(); + Index dstRows = NeedToTranspose ? src.cols() : src.rows(); + Index dstCols = NeedToTranspose ? src.rows() : src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index e59ee3da9..d07ea7056 100644 --- a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -32,7 +32,7 @@ class BandMatrixBase : public EigenBase }; typedef typename internal::traits::Scalar Scalar; typedef Matrix DenseMatrixType; - typedef typename DenseMatrixType::Index Index; + typedef typename DenseMatrixType::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; typedef EigenBase Base; @@ -179,7 +179,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; enum { CoeffReadCost = NumTraits::ReadCost, RowsAtCompileTime = _Rows, @@ -201,7 +201,7 @@ class BandMatrix : public BandMatrixBase::Scalar Scalar; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) @@ -241,7 +241,7 @@ struct traits::CoeffReadCost, RowsAtCompileTime = _Rows, @@ -264,7 +264,7 @@ class BandMatrixWrapper : public BandMatrixBase::Scalar Scalar; typedef typename internal::traits::CoefficientsType CoefficientsType; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) : m_coeffs(coeffs), @@ -312,7 +312,7 @@ template class TridiagonalMatrix : public BandMatrix { typedef BandMatrix Base; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; public: explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index 9cf9d5432..6ea383695 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -154,7 +154,7 @@ class BlockImpl : public internal::BlockImpl_dense { typedef internal::BlockImpl_dense Impl; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -306,13 +306,13 @@ template m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; /** \internal Internal implementation of dense Blocks in the direct access case.*/ diff 
--git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h index 70cbfeff5..98ebe3bf6 100644 --- a/Eigen/src/Core/CommaInitializer.h +++ b/Eigen/src/Core/CommaInitializer.h @@ -28,7 +28,7 @@ template struct CommaInitializer { typedef typename XprType::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h index a0dc72c4d..eb35b44cb 100644 --- a/Eigen/src/Core/CoreEvaluators.h +++ b/Eigen/src/Core/CoreEvaluators.h @@ -111,7 +111,7 @@ struct evaluator_base typedef evaluator type; typedef evaluator nestedType; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices. typedef traits ExpressionTraits; }; @@ -128,7 +128,7 @@ struct evaluator > : evaluator_base { typedef PlainObjectBase PlainObjectType; - typedef typename PlainObjectType::Index Index; + typedef typename PlainObjectType::StorageIndex StorageIndex; typedef typename PlainObjectType::Scalar Scalar; typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; typedef typename PlainObjectType::PacketScalar PacketScalar; @@ -264,7 +264,7 @@ struct unary_evaluator, IndexBased> EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -343,7 +343,7 @@ struct evaluator > : m_functor(n.functor()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -394,7 +394,7 @@ struct unary_evaluator, IndexBased > m_argImpl(op.nestedExpression()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -469,7 +469,7 @@ struct binary_evaluator, IndexBased, IndexBase m_rhsImpl(xpr.rhs()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -522,7 +522,7 @@ struct unary_evaluator, IndexBased> m_argImpl(op.nestedExpression()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; @@ -563,7 +563,7 @@ struct mapbase_evaluator : evaluator_base { typedef Derived XprType; typedef typename XprType::PointerType PointerType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -760,7 +760,7 @@ struct unary_evaluator, IndexBa m_startCol(block.startCol()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename 
XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -865,7 +865,7 @@ struct evaluator > m_elseImpl(select.elseMatrix()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; inline EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const @@ -898,7 +898,7 @@ struct unary_evaluator > : evaluator_base > { typedef Replicate XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketReturnType PacketReturnType; enum { @@ -981,7 +981,7 @@ struct evaluator > : m_expr(expr) {} - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const @@ -1016,7 +1016,7 @@ struct evaluator_wrapper_base EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} - typedef typename ArgType::Index Index; + typedef typename ArgType::StorageIndex StorageIndex; typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; typedef typename ArgType::PacketScalar PacketScalar; @@ -1103,7 +1103,7 @@ struct unary_evaluator > : evaluator_base > { typedef Reverse XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -1219,7 +1219,7 @@ struct evaluator > m_index(diagonal.index()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; diff --git a/Eigen/src/Core/CoreIterators.h b/Eigen/src/Core/CoreIterators.h index 7feebc4e4..141eaa2eb 100644 --- a/Eigen/src/Core/CoreIterators.h +++ b/Eigen/src/Core/CoreIterators.h @@ -36,7 +36,7 @@ protected: typedef internal::inner_iterator_selector::Kind> IteratorType; typedef typename internal::evaluator::type EvaluatorType; typedef typename internal::traits::Scalar Scalar; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) @@ -50,11 +50,11 @@ public: */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } /// \returns the column or row index of the current coefficient. - EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. - EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. - EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. 
EIGEN_STRONG_INLINE operator bool() const { return m_iter; } @@ -77,7 +77,7 @@ class inner_iterator_selector protected: typedef typename evaluator::type EvaluatorType; typedef typename traits::Scalar Scalar; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: @@ -93,9 +93,9 @@ public: EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } - EIGEN_STRONG_INLINE Index index() const { return m_inner; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } @@ -115,7 +115,7 @@ class inner_iterator_selector protected: typedef typename evaluator::InnerIterator Base; typedef typename evaluator::type EvaluatorType; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index a205c3f10..4d4626279 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -59,8 +59,8 @@ struct traits > typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, BinaryOp>::ret StorageKind; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_reference::type _LhsNested; @@ -111,7 +111,7 @@ class CwiseBinaryOp : } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index rows() const { + EIGEN_STRONG_INLINE StorageIndex rows() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); @@ -119,7 +119,7 @@ class CwiseBinaryOp : return m_lhs.rows(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index cols() const { + EIGEN_STRONG_INLINE StorageIndex cols() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index da1d1992d..5388af216 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl class DenseBase typedef typename internal::traits::StorageKind StorageKind; - /** \brief The type of indices + /** \brief The interface type of indices * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. - * \sa \ref TopicPreprocessorDirectives. + * \sa \ref TopicPreprocessorDirectives, StorageIndex. 
*/ - typedef typename internal::traits::Index Index; + typedef Eigen::Index Index; + + /** + * \brief The type used to store indices + * \details This typedef is relevant for types that store multiple indices such as + * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index + * \sa \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. + */ + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h index a9e4dbaf9..569fed956 100644 --- a/Eigen/src/Core/DenseCoeffsBase.h +++ b/Eigen/src/Core/DenseCoeffsBase.h @@ -35,7 +35,6 @@ class DenseCoeffsBase : public EigenBase { public: typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; @@ -287,7 +286,6 @@ class DenseCoeffsBase : public DenseCoeffsBase Base; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; @@ -450,7 +448,6 @@ class DenseCoeffsBase : public DenseCoeffsBase Base; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; @@ -525,7 +522,6 @@ class DenseCoeffsBase public: typedef DenseCoeffsBase Base; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h index 33b82f90f..18f061179 100644 --- a/Eigen/src/Core/Diagonal.h +++ b/Eigen/src/Core/Diagonal.h @@ -70,28 +70,28 @@ template class Diagonal EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DEVICE_FUNC - explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {} + explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(internal::convert_index(a_index)) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_DEVICE_FUNC - inline Index rows() const + inline StorageIndex rows() const { - return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value())) - : numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value())); + return m_index.value()<0 ? 
numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) + : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); } EIGEN_DEVICE_FUNC - inline Index cols() const { return 1; } + inline StorageIndex cols() const { return 1; } EIGEN_DEVICE_FUNC - inline Index innerStride() const + inline StorageIndex innerStride() const { return m_matrix.outerStride() + 1; } EIGEN_DEVICE_FUNC - inline Index outerStride() const + inline StorageIndex outerStride() const { return 0; } @@ -153,23 +153,23 @@ template class Diagonal } EIGEN_DEVICE_FUNC - inline Index index() const + inline StorageIndex index() const { return m_index.value(); } protected: typename MatrixType::Nested m_matrix; - const internal::variable_if_dynamicindex m_index; + const internal::variable_if_dynamicindex m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_STRONG_INLINE StorageIndex absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE StorageIndex rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } + EIGEN_STRONG_INLINE StorageIndex colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } // trigger a compile time error is someone try to call packet template typename MatrixType::PacketReturnType packet(Index) const; template typename MatrixType::PacketReturnType packet(Index,Index) const; diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index e3dc71336..f37091000 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, @@ -108,7 +108,7 @@ struct traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef DiagonalShape StorageKind; - typedef DenseIndex Index; +// typedef DenseIndex Index; enum { Flags = LvalueBit | NoPreferredStorageOrderBit }; @@ -124,7 +124,7 @@ class DiagonalMatrix typedef const DiagonalMatrix& Nested; typedef _Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; #endif protected: @@ -230,7 +230,7 @@ struct traits > { typedef _DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; - typedef typename DiagonalVectorType::Index Index; + typedef typename DiagonalVectorType::StorageIndex StorageIndex; typedef DiagonalShape StorageKind; typedef typename traits::XprKind XprKind; enum { diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h index 52b66e6dc..c98ca467a 100644 --- a/Eigen/src/Core/EigenBase.h +++ b/Eigen/src/Core/EigenBase.h @@ -28,7 +28,7 @@ template struct EigenBase // typedef typename 
internal::plain_matrix_type::type PlainObject; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; /** \returns a reference to the derived object */ EIGEN_DEVICE_FUNC @@ -46,14 +46,14 @@ template struct EigenBase /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ EIGEN_DEVICE_FUNC - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ EIGEN_DEVICE_FUNC - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ EIGEN_DEVICE_FUNC - inline Index size() const { return rows() * cols(); } + inline StorageIndex size() const { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 3c67edae5..3dafee9d7 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -37,7 +37,6 @@ template class MapBase }; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index 0b3d90786..94b1a966e 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -107,7 +107,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Rows, diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 86994cb36..5c00d6a63 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -52,7 +52,8 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef Eigen::Index Index; + typedef Index StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h index 4846f2ae1..886d59a2c 100644 --- a/Eigen/src/Core/PermutationMatrix.h +++ b/Eigen/src/Core/PermutationMatrix.h @@ -67,7 +67,7 @@ class PermutationBase : public EigenBase MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::StorageIndexType StorageIndexType; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; typedef Matrix DenseMatrixType; typedef PermutationMatrix @@ -277,7 +277,7 @@ struct traits IndicesType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::StorageIndex StorageIndex; typedef _StorageIndexType StorageIndexType; }; } @@ -294,7 +294,7 @@ class PermutationMatrix : public PermutationBase, _PacketAccess> IndicesType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::StorageIndex StorageIndex; typedef _StorageIndexType StorageIndexType; }; } @@ -418,7 +418,7 @@ class Map > typedef 
PermutationStorage StorageKind; typedef typename _IndicesType::Scalar Scalar; typedef typename _IndicesType::Scalar StorageIndexType; - typedef typename _IndicesType::Index Index; + typedef typename _IndicesType::StorageIndex StorageIndex; typedef _IndicesType IndicesType; enum { RowsAtCompileTime = _IndicesType::SizeAtCompileTime, @@ -558,7 +558,7 @@ struct permut_matrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix) : m_permutation(perm), m_matrix(matrix) @@ -650,7 +650,7 @@ class Transpose > MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::Scalar Scalar; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; #endif Transpose(const PermutationType& p) : m_permutation(p) {} diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h index 06e326a05..65d69f484 100644 --- a/Eigen/src/Core/PlainObjectBase.h +++ b/Eigen/src/Core/PlainObjectBase.h @@ -95,7 +95,6 @@ class PlainObjectBase : public internal::dense_xpr_base::type typedef typename internal::dense_xpr_base::type Base; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; @@ -846,7 +845,6 @@ namespace internal { template struct conservative_resize_like_impl { - typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; @@ -912,7 +910,6 @@ struct conservative_resize_like_impl { using conservative_resize_like_impl::run; - typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 
1 : size; diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index cb79543ef..8ff13fbba 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -67,8 +67,8 @@ struct traits > typedef typename product_promote_storage_type::ret>::ret StorageKind; - typedef typename promote_index_type::type Index; + typedef typename promote_index_type::type StorageIndex; enum { RowsAtCompileTime = LhsTraits::RowsAtCompileTime, @@ -120,8 +120,8 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option, && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_lhs.rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; } EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; } @@ -149,7 +149,7 @@ class dense_product_base public: using Base::derived; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; operator const Scalar() const { diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h index 3cebbbd12..b2c9b56ed 100644 --- a/Eigen/src/Core/ProductEvaluators.h +++ b/Eigen/src/Core/ProductEvaluators.h @@ -210,7 +210,6 @@ struct generic_product_impl template EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&) { - typedef typename Dst::Index Index; // FIXME make sure lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too // FIXME we should probably build an evaluator for dst and rhs @@ -222,7 +221,6 @@ EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, cons // Row major result template EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) { - typedef typename Dst::Index Index; // FIXME make sure rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too // FIXME we should probably build an evaluator for dst and lhs @@ -372,7 +370,7 @@ struct product_evaluator, ProductTag, DenseShape, : evaluator_base > { typedef Product XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -524,7 +522,7 @@ struct product_evaluator, LazyCoeffBasedProduc template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -535,7 +533,7 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -546,7 +544,7 @@ struct etor_product_packet_impl struct 
etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); @@ -556,7 +554,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); @@ -566,7 +564,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -579,7 +577,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -668,7 +666,7 @@ template { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename internal::packet_traits::type PacketScalar; public: @@ -733,7 +731,7 @@ struct product_evaluator, ProductTag, DiagonalSha using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; @@ -781,7 +779,7 @@ struct product_evaluator, ProductTag, DenseShape, using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h index af01a5567..d2b80d872 100644 --- a/Eigen/src/Core/ReturnByValue.h +++ b/Eigen/src/Core/ReturnByValue.h @@ -61,8 +61,8 @@ template class ReturnByValue EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } - EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast(this)->rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast(this)->cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return static_cast(this)->rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h index b785e8e1e..2d5760066 100644 --- a/Eigen/src/Core/SelfAdjointView.h +++ b/Eigen/src/Core/SelfAdjointView.h @@ -59,7 +59,7 @@ template class SelfAdjointView /** \brief The type of coefficients in this matrix */ typedef typename internal::traits::Scalar Scalar; - typedef typename 
MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { Mode = internal::traits::Mode, @@ -224,7 +224,7 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h index 3905cd616..5a3a4235e 100644 --- a/Eigen/src/Core/Solve.h +++ b/Eigen/src/Core/Solve.h @@ -48,6 +48,7 @@ struct traits > : traits::StorageKind>::PlainObject> { typedef typename solve_traits::StorageKind>::PlainObject PlainObject; + typedef typename promote_index_type::type StorageIndex; typedef traits BaseTraits; enum { Flags = BaseTraits::Flags & RowMajorBit, @@ -62,15 +63,15 @@ template class Solve : public SolveImpl::StorageKind> { public: - typedef typename RhsType::Index Index; typedef typename internal::traits::PlainObject PlainObject; + typedef typename internal::traits::StorageIndex StorageIndex; Solve(const Decomposition &dec, const RhsType &rhs) : m_dec(dec), m_rhs(rhs) {} - EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } - EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC StorageIndex rows() const { return m_dec.cols(); } + EIGEN_DEVICE_FUNC StorageIndex cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index 0b7e39827..83a973365 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h @@ -55,7 +55,6 @@ inline typename NumTraits::Scalar>::Real blueNorm_impl(const EigenBase& _vec) { typedef typename Derived::RealScalar RealScalar; - typedef typename Derived::Index Index; using std::pow; using std::sqrt; using std::abs; diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h index 55319320a..3d4d8b802 100644 --- a/Eigen/src/Core/Swap.h +++ b/Eigen/src/Core/Swap.h @@ -28,7 +28,7 @@ protected: public: typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::DstXprType DstXprType; typedef swap_assign_op Functor; diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index a3b95256f..e1316a73d 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -29,14 +29,10 @@ namespace Eigen { namespace internal { template -struct traits > +struct traits > : public traits { - typedef typename traits::Scalar Scalar; - typedef typename traits::Index Index; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type MatrixTypeNestedPlain; - typedef typename traits::StorageKind StorageKind; - typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTime = MatrixType::RowsAtCompileTime, @@ -68,8 +64,8 @@ template class Transpose EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) - EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_matrix.cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_matrix.rows(); } /** \returns the nested expression */ EIGEN_DEVICE_FUNC 
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index cf0255bce..d8135be27 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -45,7 +45,7 @@ template class TriangularBase : public EigenBase }; typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::FullMatrixType DenseMatrixType; typedef DenseMatrixType DenseType; typedef Derived const& Nested; @@ -54,9 +54,9 @@ template class TriangularBase : public EigenBase inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } EIGEN_DEVICE_FUNC - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } EIGEN_DEVICE_FUNC - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } EIGEN_DEVICE_FUNC @@ -199,7 +199,7 @@ template class TriangularView public: typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::MatrixTypeNestedCleaned NestedExpression; enum { @@ -222,9 +222,9 @@ template class TriangularView { return Base::operator=(other); } EIGEN_DEVICE_FUNC - inline Index rows() const { return m_matrix.rows(); } + inline StorageIndex rows() const { return m_matrix.rows(); } EIGEN_DEVICE_FUNC - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex cols() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; } @@ -325,7 +325,7 @@ template class TriangularViewImpl<_Mat using Base::derived; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; enum { Mode = _Mode, @@ -688,7 +688,7 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; @@ -831,7 +831,7 @@ struct triangular_assignment_loop template struct triangular_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; typedef typename Kernel::Scalar Scalar; EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index bc26043d7..11b7e2887 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -629,7 +629,7 @@ namespace Eigen { typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. 
*/ \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ @@ -639,23 +639,13 @@ namespace Eigen { #define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ - typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ + EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ typedef typename Base::PacketScalar PacketScalar; \ - typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ - MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ + typedef Eigen::Index Index; \ + enum { MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ + MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime}; \ using Base::derived; \ - using Base::const_cast_derived; + using Base::const_cast_derived; #define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b) #define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b) diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h index 09866ad8d..299e5cbc2 100644 --- a/Eigen/src/Core/util/XprHelper.h +++ b/Eigen/src/Core/util/XprHelper.h @@ -26,8 +26,25 @@ namespace Eigen { typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; +/** + * \brief The Index type as used for the API. + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa \ref TopicPreprocessorDirectives, StorageIndex. + */ + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index; + namespace internal { +template +EIGEN_DEVICE_FUNC +inline IndexDest convert_index(const IndexSrc& idx) { + // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away: + eigen_internal_assert(idx <= NumTraits::highest() && "Index value to big for target type"); + return IndexDest(idx); +} + + //classes inheriting no_assignment_operator don't generate a default operator=. 
class no_assignment_operator { diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 7ebde6803..276e94c58 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -66,7 +66,7 @@ template struct traits > { typedef _Scalar Scalar; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; typedef Dense StorageKind; enum { Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1, @@ -202,6 +202,7 @@ public: }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; + typedef DenseIndex StorageIndex; typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h index 4ded2995f..bf2bb59ab 100644 --- a/Eigen/src/Householder/HouseholderSequence.h +++ b/Eigen/src/Householder/HouseholderSequence.h @@ -60,7 +60,7 @@ template struct traits > { typedef typename VectorsType::Scalar Scalar; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime @@ -87,7 +87,7 @@ struct hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -100,7 +100,7 @@ struct hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -131,7 +131,7 @@ template class HouseholderS MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; typedef typename internal::traits::Scalar Scalar; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; typedef HouseholderSequence< typename internal::conditional::IsComplex, diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h index 3991afa8f..a09f81225 100644 --- a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -34,9 +34,8 @@ class DiagonalPreconditioner { typedef _Scalar Scalar; typedef Matrix Vector; - typedef typename Vector::Index Index; - public: + typedef typename Vector::StorageIndex StorageIndex; // this typedef is only to export the scalar type and compile-time dimensions to solve_retval typedef Matrix MatrixType; diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h index 224fe913f..5f55efbe9 100644 --- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -159,7 +159,6 @@ class BiCGSTAB : public IterativeSolverBase(row.size()); /* length of the vector */ Index first, last ; ncut--; /* to fit the zero-based indices */ @@ -105,7 +105,7 @@ class IncompleteLUT : public SparseSolverBase > typedef Matrix Vector; typedef SparseMatrix FactorType; typedef SparseMatrix PermutType; - typedef typename 
FactorType::Index Index; + typedef typename FactorType::StorageIndex StorageIndex; public: typedef Matrix MatrixType; @@ -124,9 +124,9 @@ class IncompleteLUT : public SparseSolverBase > compute(mat); } - Index rows() const { return m_lu.rows(); } + StorageIndex rows() const { return m_lu.rows(); } - Index cols() const { return m_lu.cols(); } + StorageIndex cols() const { return m_lu.cols(); } /** \brief Reports whether previous computation was successful. * @@ -189,8 +189,8 @@ protected: bool m_analysisIsOk; bool m_factorizationIsOk; ComputationInfo m_info; - PermutationMatrix m_P; // Fill-reducing permutation - PermutationMatrix m_Pinv; // Inverse permutation + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation }; /** @@ -218,14 +218,14 @@ template void IncompleteLUT::analyzePattern(const _MatrixType& amat) { // Compute the Fill-reducing permutation - SparseMatrix mat1 = amat; - SparseMatrix mat2 = amat.transpose(); + SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); // Symmetrize the pattern // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... - SparseMatrix AtA = mat2 + mat1; + SparseMatrix AtA = mat2 + mat1; AtA.prune(keep_diag()); - internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... + internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... m_Pinv = m_P.inverse(); // ... and the inverse permutation @@ -241,7 +241,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) using std::abs; eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); - Index n = amat.cols(); // Size of the matrix + StorageIndex n = amat.cols(); // Size of the matrix m_lu.resize(n,n); // Declare Working vectors and variables Vector u(n) ; // real values of the row -- maximum size is n -- @@ -250,7 +250,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) // Apply the fill-reducing permutation eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - SparseMatrix mat; + SparseMatrix mat; mat = amat.twistedBy(m_Pinv); // Initialization @@ -259,21 +259,21 @@ void IncompleteLUT::factorize(const _MatrixType& amat) u.fill(0); // number of largest elements to keep in each row: - Index fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; + StorageIndex fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; if (fill_in > n) fill_in = n; // number of largest nonzero elements to keep in the L and the U part of the current row: - Index nnzL = fill_in/2; - Index nnzU = nnzL; + StorageIndex nnzL = fill_in/2; + StorageIndex nnzU = nnzL; m_lu.reserve(n * (nnzL + nnzU + 1)); // global loop over the rows of the sparse matrix - for (Index ii = 0; ii < n; ii++) + for (StorageIndex ii = 0; ii < n; ii++) { // 1 - copy the lower and the upper part of the row i of mat in the working vector u - Index sizeu = 1; // number of nonzero elements in the upper part of the current row - Index sizel = 0; // number of nonzero elements in the lower part of the current row + StorageIndex sizeu = 1; // number of nonzero elements in the upper part of the current row + StorageIndex sizel = 0; // number of nonzero elements in the lower part of the current row ju(ii) = ii; u(ii) = 0; jr(ii) = ii; @@ -282,7 +282,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) typename FactorType::InnerIterator j_it(mat, ii); // Iterate 
through the current row ii for (; j_it; ++j_it) { - Index k = j_it.index(); + StorageIndex k = j_it.index(); if (k < ii) { // copy the lower part @@ -298,7 +298,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) else { // copy the upper part - Index jpos = ii + sizeu; + StorageIndex jpos = ii + sizeu; ju(jpos) = k; u(jpos) = j_it.value(); jr(k) = jpos; @@ -317,19 +317,19 @@ void IncompleteLUT::factorize(const _MatrixType& amat) rownorm = sqrt(rownorm); // 3 - eliminate the previous nonzero rows - Index jj = 0; - Index len = 0; + StorageIndex jj = 0; + StorageIndex len = 0; while (jj < sizel) { // In order to eliminate in the correct order, // we must select first the smallest column index among ju(jj:sizel) - Index k; - Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + StorageIndex k; + StorageIndex minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment k += jj; if (minrow != ju(jj)) { // swap the two locations - Index j = ju(jj); + StorageIndex j = ju(jj); swap(ju(jj), ju(k)); jr(minrow) = jj; jr(j) = k; swap(u(jj), u(k)); @@ -355,11 +355,11 @@ void IncompleteLUT::factorize(const _MatrixType& amat) for (; ki_it; ++ki_it) { Scalar prod = fact * ki_it.value(); - Index j = ki_it.index(); - Index jpos = jr(j); + StorageIndex j = ki_it.index(); + StorageIndex jpos = jr(j); if (jpos == -1) // fill-in element { - Index newpos; + StorageIndex newpos; if (j >= ii) // dealing with the upper part { newpos = ii + sizeu; @@ -388,7 +388,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) } // end of the elimination on the row ii // reset the upper part of the pointer jr to zero - for(Index k = 0; k ::factorize(const _MatrixType& amat) // store the largest m_fill elements of the L part m_lu.startVec(ii); - for(Index k = 0; k < len; k++) + for(StorageIndex k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); // store the diagonal element @@ -413,7 +413,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) // sort the U-part of the row // apply the dropping rule first len = 0; - for(Index k = 1; k < sizeu; k++) + for(StorageIndex k = 1; k < sizeu; k++) { if(abs(u(ii+k)) > m_droptol * rownorm ) { @@ -429,7 +429,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) internal::QuickSplit(uu, juu, len); // store the largest elements of the U part - for(Index k = ii + 1; k < ii + len; k++) + for(StorageIndex k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); } diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h index f33c868bb..cc99e00f9 100644 --- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -28,7 +28,7 @@ public: typedef typename internal::traits::MatrixType MatrixType; typedef typename internal::traits::Preconditioner Preconditioner; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; public: @@ -115,9 +115,9 @@ public: } /** \internal */ - Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; } + StorageIndex rows() const { return mp_matrix ? mp_matrix->rows() : 0; } /** \internal */ - Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; } + StorageIndex cols() const { return mp_matrix ? 
mp_matrix->cols() : 0; } /** \returns the tolerance threshold used by the stopping criteria */ RealScalar tolerance() const { return m_tolerance; } diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index 96f2cebee..eb4520004 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -67,6 +67,7 @@ template class FullPivLU typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename internal::plain_row_type::type IntRowVectorType; typedef typename internal::plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index d04e4191b..7e2c8b471 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -73,6 +73,7 @@ template class PartialPivLU typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef PermutationMatrix PermutationType; typedef Transpositions TranspositionType; typedef typename MatrixType::PlainObject PlainObject; diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h index ce7c0bbf3..50022d1ca 100644 --- a/Eigen/src/OrderingMethods/Amd.h +++ b/Eigen/src/OrderingMethods/Amd.h @@ -42,7 +42,7 @@ template inline void amd_mark(const T0* w, const T1& j /* clear w */ template -static int cs_wclear (Index mark, Index lemax, Index *w, Index n) +static Index cs_wclear (Index mark, Index lemax, Index *w, Index n) { Index k; if(mark < 2 || (mark + lemax < 0)) @@ -59,7 +59,7 @@ static int cs_wclear (Index mark, Index lemax, Index *w, Index n) template Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack) { - int i, p, top = 0; + Index i, p, top = 0; if(!head || !next || !post || !stack) return (-1); /* check inputs */ stack[0] = j; /* place j on the stack */ while (top >= 0) /* while (stack is not empty) */ @@ -92,11 +92,12 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation { using std::sqrt; - int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, - k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, - ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t; - unsigned int h; + Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, + k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, + ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t; + std::size_t h; + Index n = C.cols(); dense = std::max (16, Index(10 * sqrt(double(n)))); /* find dense threshold */ dense = std::min (n-2, dense); @@ -330,7 +331,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation h %= n; /* finalize hash of i */ next[i] = hhead[h]; /* place i in hash bucket */ hhead[h] = i; - last[i] = h; /* save hash of i in last[i] */ + last[i] = Index(h); /* save hash of i in last[i] */ } } /* scan2 is done */ degree[k] = dk; /* finalize |Lk| */ diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h index a96c27695..27acf4128 100644 --- a/Eigen/src/PaStiXSupport/PaStiXSupport.h +++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -139,6 +139,7 @@ class PastixBase : public SparseSolverBase typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename 
MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef SparseMatrix ColSpMatrix; diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h index 054af6635..7c75dcb7f 100644 --- a/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/Eigen/src/PardisoSupport/PardisoSupport.h @@ -110,7 +110,7 @@ class PardisoImpl : public SparseSolveBase typedef typename Traits::MatrixType MatrixType; typedef typename Traits::Scalar Scalar; typedef typename Traits::RealScalar RealScalar; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; typedef SparseMatrix SparseMatrixType; typedef Matrix VectorType; typedef Matrix IntRowVectorType; diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index de77e8411..6fade3755 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -58,6 +58,7 @@ template class ColPivHouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef PermutationMatrix PermutationType; @@ -69,7 +70,7 @@ template class ColPivHouseholderQR private: - typedef typename PermutationType::Index PermIndexType; + typedef typename PermutationType::StorageIndex PermIndexType; public: diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index 5712d175c..90ab25b2b 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -67,6 +67,7 @@ template class FullPivHouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef Matrix class HouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef typename internal::plain_row_type::type RowVectorType; diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index 44f6a1acb..5fd18b787 100644 --- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -63,9 +63,9 @@ class SPQR : public SparseSolverBase > public: typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; - typedef UF_long Index ; - typedef SparseMatrix MatrixType; - typedef Map > PermutationType; + typedef UF_long StorageIndex ; + typedef SparseMatrix MatrixType; + typedef Map > PermutationType; public: SPQR() : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()) @@ -150,7 +150,7 @@ class SPQR : public SparseSolverBase > { eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); if(!m_isRUpToDate) { - m_R = viewAsEigen(*m_cR); + m_R = viewAsEigen(*m_cR); m_isRUpToDate = true; } return m_R; @@ -204,11 +204,11 @@ class SPQR : public 
SparseSolverBase > RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format mutable MatrixType m_R; // The sparse matrix R in Eigen format - mutable Index *m_E; // The permutation applied to columns + mutable StorageIndex *m_E; // The permutation applied to columns mutable cholmod_sparse *m_H; //The householder vectors - mutable Index *m_HPinv; // The row permutation of H + mutable StorageIndex *m_HPinv; // The row permutation of H mutable cholmod_dense *m_HTau; // The Householder coefficients - mutable Index m_rank; // The rank of the matrix + mutable StorageIndex m_rank; // The rank of the matrix mutable cholmod_common m_cc; // Workspace and parameters template friend struct SPQR_QProduct; }; @@ -217,12 +217,12 @@ template struct SPQR_QProduct : ReturnByValue > { typedef typename SPQRType::Scalar Scalar; - typedef typename SPQRType::Index Index; + typedef typename SPQRType::StorageIndex StorageIndex; //Define the constructor to get reference to argument types SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {} - inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } - inline Index cols() const { return m_other.cols(); } + inline StorageIndex rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } + inline StorageIndex cols() const { return m_other.cols(); } // Assign to a vector template void evalTo(ResType& res) const diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 27b732b80..0bc2ede28 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -53,6 +53,7 @@ public: typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h index 918a34e13..b148d6b1f 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -44,8 +44,8 @@ class SimplicialCholeskyBase : public SparseSolverBase enum { UpLo = internal::traits::UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; public: @@ -70,8 +70,8 @@ class SimplicialCholeskyBase : public SparseSolverBase Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } /** \brief Reports whether previous computation was successful. 
* @@ -216,16 +216,16 @@ class SimplicialCholeskyBase : public SparseSolverBase VectorType m_diag; // the diagonal coefficients (LDLT mode) VectorXi m_parent; // elimination tree VectorXi m_nonZerosPerCol; - PermutationMatrix m_P; // the permutation - PermutationMatrix m_Pinv; // the inverse permutation + PermutationMatrix m_P; // the permutation + PermutationMatrix m_Pinv; // the inverse permutation RealScalar m_shiftOffset; RealScalar m_shiftScale; }; -template > class SimplicialLLT; -template > class SimplicialLDLT; -template > class SimplicialCholesky; +template > class SimplicialLLT; +template > class SimplicialLDLT; +template > class SimplicialCholesky; namespace internal { @@ -235,8 +235,8 @@ template struct traits CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef TriangularView MatrixL; typedef TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } @@ -249,8 +249,8 @@ template struct traits CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef TriangularView MatrixL; typedef TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } @@ -293,7 +293,7 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; @@ -382,8 +382,8 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef typename Traits::MatrixL MatrixL; @@ -464,8 +464,8 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef internal::traits > LDLTTraits; diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h index 7aaf702be..302323ab4 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h @@ -57,7 +57,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ @@ -82,7 +82,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix } /* construct Lp index array from m_nonZerosPerCol column counts */ - Index* Lp = m_matrix.outerIndexPtr(); + StorageIndex* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; for(Index k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1); @@ -104,35 +104,35 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); - const Index size = ap.rows(); + const StorageIndex size = ap.rows(); eigen_assert(m_parent.size()==size); eigen_assert(m_nonZerosPerCol.size()==size); - const Index* Lp = m_matrix.outerIndexPtr(); - Index* Li = m_matrix.innerIndexPtr(); + const StorageIndex* Lp = m_matrix.outerIndexPtr(); + StorageIndex* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); bool ok = true; m_diag.resize(DoLDLT ? size : 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero - Index top = size; // stack for pattern is empty + StorageIndex top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename MatrixType::InnerIterator it(ap,k); it; ++it) { - Index i = it.index(); + StorageIndex i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ - Index len; + StorageIndex len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ @@ -149,7 +149,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& y[k] = 0.0; for(; top < size; ++top) { - Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + StorageIndex i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; @@ -160,8 +160,8 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& else yi = l_ki = yi / Lx[Lp[i]]; - Index p2 = Lp[i] + m_nonZerosPerCol[i]; - Index p; + StorageIndex p2 = Lp[i] + m_nonZerosPerCol[i]; + StorageIndex p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); @@ -180,7 +180,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& } else { - Index p = Lp[k] + m_nonZerosPerCol[k]++; + StorageIndex p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h index 5c9c3101e..759fc08ff 100644 --- a/Eigen/src/SparseCore/AmbiVector.h +++ b/Eigen/src/SparseCore/AmbiVector.h @@ -19,12 +19,12 @@ namespace internal { * * See BasicSparseLLT and SparseProduct for usage examples. 
*/ -template +template class AmbiVector { public: typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; typedef typename NumTraits::Real RealScalar; explicit AmbiVector(Index size) @@ -36,10 +36,10 @@ class AmbiVector void init(double estimatedDensity); void init(int mode); - Index nonZeros() const; + StorageIndex nonZeros() const; /** Specifies a sub-vector to work on */ - void setBounds(Index start, Index end) { m_start = start; m_end = end; } + void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); } void setZero(); @@ -55,12 +55,16 @@ class AmbiVector { if (m_allocatedSize < size) reallocate(size); - m_size = size; + m_size = convert_index(size); } - Index size() const { return m_size; } + StorageIndex size() const { return m_size; } protected: + StorageIndex convert_index(Index idx) + { + return internal::convert_index(idx); + } void reallocate(Index size) { @@ -70,15 +74,15 @@ class AmbiVector if (size<1000) { Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar); - m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); + m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl)); m_buffer = new Scalar[allocSize]; } else { - m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl); + m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl)); m_buffer = new Scalar[size]; } - m_size = size; + m_size = convert_index(size); m_start = 0; m_end = m_size; } @@ -86,7 +90,7 @@ class AmbiVector void reallocateSparse() { Index copyElements = m_allocatedElements; - m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); + m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); Scalar* newBuffer = new Scalar[allocSize]; @@ -99,30 +103,30 @@ class AmbiVector // element type of the linked list struct ListEl { - Index next; - Index index; + StorageIndex next; + StorageIndex index; Scalar value; }; // used to store data in both mode Scalar* m_buffer; Scalar m_zero; - Index m_size; - Index m_start; - Index m_end; - Index m_allocatedSize; - Index m_allocatedElements; - Index m_mode; + StorageIndex m_size; + StorageIndex m_start; + StorageIndex m_end; + StorageIndex m_allocatedSize; + StorageIndex m_allocatedElements; + StorageIndex m_mode; // linked list mode - Index m_llStart; - Index m_llCurrent; - Index m_llSize; + StorageIndex m_llStart; + StorageIndex m_llCurrent; + StorageIndex m_llSize; }; /** \returns the number of non zeros in the current sub vector */ -template -_Index AmbiVector<_Scalar,_Index>::nonZeros() const +template +_StorageIndex AmbiVector<_Scalar,_StorageIndex>::nonZeros() const { if (m_mode==IsSparse) return m_llSize; @@ -130,8 +134,8 @@ _Index AmbiVector<_Scalar,_Index>::nonZeros() const return m_end - m_start; } -template -void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) +template +void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity) { if (estimatedDensity>0.1) init(IsDense); @@ -139,8 +143,8 @@ void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) init(IsSparse); } -template -void AmbiVector<_Scalar,_Index>::init(int mode) +template +void AmbiVector<_Scalar,_StorageIndex>::init(int mode) { m_mode = mode; if (m_mode==IsSparse) @@ -155,15 +159,15 @@ void AmbiVector<_Scalar,_Index>::init(int mode) * * Don't worry, this function is extremely cheap. 
*/ -template -void AmbiVector<_Scalar,_Index>::restart() +template +void AmbiVector<_Scalar,_StorageIndex>::restart() { m_llCurrent = m_llStart; } /** Set all coefficients of current subvector to zero */ -template -void AmbiVector<_Scalar,_Index>::setZero() +template +void AmbiVector<_Scalar,_StorageIndex>::setZero() { if (m_mode==IsDense) { @@ -178,8 +182,8 @@ void AmbiVector<_Scalar,_Index>::setZero() } } -template -_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) +template +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -195,7 +199,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) m_llCurrent = 0; ++m_llSize; llElements[0].value = Scalar(0); - llElements[0].index = i; + llElements[0].index = convert_index(i); llElements[0].next = -1; return llElements[0].value; } @@ -204,7 +208,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) // this is going to be the new first element of the list ListEl& el = llElements[m_llSize]; el.value = Scalar(0); - el.index = i; + el.index = convert_index(i); el.next = m_llStart; m_llStart = m_llSize; ++m_llSize; @@ -213,7 +217,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) } else { - Index nextel = llElements[m_llCurrent].next; + StorageIndex nextel = llElements[m_llCurrent].next; eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { @@ -237,7 +241,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) // let's insert a new coefficient ListEl& el = llElements[m_llSize]; el.value = Scalar(0); - el.index = i; + el.index = convert_index(i); el.next = llElements[m_llCurrent].next; llElements[m_llCurrent].next = m_llSize; ++m_llSize; @@ -247,8 +251,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) } } -template -_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) +template +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -275,8 +279,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) } /** Iterator over the nonzero coefficients */ -template -class AmbiVector<_Scalar,_Index>::Iterator +template +class AmbiVector<_Scalar,_StorageIndex>::Iterator { public: typedef _Scalar Scalar; @@ -320,7 +324,7 @@ class AmbiVector<_Scalar,_Index>::Iterator } } - Index index() const { return m_cachedIndex; } + StorageIndex index() const { return m_cachedIndex; } Scalar value() const { return m_cachedValue; } operator bool() const { return m_cachedIndex>=0; } @@ -359,9 +363,9 @@ class AmbiVector<_Scalar,_Index>::Iterator protected: const AmbiVector& m_vector; // the target vector - Index m_currentEl; // the current element in sparse/linked-list mode + StorageIndex m_currentEl; // the current element in sparse/linked-list mode RealScalar m_epsilon; // epsilon used to prune zero coefficients - Index m_cachedIndex; // current coordinate + StorageIndex m_cachedIndex; // current coordinate Scalar m_cachedValue; // current value bool m_isDense; // mode of the vector }; diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h index 2741f8292..f98b42760 100644 --- a/Eigen/src/SparseCore/CompressedStorage.h +++ b/Eigen/src/SparseCore/CompressedStorage.h @@ -18,13 +18,13 @@ namespace internal { * Stores a sparse set of values as a list of values and a list of indices. 
* */ -template +template class CompressedStorage { public: typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; protected: @@ -92,10 +92,10 @@ class CompressedStorage void append(const Scalar& v, Index i) { - Index id = static_cast(m_size); + Index id = m_size; resize(m_size+1, 1); m_values[id] = v; - m_indices[id] = i; + m_indices[id] = internal::convert_index(i); } inline size_t size() const { return m_size; } @@ -105,17 +105,17 @@ class CompressedStorage inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } - inline Index& index(size_t i) { return m_indices[i]; } - inline const Index& index(size_t i) const { return m_indices[i]; } + inline StorageIndex& index(size_t i) { return m_indices[i]; } + inline const StorageIndex& index(size_t i) const { return m_indices[i]; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ - inline Index searchLowerIndex(Index key) const + inline StorageIndex searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ - inline Index searchLowerIndex(size_t start, size_t end, Index key) const + inline StorageIndex searchLowerIndex(size_t start, size_t end, Index key) const { while(end>start) { @@ -125,7 +125,7 @@ class CompressedStorage else end = mid; } - return static_cast(start); + return static_cast(start); } /** \returns the stored value at index \a key @@ -167,7 +167,7 @@ class CompressedStorage { m_allocatedSize = 2*(m_size+1); internal::scoped_array newValues(m_allocatedSize); - internal::scoped_array newIndices(m_allocatedSize); + internal::scoped_array newIndices(m_allocatedSize); // copy first chunk internal::smart_copy(m_values, m_values +id, newValues.ptr()); @@ -188,7 +188,7 @@ class CompressedStorage internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); } m_size++; - m_indices[id] = key; + m_indices[id] = convert_index(key); m_values[id] = defaultValue; } return m_values[id]; @@ -216,7 +216,7 @@ class CompressedStorage { eigen_internal_assert(size!=m_allocatedSize); internal::scoped_array newValues(size); - internal::scoped_array newIndices(size); + internal::scoped_array newIndices(size); size_t copySize = (std::min)(size, m_size); internal::smart_copy(m_values, m_values+copySize, newValues.ptr()); internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr()); @@ -227,7 +227,7 @@ class CompressedStorage protected: Scalar* m_values; - Index* m_indices; + StorageIndex* m_indices; size_t m_size; size_t m_allocatedSize; diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h index a30522ff7..244f1b50e 100644 --- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -18,7 +18,6 @@ template static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false) { typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. 
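Dropping the local Index typedef in conservative_sparse_sparse_product_impl above works because, together with the global Eigen::Index typedef introduced in XprHelper.h, generic sparse code can now take its sizes and loop counters straight from Eigen::Index and reserve StorageIndex for the stored inner/outer index arrays. A minimal sketch of that usage follows; sum_of_values is an illustrative helper, not part of this patch.

#include <Eigen/SparseCore>

// Eigen::Index drives the outer loop and sizes; the matrix's InnerIterator
// walks the stored (StorageIndex-typed) entries of each outer vector.
template <typename SparseMat>
typename SparseMat::Scalar sum_of_values(const SparseMat& m) {
  typename SparseMat::Scalar s(0);
  for (Eigen::Index j = 0; j < m.outerSize(); ++j)
    for (typename SparseMat::InnerIterator it(m, j); it; ++it)
      s += it.value();
  return s;
}

int main() {
  Eigen::SparseMatrix<double> A(4, 4);
  A.insert(0, 1) = 2.0;
  A.insert(3, 2) = 3.0;
  A.makeCompressed();
  return sum_of_values(A) == 5.0 ? 0 : 1;
}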
Index rows = lhs.innerSize(); @@ -137,8 +136,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrixAux; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrixAux; typedef typename sparse_eval::type ColMajorMatrix; // If the result is tall and thin (in the extreme case a column vector) @@ -167,7 +166,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); @@ -180,7 +179,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); @@ -193,7 +192,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; @@ -208,7 +207,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; @@ -220,7 +219,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); @@ -233,7 +232,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); @@ -246,8 +245,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h index 2852c669a..5e4580329 100644 --- a/Eigen/src/SparseCore/MappedSparseMatrix.h +++ b/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -22,14 +22,14 @@ namespace Eigen { * */ namespace internal { -template -struct traits > : traits > +template +struct traits > : traits > {}; } -template +template class MappedSparseMatrix - : public SparseMatrixBase > + : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix) @@ -37,19 +37,19 @@ class MappedSparseMatrix protected: - Index m_outerSize; - Index m_innerSize; - Index m_nnz; - Index* m_outerIndex; - Index* m_innerIndices; + StorageIndex m_outerSize; + StorageIndex m_innerSize; + StorageIndex m_nnz; + StorageIndex* m_outerIndex; + StorageIndex* m_innerIndices; Scalar* m_values; public: - inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } - inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } - inline Index innerSize() const { return m_innerSize; } - inline Index outerSize() const { return m_outerSize; } + inline StorageIndex rows() const { return IsRowMajor ? 
m_outerSize : m_innerSize; } + inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline StorageIndex innerSize() const { return m_innerSize; } + inline StorageIndex outerSize() const { return m_outerSize; } bool isCompressed() const { return true; } @@ -58,11 +58,11 @@ class MappedSparseMatrix inline const Scalar* valuePtr() const { return m_values; } inline Scalar* valuePtr() { return m_values; } - inline const Index* innerIndexPtr() const { return m_innerIndices; } - inline Index* innerIndexPtr() { return m_innerIndices; } + inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; } + inline StorageIndex* innerIndexPtr() { return m_innerIndices; } - inline const Index* outerIndexPtr() const { return m_outerIndex; } - inline Index* outerIndexPtr() { return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } //---------------------------------------- inline Scalar coeff(Index row, Index col) const @@ -79,7 +79,7 @@ class MappedSparseMatrix // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) - const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); + const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); const Index id = r-&m_innerIndices[0]; return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); + StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); const Index id = r-&m_innerIndices[0]; eigen_assert((*r==inner) && (id -class MappedSparseMatrix::InnerIterator +template +class MappedSparseMatrix::InnerIterator { public: InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), - m_outer(outer), + m_outer(convert_index(outer)), m_id(mat.outerIndexPtr()[outer]), m_start(m_id), m_end(mat.outerIndexPtr()[outer+1]) @@ -131,22 +131,22 @@ class MappedSparseMatrix::InnerIterator inline Scalar value() const { return m_matrix.valuePtr()[m_id]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id]); } - inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id]; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const MappedSparseMatrix& m_matrix; - const Index m_outer; - Index m_id; - const Index m_start; - const Index m_end; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; + const StorageIndex m_end; }; -template -class MappedSparseMatrix::ReverseInnerIterator +template +class MappedSparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer) @@ -162,18 +162,18 @@ class MappedSparseMatrix::ReverseInnerIterator inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id-1]); } - inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id-1]; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); } protected: const MappedSparseMatrix& m_matrix; - const Index m_outer; - Index m_id; - const Index m_start; - const Index m_end; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; + const StorageIndex m_end; }; namespace internal { diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h index 97c079d3f..469c2b188 100644 --- a/Eigen/src/SparseCore/SparseAssign.h +++ b/Eigen/src/SparseCore/SparseAssign.h @@ -71,7 +71,6 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename DstXprType::Index Index; typedef typename DstXprType::Scalar Scalar; typedef typename internal::evaluator::type DstEvaluatorType; typedef typename internal::evaluator::type SrcEvaluatorType; @@ -144,7 +143,6 @@ struct Assignment static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename SrcXprType::Index Index; typename internal::evaluator::type srcEval(src); typename internal::evaluator::type dstEval(dst); @@ -161,7 +159,6 @@ struct Assignment &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename SrcXprType::Index Index; dst.setZero(); typename internal::evaluator::type srcEval(src); diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index 9e4da2057..8db4bbb75 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -27,39 +27,39 @@ public: EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) inline BlockImpl(const XprType& xpr, Index i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) {} inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) {} - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } - Index nonZeros() const + StorageIndex nonZeros() const { typedef typename internal::evaluator::type EvaluatorType; EvaluatorType matEval(m_matrix); - Index nnz = 0; + StorageIndex nnz = 0; Index end = m_outerStart + m_outerSize.value(); - for(int j=m_outerStart; j m_outerSize; + StorageIndex m_outerStart; + const internal::variable_if_dynamic m_outerSize; public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -82,15 +82,16 @@ public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: + typedef typename Base::IndexVector IndexVector; enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) {} inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) {} template @@ -102,14 +103,14 @@ public: // and/or it is not at the end of the nonzeros of the underlying matrix. // 1 - eval to a temporary to avoid transposition and/or aliasing issues - SparseMatrix tmp(other); + SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory - Index nnz = tmp.nonZeros(); - Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block - Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block - Index block_size = end - start; // available room in the current block - Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; + StorageIndex nnz = tmp.nonZeros(); + StorageIndex start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block + StorageIndex end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block + StorageIndex block_size = end - start; // available room in the current block + StorageIndex tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; Index free_size = m_matrix.isCompressed() ? Index(matrix.data().allocatedSize()) + block_size @@ -151,7 +152,7 @@ public: matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros(); // update outer index pointers - Index p = start; + StorageIndex p = start; for(Index k=0; k >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); + return Map(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } const Scalar& lastCoeff() const @@ -207,32 +208,32 @@ public: return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - Index startRow() const { return IsRowMajor ? m_outerStart : 0; } - Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } - Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + StorageIndex startRow() const { return IsRowMajor ? m_outerStart : 0; } + StorageIndex startCol() const { return IsRowMajor ? 0 : m_outerStart; } + StorageIndex blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + StorageIndex blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; + StorageIndex m_outerStart; + const internal::variable_if_dynamic m_outerSize; }; } // namespace internal -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> { public: - typedef _Index Index; - typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef _StorageIndex StorageIndex; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; typedef internal::sparse_matrix_block_impl Base; inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) @@ -245,13 +246,13 @@ public: using Base::operator=; }; -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> { public: - typedef _Index Index; - typedef const SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef _StorageIndex StorageIndex; + typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; typedef internal::sparse_matrix_block_impl Base; inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) @@ -333,8 +334,8 @@ public: */ inline BlockImpl(const XprType& xpr, Index i) : m_matrix(xpr), - m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), - m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) {} @@ -342,11 +343,11 @@ public: /** Dynamic-size constructor */ inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) + : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols)) {} - inline Index rows() const { return m_blockRows.value(); } - inline Index cols() const { return m_blockCols.value(); } + inline StorageIndex rows() const { return m_blockRows.value(); } + inline StorageIndex cols() const { return m_blockCols.value(); } inline Scalar& coeffRef(Index row, Index col) { @@ -374,10 +375,10 @@ public: } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - Index startRow() const { return m_startRow.value(); } - Index startCol() const { return m_startCol.value(); } - Index blockRows() const { return m_blockRows.value(); } - Index blockCols() const { return m_blockCols.value(); } + StorageIndex startRow() const { return m_startRow.value(); } + StorageIndex startCol() const { return m_startCol.value(); } + StorageIndex blockRows() const { return m_blockRows.value(); } + StorageIndex blockCols() const { return m_blockCols.value(); } protected: friend class internal::GenericSparseBlockInnerIteratorImpl; @@ -386,10 +387,10 @@ public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; - const internal::variable_if_dynamic m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; @@ -402,7 +403,7 @@ namespace internal { IsRowMajor = BlockType::IsRowMajor }; typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; - typedef typename BlockType::Index Index; + typedef typename BlockType::StorageIndex StorageIndex; typedef typename _MatrixTypeNested::InnerIterator Base; const BlockType& m_block; Index m_end; @@ -417,10 +418,10 @@ namespace internal { Base::operator++(); } - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } + inline StorageIndex index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline StorageIndex outer() const { return Base::outer() - (IsRowMajor ? 
m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline StorageIndex row() const { return Base::row() - m_block.m_startRow.value(); } + inline StorageIndex col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } }; @@ -434,13 +435,13 @@ namespace internal { IsRowMajor = BlockType::IsRowMajor }; typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; - typedef typename BlockType::Index Index; + typedef typename BlockType::StorageIndex StorageIndex; typedef typename BlockType::Scalar Scalar; const BlockType& m_block; - Index m_outerPos; - Index m_innerIndex; + StorageIndex m_outerPos; + StorageIndex m_innerIndex; Scalar m_value; - Index m_end; + StorageIndex m_end; public: explicit EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0) @@ -456,10 +457,10 @@ namespace internal { ++(*this); } - inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return 0; } - inline Index row() const { return IsRowMajor ? 0 : index(); } - inline Index col() const { return IsRowMajor ? index() : 0; } + inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline StorageIndex outer() const { return 0; } + inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : 0; } inline Scalar value() const { return m_value; } @@ -491,7 +492,7 @@ struct unary_evaluator, IteratorBa class OuterVectorInnerIterator; public: typedef Block XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; class ReverseInnerIterator; @@ -538,10 +539,10 @@ public: EvalIterator::operator++(); } - inline Index index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } - inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } - inline Index row() const { return EvalIterator::row() - m_block.startRow(); } - inline Index col() const { return EvalIterator::col() - m_block.startCol(); } + inline StorageIndex index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } + inline StorageIndex outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } + inline StorageIndex row() const { return EvalIterator::row() - m_block.startRow(); } + inline StorageIndex col() const { return EvalIterator::col() - m_block.startCol(); } inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; } }; @@ -550,10 +551,10 @@ template class unary_evaluator, IteratorBased>::OuterVectorInnerIterator { const unary_evaluator& m_eval; - Index m_outerPos; - Index m_innerIndex; + StorageIndex m_outerPos; + StorageIndex m_innerIndex; Scalar m_value; - Index m_end; + StorageIndex m_end; public: EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer) @@ -568,10 +569,10 @@ public: ++(*this); } - inline Index index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } - inline Index outer() const { return 0; } - inline Index row() const { return IsRowMajor ? 
0 : index(); } - inline Index col() const { return IsRowMajor ? index() : 0; } + inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } + inline StorageIndex outer() const { return 0; } + inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : 0; } inline Scalar value() const { return m_value; } diff --git a/Eigen/src/SparseCore/SparseColEtree.h b/Eigen/src/SparseCore/SparseColEtree.h index f8745f461..88c799068 100644 --- a/Eigen/src/SparseCore/SparseColEtree.h +++ b/Eigen/src/SparseCore/SparseColEtree.h @@ -58,10 +58,10 @@ Index etree_find (Index i, IndexVector& pp) * \param perm The permutation to apply to the column of \b mat */ template -int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0) +int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) { - typedef typename MatrixType::Index Index; - Index nc = mat.cols(); // Number of columns + typedef typename MatrixType::StorageIndex Index; + Index nc = mat.cols(); // Number of columns Index m = mat.rows(); Index diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree @@ -70,7 +70,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row - Index row,col; + Index row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); @@ -89,7 +89,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl except use (firstRowElt[r],c) in place of an edge (r,c) of A. Thus each row clique in A'*A is replaced by a star centered at its first vertex, which has the same fill. */ - Index rset, cset, rroot; + Index rset, cset, rroot; for (col = 0; col < nc; col++) { found_diag = col>=m; diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index 94ca9b1a4..afb09ad91 100644 --- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -56,7 +56,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: @@ -97,9 +97,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_value; } - EIGEN_STRONG_INLINE Index index() const { return m_id; } - EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } - EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; } + EIGEN_STRONG_INLINE StorageIndex row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return Lhs::IsRowMajor ? 
index() : m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } @@ -108,7 +108,7 @@ public: RhsIterator m_rhsIter; const BinaryOp& m_functor; Scalar m_value; - Index m_id; + StorageIndex m_id; }; @@ -145,7 +145,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: @@ -177,9 +177,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } @@ -223,7 +223,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -241,9 +241,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } - EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } @@ -288,7 +288,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -307,9 +307,9 @@ public: { return m_functor(m_lhsIter.value(), m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } @@ -317,7 +317,7 @@ public: LhsIterator m_lhsIter; const RhsEvaluator &m_rhsEval; const BinaryOp& m_functor; - const Index m_outer; + const StorageIndex m_outer; }; diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h index 32b7bc949..63d8f329c 100644 --- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h @@ -47,7 +47,7 @@ class unary_evaluator, IteratorBased>::InnerIterat typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, 
typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} @@ -122,7 +122,7 @@ class unary_evaluator, IteratorBased>::InnerItera typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h index 5aea11425..f6e6fab29 100644 --- a/Eigen/src/SparseCore/SparseDenseProduct.h +++ b/Eigen/src/SparseCore/SparseDenseProduct.h @@ -29,7 +29,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -62,7 +62,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { @@ -86,7 +86,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -106,7 +106,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -193,7 +193,7 @@ protected: typedef typename evaluator::type RhsEval; typedef typename evaluator::InnerIterator LhsIterator; typedef typename ProdXprType::Scalar Scalar; - typedef typename ProdXprType::Index Index; + typedef typename ProdXprType::StorageIndex StorageIndex; public: enum { @@ -211,9 +211,9 @@ public: m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits::StorageKind() )) {} - EIGEN_STRONG_INLINE Index outer() const { return m_outer; } - EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } - EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex outer() const { return m_outer; } + EIGEN_STRONG_INLINE StorageIndex row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return NeedToTranspose ? 
LhsIterator::index() : m_outer; } EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; } EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); } diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h index be935e9f3..19a79edad 100644 --- a/Eigen/src/SparseCore/SparseDiagonalProduct.h +++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -66,7 +66,7 @@ struct sparse_diagonal_product_evaluator::InnerIterator SparseXprInnerIterator; typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::Index Index; + typedef typename SparseXprType::StorageIndex StorageIndex; public: class InnerIterator : public SparseXprInnerIterator @@ -96,7 +96,7 @@ template struct sparse_diagonal_product_evaluator { typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::Index Index; + typedef typename SparseXprType::StorageIndex StorageIndex; typedef CwiseBinaryOp, const typename SparseXprType::ConstInnerVectorReturnType, @@ -111,14 +111,14 @@ struct sparse_diagonal_product_evaluator(outer)) {} inline Scalar value() const { return m_cwiseIter.value(); } - inline Index index() const { return m_cwiseIter.index(); } - inline Index outer() const { return m_outer; } - inline Index col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; } - inline Index row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); } + inline StorageIndex index() const { return convert_index(m_cwiseIter.index()); } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; } + inline StorageIndex row() const { return SparseXprType::IsRowMajor ? 
m_outer : m_cwiseIter.index(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_cwiseIter; return *this; } @@ -127,7 +127,7 @@ struct sparse_diagonal_product_evaluator struct traits > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _Index StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -65,7 +65,7 @@ struct traits, DiagIndex> > typedef _Scalar Scalar; typedef Dense StorageKind; - typedef _Index Index; + typedef _Index StorageIndex; typedef MatrixXpr XprKind; enum { @@ -103,23 +103,24 @@ class SparseMatrix using Base::IsRowMajor; - typedef internal::CompressedStorage Storage; + typedef internal::CompressedStorage Storage; enum { Options = _Options }; + typedef typename Base::IndexVector IndexVector; + typedef typename Base::ScalarVector ScalarVector; protected: - typedef SparseMatrix TransposedSparseMatrix; - Index m_outerSize; - Index m_innerSize; - Index* m_outerIndex; - Index* m_innerNonZeros; // optional, if null then the data is compressed + StorageIndex m_outerSize; + StorageIndex m_innerSize; + StorageIndex* m_outerIndex; + StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; - Eigen::Map > innerNonZeros() { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } - const Eigen::Map > innerNonZeros() const { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + Eigen::Map innerNonZeros() { return Eigen::Map(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + const Eigen::Map innerNonZeros() const { return Eigen::Map(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } public: @@ -127,14 +128,14 @@ class SparseMatrix inline bool isCompressed() const { return m_innerNonZeros==0; } /** \returns the number of rows of the matrix */ - inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline StorageIndex rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } /** \returns the number of columns of the matrix */ - inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ - inline Index innerSize() const { return m_innerSize; } + inline StorageIndex innerSize() const { return m_innerSize; } /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ - inline Index outerSize() const { return m_outerSize; } + inline StorageIndex outerSize() const { return m_outerSize; } /** \returns a const pointer to the array of values. * This function is aimed at interoperability with other libraries. @@ -148,29 +149,29 @@ class SparseMatrix /** \returns a const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline const Index* innerIndexPtr() const { return &m_data.index(0); } + inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); } /** \returns a non-const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline Index* innerIndexPtr() { return &m_data.index(0); } + inline StorageIndex* innerIndexPtr() { return &m_data.index(0); } /** \returns a const pointer to the array of the starting positions of the inner vectors. 
* This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline const Index* outerIndexPtr() const { return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } /** \returns a non-const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline Index* outerIndexPtr() { return m_outerIndex; } + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } /** \returns a const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ - inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } + inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ - inline Index* innerNonZeroPtr() { return m_innerNonZeros; } + inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; } /** \internal */ inline Storage& data() { return m_data; } @@ -234,7 +235,7 @@ class SparseMatrix if(isCompressed()) { - reserve(Matrix::Constant(outerSize(), 2)); + reserve(IndexVector::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } @@ -248,17 +249,17 @@ class SparseMatrix inline void setZero() { m_data.clear(); - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); if(m_innerNonZeros) - memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index)); + memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); } /** \returns the number of non zero coefficients */ - inline Index nonZeros() const + inline StorageIndex nonZeros() const { if(m_innerNonZeros) return innerNonZeros().sum(); - return static_cast(m_data.size()); + return convert_index(Index(m_data.size())); } /** Preallocates \a reserveSize non zeros. @@ -302,13 +303,13 @@ class SparseMatrix { std::size_t totalReserveSize = 0; // turn the matrix into non-compressed mode - m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); // temporarily use m_innerSizes to hold the new starting points. 
- Index* newOuterIndex = m_innerNonZeros; + StorageIndex* newOuterIndex = m_innerNonZeros; - Index count = 0; + StorageIndex count = 0; for(Index j=0; j=0; --j) { - Index innerNNZ = previousOuterIndex - m_outerIndex[j]; + StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -335,15 +336,15 @@ class SparseMatrix } else { - Index* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(Index))); + StorageIndex* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); - Index count = 0; + StorageIndex count = 0; for(Index j=0; j(reserveSizes[j], alreadyReserved); + StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j]; + StorageIndex toReserve = std::max(reserveSizes[j], alreadyReserved); count += toReserve + m_innerNonZeros[j]; } newOuterIndex[m_outerSize] = count; @@ -354,7 +355,7 @@ class SparseMatrix Index offset = newOuterIndex[j] - m_outerIndex[j]; if(offset>0) { - Index innerNNZ = m_innerNonZeros[j]; + StorageIndex innerNNZ = m_innerNonZeros[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -425,7 +426,7 @@ class SparseMatrix { if(isCompressed()) { - Index size = static_cast(m_data.size()); + StorageIndex size = internal::convert_index(Index(m_data.size())); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) @@ -490,7 +491,7 @@ class SparseMatrix { if(m_innerNonZeros != 0) return; - m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -517,7 +518,7 @@ class SparseMatrix // TODO also implement a unit test makeCompressed(); - Index k = 0; + StorageIndex k = 0; for(Index j=0; jcols() : rows - this->rows(); Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); - Index newInnerSize = IsRowMajor ? cols : rows; + StorageIndex newInnerSize = convert_index(IsRowMajor ? 
cols : rows); // Deals with inner non zeros if (m_innerNonZeros) { // Resize m_innerNonZeros - Index *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); + StorageIndex *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex))); if (!newInnerNonZeros) internal::throw_std_bad_alloc(); m_innerNonZeros = newInnerNonZeros; @@ -566,7 +567,7 @@ class SparseMatrix else if (innerChange < 0) { // Inner size decreased: allocate a new m_innerNonZeros - m_innerNonZeros = static_cast(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); for(Index i = 0; i < m_outerSize; i++) m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -577,8 +578,8 @@ class SparseMatrix { for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) { - Index &n = m_innerNonZeros[i]; - Index start = m_outerIndex[i]; + StorageIndex &n = m_innerNonZeros[i]; + StorageIndex start = m_outerIndex[i]; while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; } } @@ -589,12 +590,12 @@ class SparseMatrix if (outerChange == 0) return; - Index *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); + StorageIndex *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); m_outerIndex = newOuterIndex; if (outerChange > 0) { - Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; + StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; for(Index i=m_outerSize; i(std::malloc((outerSize + 1) * sizeof(Index))); + m_outerIndex = static_cast(std::malloc((outerSize + 1) * sizeof(StorageIndex))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; @@ -622,7 +623,7 @@ class SparseMatrix std::free(m_innerNonZeros); m_innerNonZeros = 0; } - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); } /** \internal @@ -715,9 +716,9 @@ class SparseMatrix { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); - Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); - Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); - Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + Eigen::Map(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); + Eigen::Map(&this->m_data.value(0), rows()).setOnes(); + Eigen::Map(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); } inline SparseMatrix& operator=(const SparseMatrix& other) { @@ -808,9 +809,7 @@ protected: template void initAssignment(const Other& other) { - eigen_assert( other.rows() == typename Other::Index(Index(other.rows())) - && other.cols() == typename Other::Index(Index(other.cols())) ); - resize(Index(other.rows()), Index(other.cols())); + resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); @@ -826,15 +825,15 @@ protected: * A vector object that is equal to 0 everywhere but v at the position i */ class SingletonVector { - Index m_index; - Index m_value; + StorageIndex m_index; + StorageIndex m_value; public: - typedef Index value_type; + typedef StorageIndex value_type; SingletonVector(Index i, Index v) - : m_index(i), m_value(v) + : 
m_index(convert_index(i)), m_value(convert_index(v)) {} - Index operator[](Index i) const { return i==m_index ? m_value : 0; } + StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; } }; /** \internal @@ -853,14 +852,14 @@ public: eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; - m_data.index(p) = inner; + m_data.index(p) = convert_index(inner); return (m_data.value(p) = 0); } private: static void check_template_parameters() { - EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } @@ -880,7 +879,7 @@ class SparseMatrix::InnerIterator { public: InnerIterator(const SparseMatrix& mat, Index outer) - : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(convert_index(outer)), m_id(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_end = mat.m_outerIndex[outer+1]; @@ -893,19 +892,19 @@ class SparseMatrix::InnerIterator inline const Scalar& value() const { return m_values[m_id]; } inline Scalar& valueRef() { return const_cast(m_values[m_id]); } - inline Index index() const { return m_indices[m_id]; } - inline Index outer() const { return m_outer; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_indices[m_id]; } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end); } protected: const Scalar* m_values; - const Index* m_indices; - const Index m_outer; - Index m_id; - Index m_end; + const StorageIndex* m_indices; + const StorageIndex m_outer; + StorageIndex m_id; + StorageIndex m_end; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; @@ -931,19 +930,19 @@ class SparseMatrix::ReverseInnerIterator inline const Scalar& value() const { return m_values[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } - inline Index index() const { return m_indices[m_id-1]; } - inline Index outer() const { return m_outer; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_indices[m_id-1]; } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id > m_start); } protected: const Scalar* m_values; - const Index* m_indices; - const Index m_outer; - Index m_id; - const Index m_start; + const StorageIndex* m_indices; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; }; namespace internal { @@ -954,13 +953,13 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa EIGEN_UNUSED_VARIABLE(Options); enum { IsRowMajor = SparseMatrixType::IsRowMajor }; typedef typename SparseMatrixType::Scalar Scalar; - typedef typename SparseMatrixType::Index Index; - SparseMatrix trMat(mat.rows(),mat.cols()); + typedef typename SparseMatrixType::StorageIndex StorageIndex; + SparseMatrix trMat(mat.rows(),mat.cols()); if(begin!=end) { // pass 1: count the nnz per inner-vector - Matrix wi(trMat.outerSize()); + typename SparseMatrixType::IndexVector wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { @@ -1034,13 +1033,13 @@ void SparseMatrix::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task - Matrix wi(innerSize()); + IndexVector wi(innerSize()); wi.fill(-1); - Index count = 0; + StorageIndex count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers for(Index j=0; j& SparseMatrix > (dest.m_outerIndex,dest.outerSize()).setZero(); + Eigen::Map (dest.m_outerIndex,dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass @@ -1098,8 +1097,8 @@ EIGEN_DONT_INLINE SparseMatrix& SparseMatrix positions(dest.outerSize()); + StorageIndex count = 0; + IndexVector positions(dest.outerSize()); for (Index j=0; j& SparseMatrix::Scalar& Sparse { eigen_assert(!isCompressed()); - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; + const StorageIndex outer = convert_index(IsRowMajor ? row : col); + const StorageIndex inner = convert_index(IsRowMajor ? 
col : row); Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; - Index innerNNZ = m_innerNonZeros[outer]; + StorageIndex innerNNZ = m_innerNonZeros[outer]; if(innerNNZ>=room) { // this inner vector is full, we need to reallocate the whole buffer :( - reserve(SingletonVector(outer,std::max(2,innerNNZ))); + reserve(SingletonVector(outer,std::max(2,innerNNZ))); } Index startId = m_outerIndex[outer]; @@ -1180,7 +1179,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse // we start a new inner vector while (previousOuter>=0 && m_outerIndex[previousOuter]==0) { - m_outerIndex[previousOuter] = static_cast(m_data.size()); + m_outerIndex[previousOuter] = convert_index(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; @@ -1280,7 +1279,6 @@ struct evaluator > : evaluator_base > { typedef _Scalar Scalar; - typedef _Index Index; typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType; typedef typename SparseMatrixType::InnerIterator InnerIterator; typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h index 04baabe4f..c55a6a930 100644 --- a/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/Eigen/src/SparseCore/SparseMatrixBase.h @@ -30,13 +30,15 @@ template class SparseMatrixBase : public EigenBase typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; typedef EigenBase Base; + typedef Matrix IndexVector; + typedef Matrix ScalarVector; template Derived& operator=(const EigenBase &other); @@ -99,7 +101,7 @@ template class SparseMatrixBase : public EigenBase typedef typename internal::add_const >::type ConstTransposeReturnType; // FIXME storage order do not match evaluator storage order - typedef SparseMatrix PlainObject; + typedef SparseMatrix PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers @@ -142,15 +144,15 @@ template class SparseMatrixBase : public EigenBase #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** \returns the number of rows. \sa cols() */ - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows() */ - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(). */ - inline Index size() const { return rows() * cols(); } + inline StorageIndex size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ - inline Index nonZeros() const { return derived().nonZeros(); } + inline StorageIndex nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. 
* In other words, this function returns * \code rows()==1 || cols()==1 \endcode @@ -158,10 +160,10 @@ template class SparseMatrixBase : public EigenBase inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ - Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + StorageIndex outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ - Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + StorageIndex innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } @@ -227,8 +229,8 @@ template class SparseMatrixBase : public EigenBase } else { - SparseMatrix trans = m; - s << static_cast >&>(trans); + SparseMatrix trans = m; + s << static_cast >&>(trans); } } return s; @@ -288,7 +290,7 @@ template class SparseMatrixBase : public EigenBase { return Product(lhs.derived(), rhs.derived()); } /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ - SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const + SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct(derived(), perm); } @@ -352,6 +354,10 @@ template class SparseMatrixBase : public EigenBase protected: bool m_isRValue; + + static inline StorageIndex convert_index(const Index idx) { + return internal::convert_index(idx); + } }; } // end namespace Eigen diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h index 21411f232..80e5c5fef 100644 --- a/Eigen/src/SparseCore/SparsePermutation.h +++ b/Eigen/src/SparseCore/SparsePermutation.h @@ -21,15 +21,15 @@ struct traits::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; - typedef typename MatrixTypeNestedCleaned::Index Index; + typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight }; typedef typename internal::conditional, - SparseMatrix >::type ReturnType; + SparseMatrix, + SparseMatrix >::type ReturnType; }; template @@ -38,7 +38,7 @@ struct permut_sparsematrix_product_retval { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; - typedef typename MatrixTypeNestedCleaned::Index Index; + typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? 
RowMajor : ColMajor, @@ -56,8 +56,8 @@ struct permut_sparsematrix_product_retval { if(MoveOuter) { - SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); - Matrix sizes(m_matrix.outerSize()); + SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); + Matrix sizes(m_matrix.outerSize()); for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); - Matrix sizes(tmp.outerSize()); + SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); + Matrix sizes(tmp.outerSize()); sizes.setZero(); - PermutationMatrix perm; + PermutationMatrix perm; if((Side==OnTheLeft) ^ Transposed) perm = m_permutation; else diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h index 5da7d2bef..e13f98144 100644 --- a/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -33,10 +33,10 @@ struct traits > : traits { }; template -void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); template -void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); } @@ -48,8 +48,8 @@ template class SparseSelfAdjointView enum { Mode = _Mode }; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; - typedef Matrix VectorI; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; @@ -58,8 +58,8 @@ template class SparseSelfAdjointView eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const _MatrixTypeNested& matrix() const { return m_matrix; } @@ -117,22 +117,22 @@ template class SparseSelfAdjointView SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, const Scalar& alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ - template void evalTo(SparseMatrix& _dest) const + template void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix, _dest); } - template void evalTo(DynamicSparseMatrix& _dest) const + template void evalTo(DynamicSparseMatrix& _dest) const { // TODO directly evaluate into _dest; - SparseMatrix tmp(_dest.rows(),_dest.cols()); + SparseMatrix tmp(_dest.rows(),_dest.cols()); internal::permute_symm_to_fullsymm(m_matrix, tmp); _dest = tmp; } /** \returns an expression of P H P^-1 */ // TODO implement twists in a more evaluator friendly fashion - SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const + SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm); } @@ -215,7 +215,6 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons typedef typename evaluator::type LhsEval; typedef typename 
evaluator::InnerIterator LhsIterator; - typedef typename SparseLhsType::Index Index; typedef typename SparseLhsType::Scalar LhsScalar; enum { @@ -302,7 +301,7 @@ struct generic_product_impl @@ -353,12 +352,12 @@ protected: namespace internal { template -void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::Scalar Scalar; - typedef SparseMatrix Dest; - typedef Matrix VectorI; + typedef SparseMatrix Dest; + typedef Matrix VectorI; Dest& dest(_dest.derived()); enum { @@ -401,16 +400,16 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix(it.index()); Index r = it.row(); Index c = it.col(); - Index jp = perm ? perm[j] : j; - Index ip = perm ? perm[i] : i; + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm ? perm[i] : i; if(Mode==(Upper|Lower)) { @@ -440,12 +439,12 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix -void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::Scalar Scalar; - SparseMatrix& dest(_dest.derived()); - typedef Matrix VectorI; + SparseMatrix& dest(_dest.derived()); + typedef Matrix VectorI; enum { SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, StorageOrderMatch = int(SrcOrder) == int(DstOrder), @@ -453,20 +452,20 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrixj)) continue; - Index ip = perm ? perm[i] : i; + StorageIndex ip = perm ? perm[i] : i; count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; } } @@ -477,17 +476,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrixj)) continue; - Index jp = perm ? perm[j] : j; - Index ip = perm? perm[i] : i; + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm? perm[i] : i; Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? 
(std::max)(ip,jp) : (std::min)(ip,jp); @@ -519,11 +518,11 @@ class SparseSymmetricPermutationProduct { public: typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; protected: - typedef PermutationMatrix Perm; + typedef PermutationMatrix Perm; public: - typedef Matrix VectorI; + typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; @@ -531,8 +530,8 @@ class SparseSymmetricPermutationProduct : m_matrix(mat), m_perm(perm) {} - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } template void evalTo(SparseMatrix& _dest) const diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index f291f8cef..1384fbbff 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -22,16 +22,16 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; + typedef typename remove_all::type::StorageIndex StorageIndex; // make sure to call innerSize/outerSize since we fake the storage order. - Index rows = lhs.innerSize(); - Index cols = rhs.outerSize(); + StorageIndex rows = lhs.innerSize(); + StorageIndex cols = rhs.outerSize(); //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer - AmbiVector tempVector(rows); + AmbiVector tempVector(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns @@ -39,7 +39,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. 
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) - Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); + StorageIndex estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) @@ -70,7 +70,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r } } res.startVec(j); - for (typename AmbiVector::Iterator it(tempVector,tolerance); it; ++it) + for (typename AmbiVector::Iterator it(tempVector,tolerance); it; ++it) res.insertBackByOuterInner(j,it.index()) = it.value(); } res.finalize(); @@ -103,7 +103,7 @@ struct sparse_sparse_product_with_pruning_selector SparseTemporaryType; + typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; @@ -129,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixLhs; - typedef SparseMatrix ColMajorMatrixRhs; + typedef SparseMatrix ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixLhs colLhs(lhs); ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); @@ -149,7 +149,7 @@ struct sparse_sparse_product_with_pruning_selector RowMajorMatrixLhs; + typedef SparseMatrix RowMajorMatrixLhs; RowMajorMatrixLhs rowLhs(lhs); sparse_sparse_product_with_pruning_selector(rowLhs,rhs,res,tolerance); } @@ -161,7 +161,7 @@ struct sparse_sparse_product_with_pruning_selector RowMajorMatrixRhs; + typedef SparseMatrix RowMajorMatrixRhs; RowMajorMatrixRhs rowRhs(rhs); sparse_sparse_product_with_pruning_selector(lhs,rowRhs,res,tolerance); } @@ -173,7 +173,7 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixRhs; + typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(lhs, colRhs, res, tolerance); } @@ -185,7 +185,7 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixLhs; ColMajorMatrixLhs colLhs(lhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, rhs, res, tolerance); } diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h index c3d2d1a16..c74af46b3 100644 --- a/Eigen/src/SparseCore/SparseTranspose.h +++ b/Eigen/src/SparseCore/SparseTranspose.h @@ -20,7 +20,7 @@ template class TransposeImpl protected: typedef SparseMatrixBase > Base; public: - inline typename MatrixType::Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } + inline typename MatrixType::StorageIndex nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } }; namespace internal { @@ -33,28 +33,28 @@ struct unary_evaluator, IteratorBased> typedef typename evaluator::ReverseInnerIterator EvalReverseIterator; public: typedef Transpose XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; class InnerIterator : public EvalIterator { public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : EvalIterator(unaryOp.m_argImpl,outer) {} - Index row() const { return EvalIterator::col(); } - Index col() const { return EvalIterator::row(); } + StorageIndex row() const { return EvalIterator::col(); } + StorageIndex col() const { return EvalIterator::row(); } }; class ReverseInnerIterator : public 
EvalReverseIterator { public: - EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, Index outer) : EvalReverseIterator(unaryOp.m_argImpl,outer) {} - Index row() const { return EvalReverseIterator::col(); } - Index col() const { return EvalReverseIterator::row(); } + StorageIndex row() const { return EvalReverseIterator::col(); } + StorageIndex col() const { return EvalReverseIterator::row(); } }; enum { diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h index b044d6778..15bdbacb5 100644 --- a/Eigen/src/SparseCore/SparseTriangularView.h +++ b/Eigen/src/SparseCore/SparseTriangularView.h @@ -64,7 +64,7 @@ template class TriangularViewImpl::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; - typedef typename TriangularViewType::Index Index; + typedef typename TriangularViewType::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer) @@ -102,9 +102,9 @@ class TriangularViewImpl::InnerIterator : public MatrixT return *this; } - inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } - inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline Index index() const + inline StorageIndex row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } + inline StorageIndex col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } + inline StorageIndex index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); @@ -134,7 +134,7 @@ template class TriangularViewImpl::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; - typedef typename TriangularViewImpl::Index Index; + typedef typename TriangularViewImpl::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer) @@ -150,8 +150,8 @@ class TriangularViewImpl::ReverseInnerIterator : public EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } - inline Index row() const { return Base::row(); } - inline Index col() const { return Base::col(); } + inline StorageIndex row() const { return Base::row(); } + inline StorageIndex col() const { return Base::col(); } EIGEN_STRONG_INLINE operator bool() const { @@ -175,7 +175,7 @@ struct unary_evaluator, IteratorBased> protected: typedef typename XprType::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator EvalIterator; enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit)) @@ -246,9 +246,9 @@ public: } } -// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } -// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline Index index() const +// inline StorageIndex row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } +// inline StorageIndex col() const { return (ArgType::Flags&RowMajorBit ? 
this->index() : Base::outer()); } + inline StorageIndex index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h index 8de227b88..5714150c2 100644 --- a/Eigen/src/SparseCore/SparseUtil.h +++ b/Eigen/src/SparseCore/SparseUtil.h @@ -43,20 +43,22 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) +// TODO this is mostly the same as EIGEN_GENERIC_PUBLIC_INTERFACE #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ - using Base::const_cast_derived; + using Base::const_cast_derived; \ + using Base::convert_index; #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) @@ -67,10 +69,10 @@ const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; template class SparseMatrixBase; -template class SparseMatrix; -template class DynamicSparseMatrix; -template class SparseVector; -template class MappedSparseMatrix; +template class SparseMatrix; +template class DynamicSparseMatrix; +template class SparseVector; +template class MappedSparseMatrix; template class SparseSelfAdjointView; template class SparseDiagonalProduct; @@ -99,24 +101,25 @@ template struct eval template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; public: - typedef SparseVector<_Scalar, RowMajor, _Index> type; + typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; public: - typedef SparseVector<_Scalar, ColMajor, _Index> type; + typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type; }; +// TODO this seems almost identical to plain_matrix_type template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Options, _Index> type; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; }; template struct sparse_eval { @@ -128,10 +131,10 @@ template struct sparse_eval { template struct plain_matrix_type { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; enum { _Options = ((evaluator::Flags&RowMajorBit)==RowMajorBit) ? 
RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Options, _Index> type; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; }; template @@ -162,26 +165,26 @@ template<> struct glue_shapes { typedef SparseTria * * \sa SparseMatrix::setFromTriplets() */ -template::Index > +template::StorageIndex > class Triplet { public: Triplet() : m_row(0), m_col(0), m_value(0) {} - Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0)) + Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0)) : m_row(i), m_col(j), m_value(v) {} /** \returns the row index of the element */ - const Index& row() const { return m_row; } + const StorageIndex& row() const { return m_row; } /** \returns the column index of the element */ - const Index& col() const { return m_col; } + const StorageIndex& col() const { return m_col; } /** \returns the value of the element */ const Scalar& value() const { return m_value; } protected: - Index m_row, m_col; + StorageIndex m_row, m_col; Scalar m_value; }; diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index 8b696a476..fd70cf2bc 100644 --- a/Eigen/src/SparseCore/SparseVector.h +++ b/Eigen/src/SparseCore/SparseVector.h @@ -26,11 +26,11 @@ namespace Eigen { */ namespace internal { -template -struct traits > +template +struct traits > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -61,9 +61,9 @@ struct sparse_vector_assign_selector; } -template +template class SparseVector - : public SparseMatrixBase > + : public SparseMatrixBase > { typedef SparseMatrixBase SparseBase; @@ -72,23 +72,23 @@ class SparseVector EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) - typedef internal::CompressedStorage Storage; + typedef internal::CompressedStorage Storage; enum { IsColVector = internal::traits::IsColVector }; enum { Options = _Options }; - EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } - EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } - EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } - EIGEN_STRONG_INLINE Index outerSize() const { return 1; } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsColVector ? 
1 : m_size; } + EIGEN_STRONG_INLINE StorageIndex innerSize() const { return m_size; } + EIGEN_STRONG_INLINE StorageIndex outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } - EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } - EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } + EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return &m_data.index(0); } + EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return &m_data.index(0); } /** \internal */ inline Storage& data() { return m_data; } @@ -132,7 +132,7 @@ class SparseVector inline void setZero() { m_data.clear(); } /** \returns the number of non zero coefficients */ - inline Index nonZeros() const { return static_cast(m_data.size()); } + inline StorageIndex nonZeros() const { return static_cast(m_data.size()); } inline void startVec(Index outer) { @@ -188,7 +188,7 @@ class SparseVector m_data.value(p+1) = m_data.value(p); --p; } - m_data.index(p+1) = i; + m_data.index(p+1) = convert_index(i); m_data.value(p+1) = 0; return m_data.value(p+1); } @@ -207,13 +207,13 @@ class SparseVector void resize(Index rows, Index cols) { - eigen_assert(rows==1 || cols==1); + eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1"); resize(IsColVector ? rows : cols); } void resize(Index newSize) { - m_size = newSize; + m_size = convert_index(newSize); m_data.clear(); } @@ -348,27 +348,27 @@ protected: static void check_template_parameters() { - EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } Storage m_data; - Index m_size; + StorageIndex m_size; }; -template -class SparseVector::InnerIterator +template +class SparseVector::InnerIterator { public: explicit InnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) + : m_data(vec.m_data), m_id(0), m_end(convert_index(m_data.size())) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } - explicit InnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(0), m_end(static_cast(m_data.size())) + explicit InnerIterator(const internal::CompressedStorage& data) + : m_data(data), m_id(0), m_end(convert_index(m_data.size())) {} inline InnerIterator& operator++() { m_id++; return *this; } @@ -376,16 +376,16 @@ class SparseVector::InnerIterator inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } - inline Index index() const { return m_data.index(m_id); } - inline Index row() const { return IsColVector ? index() : 0; } - inline Index col() const { return IsColVector ? 0 : index(); } + inline StorageIndex index() const { return m_data.index(m_id); } + inline StorageIndex row() const { return IsColVector ? index() : 0; } + inline StorageIndex col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: - const internal::CompressedStorage& m_data; - Index m_id; - const Index m_end; + const internal::CompressedStorage& m_data; + StorageIndex m_id; + const StorageIndex m_end; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; @@ -393,19 +393,19 @@ class SparseVector::InnerIterator template InnerIterator(const SparseMatrixBase&,Index outer=0); }; -template -class SparseVector::ReverseInnerIterator +template +class SparseVector::ReverseInnerIterator { public: explicit ReverseInnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(static_cast(m_data.size())), m_start(0) + : m_data(vec.m_data), m_id(convert_index(m_data.size())), m_start(0) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } - explicit ReverseInnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(static_cast(m_data.size())), m_start(0) + explicit ReverseInnerIterator(const internal::CompressedStorage& data) + : m_data(data), m_id(convert_index(m_data.size())), m_start(0) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } @@ -413,15 +413,15 @@ class SparseVector::ReverseInnerIterator inline Scalar value() const { return m_data.value(m_id-1); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } - inline Index index() const { return m_data.index(m_id-1); } - inline Index row() const { return IsColVector ? index() : 0; } - inline Index col() const { return IsColVector ? 0 : index(); } + inline StorageIndex index() const { return m_data.index(m_id-1); } + inline StorageIndex row() const { return IsColVector ? index() : 0; } + inline StorageIndex col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id > m_start); } protected: - const internal::CompressedStorage& m_data; - Index m_id; + const internal::CompressedStorage& m_data; + StorageIndex m_id; const Index m_start; }; @@ -465,7 +465,7 @@ struct sparse_vector_assign_selector { eigen_internal_assert(src.outerSize()==src.size()); typedef typename internal::evaluator::type SrcEvaluatorType; SrcEvaluatorType srcEval(src); - for(typename Dest::Index i=0; i struct traits > : traits { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Sparse StorageKind; enum { Flags = int(traits::Flags) & (RowMajorBit) @@ -40,11 +40,11 @@ public: RealScalar m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } - inline Index innerSize() const { return m_matrix.innerSize(); } - inline Index outerSize() const { return m_matrix.outerSize(); } + inline StorageIndex innerSize() const { return m_matrix.innerSize(); } + inline StorageIndex outerSize() const { return m_matrix.outerSize(); } /** \returns the nested expression */ const typename internal::remove_all::type& @@ -126,7 +126,7 @@ struct unary_evaluator, IndexBased> typedef SparseView XprType; protected: enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; public: @@ -134,7 +134,7 @@ struct unary_evaluator, IndexBased> { public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize()) { incrementToNonZero(); @@ -153,17 +153,17 @@ struct unary_evaluator, IndexBased> : m_sve.m_argImpl.coeff(m_inner, m_outer); } - EIGEN_STRONG_INLINE Index index() const { return m_inner; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const unary_evaluator &m_sve; - Index m_inner; - const Index m_outer; - const Index m_end; + StorageIndex m_inner; + const StorageIndex m_outer; + const StorageIndex m_end; private: void incrementToNonZero() diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h index 98062e9c6..ccfbdc762 100644 --- a/Eigen/src/SparseCore/TriangularSolver.h +++ b/Eigen/src/SparseCore/TriangularSolver.h @@ -28,7 +28,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -66,7 +66,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -106,7 +106,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -142,7 +142,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -212,12 +212,12 @@ template struct sparse_solve_triangular_sparse_selector { typedef typename Rhs::Scalar Scalar; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; static void run(const Lhs& lhs, Rhs& other) { const bool IsLower = (UpLo==Lower); - AmbiVector tempVector(other.rows()*2); + AmbiVector tempVector(other.rows()*2); tempVector.setBounds(0,other.rows()); Rhs res(other.rows(), other.cols()); @@ -273,7 +273,7 @@ struct sparse_solve_triangular_sparse_selector Index count = 0; // FIXME compute a reference value to filter zeros - for (typename AmbiVector::Iterator it(tempVector/*,1e-12*/); it; ++it) + for (typename AmbiVector::Iterator it(tempVector/*,1e-12*/); it; ++it) { ++ count; // std::cerr << "fill " << it.index() << ", " << col << "\n"; diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index d72d7f150..0c48fef3e 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h @@ -14,7 +14,7 @@ namespace Eigen { -template > class SparseLU; +template > class SparseLU; template struct SparseLUMatrixLReturnType; template struct SparseLUMatrixUReturnType; @@ -70,7 +70,7 @@ template struct SparseLUMatrixURetu * \sa \ref OrderingMethods_Module */ template -class SparseLU : public SparseSolverBase >, public internal::SparseLUImpl +class SparseLU : public SparseSolverBase >, public internal::SparseLUImpl { protected: typedef SparseSolverBase > APIBase; @@ -82,13 +82,13 @@ class SparseLU : public SparseSolverBase >, typedef _OrderingType OrderingType; 
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix NCMatrix; - typedef internal::MappedSuperNodalMatrix SCMatrix; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix NCMatrix; + typedef internal::MappedSuperNodalMatrix SCMatrix; typedef Matrix ScalarVector; - typedef Matrix IndexVector; - typedef PermutationMatrix PermutationType; - typedef internal::SparseLUImpl Base; + typedef Matrix IndexVector; + typedef PermutationMatrix PermutationType; + typedef internal::SparseLUImpl Base; public: SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1) @@ -122,8 +122,8 @@ class SparseLU : public SparseSolverBase >, factorize(matrix); } - inline Index rows() const { return m_mat.rows(); } - inline Index cols() const { return m_mat.cols(); } + inline StorageIndex rows() const { return m_mat.rows(); } + inline StorageIndex cols() const { return m_mat.cols(); } /** Indicate that the pattern of the input matrix is symmetric */ void isSymmetric(bool sym) { @@ -146,9 +146,9 @@ class SparseLU : public SparseSolverBase >, * y = b; matrixU().solveInPlace(y); * \endcode */ - SparseLUMatrixUReturnType > matrixU() const + SparseLUMatrixUReturnType > matrixU() const { - return SparseLUMatrixUReturnType >(m_Lstore, m_Ustore); + return SparseLUMatrixUReturnType >(m_Lstore, m_Ustore); } /** @@ -324,7 +324,7 @@ class SparseLU : public SparseSolverBase >, std::string m_lastError; NCMatrix m_mat; // The input (permuted ) matrix SCMatrix m_Lstore; // The lower triangular matrix (supernodal) - MappedSparseMatrix m_Ustore; // The upper triangular matrix + MappedSparseMatrix m_Ustore; // The upper triangular matrix PermutationType m_perm_c; // Column permutation PermutationType m_perm_r ; // Row permutation IndexVector m_etree; // Column elimination tree @@ -334,10 +334,10 @@ class SparseLU : public SparseSolverBase >, // SparseLU options bool m_symmetricmode; // values for performance - internal::perfvalues m_perfv; + internal::perfvalues m_perfv; RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot - Index m_nnzL, m_nnzU; // Nonzeros in L and U factors - Index m_detPermR; // Determinant of the coefficient matrix + StorageIndex m_nnzL, m_nnzU; // Nonzeros in L and U factors + StorageIndex m_detPermR; // Determinant of the coefficient matrix private: // Disable copy constructor SparseLU (const SparseLU& ); @@ -375,7 +375,7 @@ void SparseLU::analyzePattern(const MatrixType& mat) { m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used. // Then, permute only the column pointers - ei_declare_aligned_stack_constructed_variable(Index,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast(mat.outerIndexPtr()):0); + ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast(mat.outerIndexPtr()):0); // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed. 
if(!mat.isCompressed()) @@ -640,7 +640,7 @@ void SparseLU::factorize(const MatrixType& matrix) // Create supernode matrix L m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); // Create the column major upper sparse matrix U; - new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); + new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); m_info = Success; m_factorizationIsOk = true; @@ -649,12 +649,12 @@ void SparseLU::factorize(const MatrixType& matrix) template struct SparseLUMatrixLReturnType : internal::no_assignment_operator { - typedef typename MappedSupernodalType::Index Index; + typedef typename MappedSupernodalType::StorageIndex StorageIndex; typedef typename MappedSupernodalType::Scalar Scalar; explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL) { } - Index rows() { return m_mapL.rows(); } - Index cols() { return m_mapL.cols(); } + StorageIndex rows() { return m_mapL.rows(); } + StorageIndex cols() { return m_mapL.cols(); } template void solveInPlace( MatrixBase &X) const { @@ -666,21 +666,18 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator template struct SparseLUMatrixUReturnType : internal::no_assignment_operator { - typedef typename MatrixLType::Index Index; + typedef typename MatrixLType::StorageIndex StorageIndex; typedef typename MatrixLType::Scalar Scalar; explicit SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU) : m_mapL(mapL),m_mapU(mapU) { } - Index rows() { return m_mapL.rows(); } - Index cols() { return m_mapL.cols(); } + StorageIndex rows() { return m_mapL.rows(); } + StorageIndex cols() { return m_mapL.cols(); } template void solveInPlace(MatrixBase &X) const { - /* Explicit type conversion as the Index type of MatrixBase may be wider than Index */ - eigen_assert(X.rows() <= NumTraits::highest()); - eigen_assert(X.cols() <= NumTraits::highest()); - Index nrhs = Index(X.cols()); - Index n = Index(X.rows()); + Index nrhs = X.cols(); + Index n = X.rows(); // Backward solve with U for (Index k = m_mapL.nsuper(); k >= 0; k--) { diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index e8ee35a94..098763765 100644 --- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -29,20 +29,20 @@ namespace internal { * SuperInnerIterator to iterate through all supernodes * Function for triangular solve */ -template +template class MappedSuperNodalMatrix { public: typedef _Scalar Scalar; - typedef _Index Index; - typedef Matrix IndexVector; + typedef _StorageIndex StorageIndex; + typedef Matrix IndexVector; typedef Matrix ScalarVector; public: MappedSuperNodalMatrix() { } - MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + MappedSuperNodalMatrix(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col); @@ -58,7 +58,7 @@ class MappedSuperNodalMatrix * FIXME This class will be modified such that it can be use in the course * of the factorization. 
*/ - void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + void setInfos(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { m_row = m; @@ -75,12 +75,12 @@ class MappedSuperNodalMatrix /** * Number of rows */ - Index rows() { return m_row; } + StorageIndex rows() { return m_row; } /** * Number of columns */ - Index cols() { return m_col; } + StorageIndex cols() { return m_col; } /** * Return the array of nonzero values packed by column @@ -96,12 +96,12 @@ class MappedSuperNodalMatrix /** * Return the pointers to the beginning of each column in \ref valuePtr() */ - Index* colIndexPtr() + StorageIndex* colIndexPtr() { return m_nzval_colptr; } - const Index* colIndexPtr() const + const StorageIndex* colIndexPtr() const { return m_nzval_colptr; } @@ -109,9 +109,9 @@ class MappedSuperNodalMatrix /** * Return the array of compressed row indices of all supernodes */ - Index* rowIndex() { return m_rowind; } + StorageIndex* rowIndex() { return m_rowind; } - const Index* rowIndex() const + const StorageIndex* rowIndex() const { return m_rowind; } @@ -119,9 +119,9 @@ class MappedSuperNodalMatrix /** * Return the location in \em rowvaluePtr() which starts each column */ - Index* rowIndexPtr() { return m_rowind_colptr; } + StorageIndex* rowIndexPtr() { return m_rowind_colptr; } - const Index* rowIndexPtr() const + const StorageIndex* rowIndexPtr() const { return m_rowind_colptr; } @@ -129,18 +129,18 @@ class MappedSuperNodalMatrix /** * Return the array of column-to-supernode mapping */ - Index* colToSup() { return m_col_to_sup; } + StorageIndex* colToSup() { return m_col_to_sup; } - const Index* colToSup() const + const StorageIndex* colToSup() const { return m_col_to_sup; } /** * Return the array of supernode-to-column mapping */ - Index* supToCol() { return m_sup_to_col; } + StorageIndex* supToCol() { return m_sup_to_col; } - const Index* supToCol() const + const StorageIndex* supToCol() const { return m_sup_to_col; } @@ -148,7 +148,7 @@ class MappedSuperNodalMatrix /** * Return the number of supernodes */ - Index nsuper() const + StorageIndex nsuper() const { return m_nsuper; } @@ -161,15 +161,15 @@ class MappedSuperNodalMatrix protected: - Index m_row; // Number of rows - Index m_col; // Number of columns - Index m_nsuper; // Number of supernodes + StorageIndex m_row; // Number of rows + StorageIndex m_col; // Number of columns + StorageIndex m_nsuper; // Number of supernodes Scalar* m_nzval; //array of nonzero values packed by column - Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j - Index* m_rowind; // Array of compressed row indices of rectangular supernodes - Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j - Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs - Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode + StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j + StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes + StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j + StorageIndex* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs + StorageIndex* m_sup_to_col; //sup_to_col[s] points to 
the starting column of the s-th supernode private : }; @@ -182,9 +182,9 @@ template class MappedSuperNodalMatrix::InnerIterator { public: - InnerIterator(const MappedSuperNodalMatrix& mat, Index outer) + InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer) : m_matrix(mat), - m_outer(outer), + m_outer(convert_index(outer)), m_supno(mat.colToSup()[outer]), m_idval(mat.colIndexPtr()[outer]), m_startidval(m_idval), @@ -229,14 +229,14 @@ class MappedSuperNodalMatrix::InnerIterator * \brief Solve with the supernode triangular matrix * */ -template +template template -void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const +void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const { /* Explicit type conversion as the Index type of MatrixBase may be wider than Index */ - eigen_assert(X.rows() <= NumTraits::highest()); - eigen_assert(X.cols() <= NumTraits::highest()); - Index n = Index(X.rows()); +// eigen_assert(X.rows() <= NumTraits::highest()); +// eigen_assert(X.cols() <= NumTraits::highest()); + Index n = int(X.rows()); Index nrhs = Index(X.cols()); const Scalar * Lval = valuePtr(); // Nonzero values Matrix work(n, nrhs); // working vector diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 133211488..58bfc1cb4 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -21,7 +21,7 @@ namespace internal { template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; - typedef typename ReturnType::Index Index; + typedef typename ReturnType::StorageIndex StorageIndex; typedef typename ReturnType::StorageKind StorageKind; }; template struct traits > @@ -73,11 +73,11 @@ class SparseQR : public SparseSolverBase > typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix QRMatrixType; - typedef Matrix IndexVector; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix QRMatrixType; + typedef Matrix IndexVector; typedef Matrix ScalarVector; - typedef PermutationMatrix PermutationType; + typedef PermutationMatrix PermutationType; public: SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } @@ -109,11 +109,11 @@ class SparseQR : public SparseSolverBase > /** \returns the number of rows of the represented matrix. */ - inline Index rows() const { return m_pmat.rows(); } + inline StorageIndex rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ - inline Index cols() const { return m_pmat.cols();} + inline StorageIndex cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. 
*/ @@ -123,7 +123,7 @@ class SparseQR : public SparseSolverBase > * * \sa setPivotThreshold() */ - Index rank() const + StorageIndex rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; @@ -179,7 +179,7 @@ class SparseQR : public SparseSolverBase > b = y; // Solve with the triangular matrix R - y.resize((std::max)(cols(),Index(y.rows())),y.cols()); + y.resize((std::max)(cols(),y.rows()),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); y.bottomRows(y.rows()-rank).setZero(); @@ -260,7 +260,7 @@ class SparseQR : public SparseSolverBase > PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold - Index m_nonzeropivots; // Number of non zero pivots found + StorageIndex m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not @@ -289,9 +289,9 @@ void SparseQR::analyzePattern(const MatrixType& mat) // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); - Index n = mat.cols(); - Index m = mat.rows(); - Index diagSize = (std::min)(m,n); + StorageIndex n = mat.cols(); + StorageIndex m = mat.rows(); + StorageIndex diagSize = (std::min)(m,n); if (!m_perm_c.size()) { @@ -354,7 +354,7 @@ void SparseQR::factorize(const MatrixType& mat) // otherwise directly use the input matrix // IndexVector originalOuterIndicesCpy; - const Index *originalOuterIndices = mat.outerIndexPtr(); + const StorageIndex *originalOuterIndices = mat.outerIndexPtr(); if(MatrixType::IsRowMajor) { originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); @@ -385,11 +385,11 @@ void SparseQR::factorize(const MatrixType& mat) // Initialize the numerical permutation m_pivotperm.setIdentity(n); - Index nonzeroCol = 0; // Record the number of valid pivots + StorageIndex nonzeroCol = 0; // Record the number of valid pivots m_Q.startVec(0); // Left looking rank-revealing QR factorization: compute a column of R and Q at a time - for (Index col = 0; col < n; ++col) + for (StorageIndex col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); @@ -405,12 +405,12 @@ void SparseQR::factorize(const MatrixType& mat) // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. 
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { - Index curIdx = nonzeroCol; + StorageIndex curIdx = nonzeroCol; if(itp) curIdx = itp.row(); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R - Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here + StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here if (st < 0 ) { m_lastError = "Empty row found during numerical factorization"; @@ -467,7 +467,7 @@ void SparseQR::factorize(const MatrixType& mat) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { - Index iQ = itq.row(); + StorageIndex iQ = itq.row(); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, @@ -578,7 +578,7 @@ struct SparseQR_QProduct : ReturnByValue struct SparseQRMatrixQReturnType : public EigenBase > { - typedef typename SparseQRType::Index Index; + typedef typename SparseQRType::StorageIndex StorageIndex; typedef typename SparseQRType::Scalar Scalar; typedef Matrix DenseMatrix; explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} @@ -647,8 +647,8 @@ struct SparseQRMatrixQReturnType : public EigenBase(m_qr); } - inline Index rows() const { return m_qr.rows(); } - inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } + inline StorageIndex rows() const { return m_qr.rows(); } + inline StorageIndex cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index ef73587a7..f00bc3976 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -156,10 +156,10 @@ struct SluMatrix : SuperMatrix res.setScalarType(); res.Mtype = SLU_GE; - res.nrow = mat.rows(); - res.ncol = mat.cols(); + res.nrow = internal::convert_index(mat.rows()); + res.ncol = internal::convert_index(mat.cols()); - res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride(); + res.storage.lda = internal::convert_index(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride()); res.storage.values = (void*)(mat.data()); return res; } @@ -298,7 +298,7 @@ class SuperLUBase : public SparseSolverBase typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef Matrix IntRowVectorType; typedef Matrix IntColVectorType; @@ -313,8 +313,8 @@ class SuperLUBase : public SparseSolverBase clearFactors(); } - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } /** \returns a reference to the Super LU option object to configure the Super LU algorithms. 
*/ inline superlu_options_t& options() { return m_sluOptions; } @@ -457,7 +457,7 @@ class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> > typedef _MatrixType MatrixType; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::IntRowVectorType IntRowVectorType; typedef typename Base::IntColVectorType IntColVectorType; typedef typename Base::LUMatrixType LUMatrixType; @@ -616,8 +616,8 @@ void SuperLU::_solve_impl(const MatrixBase &b, MatrixBase { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); - const int size = m_matrix.rows(); - const int rhsCols = b.cols(); + const StorageIndex size = m_matrix.rows(); + const Index rhsCols = b.cols(); eigen_assert(size==b.rows()); m_sluOptions.Trans = NOTRANS; diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h index b8b216d5e..982aa2fca 100644 --- a/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -141,7 +141,7 @@ class UmfPackLU : public SparseSolverBase > typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef Matrix IntRowVectorType; typedef Matrix IntColVectorType; @@ -164,8 +164,8 @@ class UmfPackLU : public SparseSolverBase > if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); } - inline Index rows() const { return m_copyMatrix.rows(); } - inline Index cols() const { return m_copyMatrix.cols(); } + inline StorageIndex rows() const { return m_copyMatrix.rows(); } + inline StorageIndex cols() const { return m_copyMatrix.cols(); } /** \brief Reports whether previous computation was successful. * @@ -279,7 +279,7 @@ class UmfPackLU : public SparseSolverBase > void grapInput_impl(const InputMatrixType& mat, internal::true_type) { m_copyMatrix.resize(mat.rows(), mat.cols()); - if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() ) + if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::StorageIndex)!=sizeof(int) || !mat.isCompressed() ) { // non supported input -> copy m_copyMatrix = mat; @@ -397,7 +397,7 @@ template template bool UmfPackLU::_solve_impl(const MatrixBase &b, MatrixBase &x) const { - const int rhsCols = b.cols(); + Index rhsCols = b.cols(); eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet"); eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet"); eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve"); -- cgit v1.2.3
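
The hunks above touch Triplet, SparseVector and the sparse solvers, but the practical effect of the rename is easiest to see from user code. The following sketch is not part of the patch; it is a minimal example written against the post-change sparse API (SparseMatrix, Triplet, setFromTriplets and the global Eigen::Index typedef, as shipped in released Eigen 3.3), and it only illustrates the convention this commit establishes: sizes and loop counters use the signed Eigen::Index, while the indices actually stored inside a sparse object use its StorageIndex template parameter.

    #include <Eigen/SparseCore>
    #include <iostream>
    #include <vector>

    int main()
    {
      // Third template parameter is the StorageIndex (int by default).
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat;
      // Triplet is now parameterized on StorageIndex instead of Index.
      typedef Eigen::Triplet<double, int> T;

      std::vector<T> coeffs;
      coeffs.push_back(T(0, 0, 1.0));
      coeffs.push_back(T(1, 2, 2.0));
      coeffs.push_back(T(2, 1, 3.0));

      SpMat A(3, 3);
      A.setFromTriplets(coeffs.begin(), coeffs.end());

      // Loop counters and sizes: Eigen::Index (signed, typically std::ptrdiff_t).
      // Inner indices returned by the iterator: the matrix's StorageIndex.
      for (Eigen::Index k = 0; k < A.outerSize(); ++k)
        for (SpMat::InnerIterator it(A, k); it; ++it)
          std::cout << it.row() << "," << it.col() << " -> " << it.value() << "\n";

      return 0;
    }

Keeping the two types distinct lets the stored indices stay compact (for example 32-bit) while size arithmetic is carried out in the wider Eigen::Index; explicit convert_index() calls such as those visible in the SparseVector, MappedSuperNodalMatrix and SuperLU hunks replace the previous implicit conversions, which is how the commit removes the Index conversion warnings mentioned in its subject.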