From aaaade4b3d66d67d2c08af3372c3965e7255b2e8 Mon Sep 17 00:00:00 2001 From: Benoit Jacob Date: Sun, 30 May 2010 16:00:58 -0400 Subject: the Index types change. As discussed on the list (too long to explain here). --- Eigen/src/Array/Array.h | 10 +- Eigen/src/Array/ArrayBase.h | 8 +- Eigen/src/Array/ArrayWrapper.h | 48 +++--- Eigen/src/Array/BooleanRedux.h | 12 +- Eigen/src/Array/Random.h | 23 +-- Eigen/src/Array/Replicate.h | 26 ++-- Eigen/src/Array/Reverse.h | 20 +-- Eigen/src/Array/Select.h | 8 +- Eigen/src/Array/VectorwiseOp.h | 29 ++-- Eigen/src/Cholesky/LDLT.h | 37 ++--- Eigen/src/Cholesky/LLT.h | 29 ++-- Eigen/src/Core/Assign.h | 92 +++++------ Eigen/src/Core/BandMatrix.h | 57 +++---- Eigen/src/Core/Block.h | 162 ++++++++++---------- Eigen/src/Core/CommaInitializer.h | 10 +- Eigen/src/Core/CwiseBinaryOp.h | 12 +- Eigen/src/Core/CwiseNullaryOp.h | 125 +++++++-------- Eigen/src/Core/CwiseUnaryOp.h | 17 +-- Eigen/src/Core/CwiseUnaryView.h | 23 ++- Eigen/src/Core/DenseBase.h | 134 ++++++++-------- Eigen/src/Core/DenseCoeffsBase.h | 137 +++++++++-------- Eigen/src/Core/DenseStorageBase.h | 138 +++++++++-------- Eigen/src/Core/Diagonal.h | 86 +++++------ Eigen/src/Core/DiagonalMatrix.h | 24 +-- Eigen/src/Core/DiagonalProduct.h | 10 +- Eigen/src/Core/Dot.h | 4 +- Eigen/src/Core/EigenBase.h | 9 +- Eigen/src/Core/Flagged.h | 24 +-- Eigen/src/Core/ForceAlignedAccess.h | 24 +-- Eigen/src/Core/Functors.h | 27 ++-- Eigen/src/Core/Fuzzy.h | 7 +- Eigen/src/Core/IO.h | 13 +- Eigen/src/Core/Map.h | 8 +- Eigen/src/Core/MapBase.h | 39 ++--- Eigen/src/Core/MathFunctions.h | 2 +- Eigen/src/Core/Matrix.h | 10 +- Eigen/src/Core/MatrixBase.h | 48 +++--- Eigen/src/Core/MatrixStorage.h | 96 ++++++------ Eigen/src/Core/NestByValue.h | 24 +-- Eigen/src/Core/Product.h | 20 ++- Eigen/src/Core/ProductBase.h | 14 +- Eigen/src/Core/Redux.h | 41 ++--- Eigen/src/Core/ReturnByValue.h | 13 +- Eigen/src/Core/SelfAdjointView.h | 24 +-- Eigen/src/Core/SelfCwiseBinaryOp.h | 20 +-- Eigen/src/Core/SolveTriangular.h | 49 +++--- Eigen/src/Core/StableNorm.h | 16 +- Eigen/src/Core/Stride.h | 4 +- Eigen/src/Core/Swap.h | 20 +-- Eigen/src/Core/Transpose.h | 24 +-- Eigen/src/Core/TriangularMatrix.h | 115 +++++++------- Eigen/src/Core/VectorBlock.h | 38 ++--- Eigen/src/Core/Visitor.h | 65 ++++---- Eigen/src/Core/products/CoeffBasedProduct.h | 122 ++++++++------- Eigen/src/Core/products/GeneralBlockPanelKernel.h | 130 ++++++++-------- Eigen/src/Core/products/GeneralMatrixMatrix.h | 82 +++++----- Eigen/src/Core/products/GeneralMatrixVector.h | 136 ++++++++--------- Eigen/src/Core/products/Parallelizer.h | 28 ++-- Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 176 +++++++++++----------- Eigen/src/Core/products/SelfadjointMatrixVector.h | 22 +-- Eigen/src/Core/products/SelfadjointProduct.h | 72 ++++----- Eigen/src/Core/products/SelfadjointRank2Update.h | 24 +-- Eigen/src/Core/products/TriangularMatrixMatrix.h | 142 ++++++++--------- Eigen/src/Core/products/TriangularMatrixVector.h | 42 +++--- Eigen/src/Core/products/TriangularSolverMatrix.h | 154 +++++++++---------- Eigen/src/Core/util/BlasUtil.h | 32 ++-- Eigen/src/Core/util/Macros.h | 12 ++ Eigen/src/Core/util/Memory.h | 8 +- Eigen/src/Core/util/XprHelper.h | 32 ++-- Eigen/src/Eigen2Support/Block.h | 10 +- Eigen/src/Eigen2Support/Minor.h | 17 ++- Eigen/src/Eigen2Support/VectorBlock.h | 16 +- Eigen/src/Eigenvalues/ComplexEigenSolver.h | 15 +- Eigen/src/Eigenvalues/ComplexSchur.h | 20 +-- Eigen/src/Eigenvalues/EigenSolver.h | 33 ++-- Eigen/src/Eigenvalues/HessenbergDecomposition.h 
| 16 +- Eigen/src/Eigenvalues/RealSchur.h | 52 ++++--- Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 29 ++-- Eigen/src/Eigenvalues/Tridiagonalization.h | 15 +- Eigen/src/Geometry/AlignedBox.h | 15 +- Eigen/src/Geometry/EulerAngles.h | 10 +- Eigen/src/Geometry/Homogeneous.h | 16 +- Eigen/src/Geometry/Hyperplane.h | 9 +- Eigen/src/Geometry/OrthoMethods.h | 5 +- Eigen/src/Geometry/ParametrizedLine.h | 5 +- Eigen/src/Geometry/Quaternion.h | 7 +- Eigen/src/Geometry/Transform.h | 11 +- Eigen/src/Geometry/Umeyama.h | 7 +- Eigen/src/Householder/HouseholderSequence.h | 46 +++--- Eigen/src/Jacobi/Jacobi.h | 33 ++-- Eigen/src/LU/FullPivLU.h | 82 +++++----- Eigen/src/LU/Inverse.h | 7 +- Eigen/src/LU/PartialPivLU.h | 65 ++++---- Eigen/src/QR/ColPivHouseholderQR.h | 51 +++---- Eigen/src/QR/FullPivHouseholderQR.h | 53 +++---- Eigen/src/QR/HouseholderQR.h | 23 +-- Eigen/src/SVD/JacobiSVD.h | 47 +++--- Eigen/src/SVD/SVD.h | 27 ++-- Eigen/src/SVD/UpperBidiagonalization.h | 11 +- Eigen/src/Sparse/AmbiVector.h | 65 ++++---- Eigen/src/Sparse/CholmodSupport.h | 6 +- Eigen/src/Sparse/CompressedStorage.h | 31 ++-- Eigen/src/Sparse/CoreIterators.h | 16 +- Eigen/src/Sparse/DynamicSparseMatrix.h | 92 +++++------ Eigen/src/Sparse/MappedSparseMatrix.h | 78 +++++----- Eigen/src/Sparse/RandomSetter.h | 68 +++++---- Eigen/src/Sparse/SparseBlock.h | 105 +++++++------ Eigen/src/Sparse/SparseCwiseBinaryOp.h | 47 +++--- Eigen/src/Sparse/SparseCwiseUnaryOp.h | 22 +-- Eigen/src/Sparse/SparseDiagonalProduct.h | 16 +- Eigen/src/Sparse/SparseLDLT.h | 75 ++++----- Eigen/src/Sparse/SparseLLT.h | 9 +- Eigen/src/Sparse/SparseMatrix.h | 160 ++++++++++---------- Eigen/src/Sparse/SparseMatrixBase.h | 55 +++---- Eigen/src/Sparse/SparseProduct.h | 38 ++--- Eigen/src/Sparse/SparseRedux.h | 2 +- Eigen/src/Sparse/SparseSelfAdjointView.h | 13 +- Eigen/src/Sparse/SparseTranspose.h | 22 +-- Eigen/src/Sparse/SparseTriangularView.h | 13 +- Eigen/src/Sparse/SparseUtil.h | 30 ++-- Eigen/src/Sparse/SparseVector.h | 90 +++++------ Eigen/src/misc/Image.h | 11 +- Eigen/src/misc/Kernel.h | 11 +- Eigen/src/misc/Solve.h | 7 +- 124 files changed, 2666 insertions(+), 2458 deletions(-) (limited to 'Eigen') diff --git a/Eigen/src/Array/Array.h b/Eigen/src/Array/Array.h index e9fabcc73..30d5529fd 100644 --- a/Eigen/src/Array/Array.h +++ b/Eigen/src/Array/Array.h @@ -101,7 +101,7 @@ class Array * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * - * \sa resize(int,int) + * \sa resize(Index,Index) */ EIGEN_STRONG_INLINE explicit Array() : Base() { @@ -126,7 +126,7 @@ class Array * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Matrix() instead. */ - EIGEN_STRONG_INLINE explicit Array(int dim) + EIGEN_STRONG_INLINE explicit Array(Index dim) : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) { Base::_check_template_params(); @@ -149,7 +149,7 @@ class Array * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. 
*/ - Array(int rows, int cols); + Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients */ Array(const Scalar& x, const Scalar& y); #endif @@ -217,8 +217,8 @@ class Array void swap(ArrayBase EIGEN_REF_TO_TEMPORARY other) { this->_swap(other.derived()); } - inline int innerStride() const { return 1; } - inline int outerStride() const { return this->innerSize(); } + inline Index innerStride() const { return 1; } + inline Index outerStride() const { return this->innerSize(); } #ifdef EIGEN_ARRAY_PLUGIN #include EIGEN_ARRAY_PLUGIN diff --git a/Eigen/src/Array/ArrayBase.h b/Eigen/src/Array/ArrayBase.h index b835a57ad..ccbc77202 100644 --- a/Eigen/src/Array/ArrayBase.h +++ b/Eigen/src/Array/ArrayBase.h @@ -60,8 +60,11 @@ template class ArrayBase using ei_special_scalar_op_base::Scalar, typename NumTraits::Scalar>::Real>::operator*; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; using Base::RowsAtCompileTime; @@ -88,7 +91,6 @@ template class ArrayBase using Base::operator*=; using Base::operator/=; - typedef typename Base::RealScalar RealScalar; typedef typename Base::CoeffReturnType CoeffReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN @@ -161,8 +163,8 @@ template class ArrayBase ArrayBase() : Base() {} private: - explicit ArrayBase(int); - ArrayBase(int,int); + explicit ArrayBase(Index); + ArrayBase(Index,Index); template explicit ArrayBase(const ArrayBase&); }; diff --git a/Eigen/src/Array/ArrayWrapper.h b/Eigen/src/Array/ArrayWrapper.h index 83cd8bac5..98d388d67 100644 --- a/Eigen/src/Array/ArrayWrapper.h +++ b/Eigen/src/Array/ArrayWrapper.h @@ -53,51 +53,51 @@ class ArrayWrapper : public ArrayBase > inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {} - inline int rows() const { return m_expression.rows(); } - inline int cols() const { return m_expression.cols(); } - inline int outerStride() const { return m_expression.outerStride(); } - inline int innerStride() const { return m_expression.innerStride(); } + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(int index) const + inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet(row, col); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(row, col, x); } template - inline const PacketScalar packet(int index) 
const + inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(index, x); } @@ -138,51 +138,51 @@ class MatrixWrapper : public MatrixBase > inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {} - inline int rows() const { return m_expression.rows(); } - inline int cols() const { return m_expression.cols(); } - inline int outerStride() const { return m_expression.outerStride(); } - inline int innerStride() const { return m_expression.innerStride(); } + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(int index) const + inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet(row, col); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(row, col, x); } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(index, x); } diff --git a/Eigen/src/Array/BooleanRedux.h b/Eigen/src/Array/BooleanRedux.h index 9c6985a29..67c29f546 100644 --- a/Eigen/src/Array/BooleanRedux.h +++ b/Eigen/src/Array/BooleanRedux.h @@ -97,8 +97,8 @@ inline bool DenseBase::all() const >::run(derived()); else { - for(int j = 0; j < cols(); ++j) - for(int i = 0; i < rows(); ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) if (!coeff(i, j)) return false; return true; } @@ -121,8 +121,8 @@ inline bool DenseBase::any() const >::run(derived()); else { - for(int j = 0; j < cols(); ++j) - for(int i = 0; i < rows(); ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) if (coeff(i, j)) return true; return false; } @@ -135,9 +135,9 @@ inline bool DenseBase::any() const * \sa all(), any() */ template -inline int DenseBase::count() const +inline typename DenseBase::Index DenseBase::count() const { - return derived().template cast().template cast().sum(); + return derived().template cast().template cast().sum(); } #endif // EIGEN_ALLANDANY_H diff --git a/Eigen/src/Array/Random.h b/Eigen/src/Array/Random.h index 9a81c7bb5..c4c482bfa 100644 --- a/Eigen/src/Array/Random.h +++ 
b/Eigen/src/Array/Random.h @@ -27,7 +27,8 @@ template struct ei_scalar_random_op { EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_random_op) - inline const Scalar operator() (int, int = 0) const { return ei_random(); } + template + inline const Scalar operator() (Index, Index = 0) const { return ei_random(); } }; template struct ei_functor_traits > @@ -51,11 +52,11 @@ struct ei_functor_traits > * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * - * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random() + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random() */ template inline const CwiseNullaryOp::Scalar>, Derived> -DenseBase::Random(int rows, int cols) +DenseBase::Random(Index rows, Index cols) { return NullaryExpr(rows, cols, ei_scalar_random_op()); } @@ -80,11 +81,11 @@ DenseBase::Random(int rows, int cols) * a temporary vector whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * - * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random() + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random() */ template inline const CwiseNullaryOp::Scalar>, Derived> -DenseBase::Random(int size) +DenseBase::Random(Index size) { return NullaryExpr(size, ei_scalar_random_op()); } @@ -103,7 +104,7 @@ DenseBase::Random(int size) * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * - * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int) + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index) */ template inline const CwiseNullaryOp::Scalar>, Derived> @@ -119,7 +120,7 @@ DenseBase::Random() * Example: \include MatrixBase_setRandom.cpp * Output: \verbinclude MatrixBase_setRandom.out * - * \sa class CwiseNullaryOp, setRandom(int), setRandom(int,int) + * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index) */ template inline Derived& DenseBase::setRandom() @@ -134,11 +135,11 @@ inline Derived& DenseBase::setRandom() * Example: \include Matrix_setRandom_int.cpp * Output: \verbinclude Matrix_setRandom_int.out * - * \sa MatrixBase::setRandom(), setRandom(int,int), class CwiseNullaryOp, MatrixBase::Random() + * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setRandom(int size) +DenseStorageBase::setRandom(Index size) { resize(size); return setRandom(); @@ -152,11 +153,11 @@ DenseStorageBase::setRandom(int size) * Example: \include Matrix_setRandom_int_int.cpp * Output: \verbinclude Matrix_setRandom_int_int.out * - * \sa MatrixBase::setRandom(), setRandom(int), class CwiseNullaryOp, MatrixBase::Random() + * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setRandom(int rows, int cols) +DenseStorageBase::setRandom(Index rows, Index cols) { resize(rows, cols); return setRandom(); diff --git a/Eigen/src/Array/Replicate.h b/Eigen/src/Array/Replicate.h index 63e4683b7..c60d9903b 100644 --- a/Eigen/src/Array/Replicate.h +++ b/Eigen/src/Array/Replicate.h @@ -90,28 +90,28 @@ template class Replicate THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) } - inline int 
rows() const { return m_matrix.rows() * m_rowFactor.value(); } - inline int cols() const { return m_matrix.cols() * m_colFactor.value(); } + inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } + inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy - const int actual_row = ei_traits::RowsAtCompileTime==1 ? 0 + const Index actual_row = ei_traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row%m_matrix.rows(); - const int actual_col = ei_traits::ColsAtCompileTime==1 ? 0 + const Index actual_col = ei_traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col%m_matrix.cols(); return m_matrix.coeff(actual_row, actual_col); } template - inline PacketScalar packet(int row, int col) const + inline PacketScalar packet(Index row, Index col) const { - const int actual_row = ei_traits::RowsAtCompileTime==1 ? 0 + const Index actual_row = ei_traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row%m_matrix.rows(); - const int actual_col = ei_traits::ColsAtCompileTime==1 ? 0 + const Index actual_col = ei_traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col%m_matrix.cols(); @@ -121,8 +121,8 @@ template class Replicate protected: const typename MatrixType::Nested m_matrix; - const ei_int_if_dynamic m_rowFactor; - const ei_int_if_dynamic m_colFactor; + const ei_variable_if_dynamic m_rowFactor; + const ei_variable_if_dynamic m_colFactor; }; /** \nonstableyet @@ -131,7 +131,7 @@ template class Replicate * Example: \include MatrixBase_replicate.cpp * Output: \verbinclude MatrixBase_replicate.out * - * \sa VectorwiseOp::replicate(), DenseBase::replicate(int,int), class Replicate + * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate */ template template @@ -151,7 +151,7 @@ DenseBase::replicate() const */ template inline const Replicate -DenseBase::replicate(int rowFactor,int colFactor) const +DenseBase::replicate(Index rowFactor,Index colFactor) const { return Replicate(derived(),rowFactor,colFactor); } @@ -166,7 +166,7 @@ DenseBase::replicate(int rowFactor,int colFactor) const */ template const typename VectorwiseOp::ReplicateReturnType -VectorwiseOp::replicate(int factor) const +VectorwiseOp::replicate(Index factor) const { return typename VectorwiseOp::ReplicateReturnType (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); diff --git a/Eigen/src/Array/Reverse.h b/Eigen/src/Array/Reverse.h index 0f56d5d5c..cca425142 100644 --- a/Eigen/src/Array/Reverse.h +++ b/Eigen/src/Array/Reverse.h @@ -103,33 +103,33 @@ template class Reverse EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row, ReverseCol ? m_matrix.cols() - col - 1 : col); } - inline const Scalar coeff(int row, int col) const + inline const Scalar coeff(Index row, Index col) const { return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row, ReverseCol ? 
m_matrix.cols() - col - 1 : col); } - inline const Scalar coeff(int index) const + inline const Scalar coeff(Index index) const { return m_matrix.coeff(m_matrix.size() - index - 1); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return reverse_packet::run(m_matrix.template packet( ReverseRow ? m_matrix.rows() - row - OffsetRow : row, @@ -137,7 +137,7 @@ template class Reverse } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_matrix.const_cast_derived().template writePacket( ReverseRow ? m_matrix.rows() - row - OffsetRow : row, @@ -146,13 +146,13 @@ template class Reverse } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) const { return ei_preverse(m_matrix.template packet( m_matrix.size() - index - PacketSize )); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_matrix.const_cast_derived().template writePacket(m_matrix.size() - index - PacketSize, ei_preverse(x)); } diff --git a/Eigen/src/Array/Select.h b/Eigen/src/Array/Select.h index 100a26445..8834156b6 100644 --- a/Eigen/src/Array/Select.h +++ b/Eigen/src/Array/Select.h @@ -81,10 +81,10 @@ class Select : ei_no_assignment_operator, ei_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols()); } - int rows() const { return m_condition.rows(); } - int cols() const { return m_condition.cols(); } + Index rows() const { return m_condition.rows(); } + Index cols() const { return m_condition.cols(); } - const Scalar coeff(int i, int j) const + const Scalar coeff(Index i, Index j) const { if (m_condition.coeff(i,j)) return m_then.coeff(i,j); @@ -92,7 +92,7 @@ class Select : ei_no_assignment_operator, return m_else.coeff(i,j); } - const Scalar coeff(int i) const + const Scalar coeff(Index i) const { if (m_condition.coeff(i)) return m_then.coeff(i); diff --git a/Eigen/src/Array/VectorwiseOp.h b/Eigen/src/Array/VectorwiseOp.h index c1f17f641..e338a9193 100644 --- a/Eigen/src/Array/VectorwiseOp.h +++ b/Eigen/src/Array/VectorwiseOp.h @@ -89,10 +89,10 @@ class PartialReduxExpr : ei_no_assignment_operator, PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp()) : m_matrix(mat), m_functor(func) {} - int rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); } - int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); } + Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); } + Index cols() const { return (Direction==Horizontal ? 
1 : m_matrix.cols()); } - const Scalar coeff(int i, int j) const + const Scalar coeff(Index i, Index j) const { if (Direction==Vertical) return m_functor(m_matrix.col(j)); @@ -100,7 +100,7 @@ class PartialReduxExpr : ei_no_assignment_operator, return m_functor(m_matrix.row(i)); } - const Scalar coeff(int index) const + const Scalar coeff(Index index) const { if (Direction==Vertical) return m_functor(m_matrix.col(index)); @@ -177,7 +177,8 @@ template class VectorwiseOp { public: - typedef typename ei_traits::Scalar Scalar; + typedef typename ExpressionType::Scalar Scalar; + typedef typename ExpressionType::Index Index; typedef typename ei_meta_if::ret, ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; @@ -209,14 +210,14 @@ template class VectorwiseOp typedef typename ei_meta_if::ret SubVector; - SubVector subVector(int i) + SubVector subVector(Index i) { return SubVector(m_matrix.derived(),i); } /** \internal * \returns the number of subvectors in the direction \c Direction */ - int subVectors() const + Index subVectors() const { return Direction==Vertical?m_matrix.cols():m_matrix.rows(); } template struct ExtendedType { @@ -362,7 +363,7 @@ template class VectorwiseOp * Output: \verbinclude PartialRedux_count.out * * \sa DenseBase::count() */ - const PartialReduxExpr, Direction> count() const + const PartialReduxExpr, Direction> count() const { return _expression(); } /** \returns a row (or column) vector expression of the product @@ -387,7 +388,7 @@ template class VectorwiseOp { return Reverse( _expression() ); } typedef Replicate ReplicateReturnType; - const ReplicateReturnType replicate(int factor) const; + const ReplicateReturnType replicate(Index factor) const; /** \nonstableyet * \return an expression of the replication of each column (or row) of \c *this @@ -395,11 +396,11 @@ template class VectorwiseOp * Example: \include DirectionWise_replicate.cpp * Output: \verbinclude DirectionWise_replicate.out * - * \sa VectorwiseOp::replicate(int), DenseBase::replicate(), class Replicate + * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate */ // NOTE implemented here because of sunstudio's compilation errors template const Replicate - replicate(int factor = Factor) const + replicate(Index factor = Factor) const { return Replicate (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); @@ -413,7 +414,7 @@ template class VectorwiseOp { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) //ei_assert((m_matrix.isNull()) == (other.isNull())); FIXME - for(int j=0; j(m_matrix); } @@ -423,7 +424,7 @@ template class VectorwiseOp ExpressionType& operator+=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) - for(int j=0; j(m_matrix); } @@ -433,7 +434,7 @@ template class VectorwiseOp ExpressionType& operator-=(const DenseBase& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) - for(int j=0; j(m_matrix); } diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 206ccef4d..a433f8d0f 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -65,7 +65,8 @@ template class LDLT }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_plain_col_type::type IntColVectorType; + typedef typename MatrixType::Index Index; + typedef typename ei_plain_col_type::type IntColVectorType; typedef Matrix TmpMatrixType; /** \brief Default Constructor. @@ -81,7 +82,7 @@ template class LDLT * according to the specified problem \a size. 
* \sa LDLT() */ - LDLT(int size) : m_matrix(size, size), + LDLT(Index size) : m_matrix(size, size), m_p(size), m_transpositions(size), m_temporary(size), @@ -168,8 +169,8 @@ template class LDLT MatrixType reconstructedMatrix() const; - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } protected: /** \internal @@ -182,7 +183,7 @@ template class LDLT IntColVectorType m_p; IntColVectorType m_transpositions; // FIXME do we really need to store permanently the transpositions? TmpMatrixType m_temporary; - int m_sign; + Index m_sign; bool m_isInitialized; }; @@ -192,7 +193,7 @@ template LDLT& LDLT::compute(const MatrixType& a) { ei_assert(a.rows()==a.cols()); - const int size = a.rows(); + const Index size = a.rows(); m_matrix = a; @@ -215,10 +216,10 @@ LDLT& LDLT::compute(const MatrixType& a) // have optimal alignment. m_temporary.resize(size); - for (int j = 0; j < size; ++j) + for (Index j = 0; j < size; ++j) { // Find largest diagonal element - int index_of_biggest_in_corner; + Index index_of_biggest_in_corner; biggest_in_corner = m_matrix.diagonal().tail(size-j).cwiseAbs() .maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += j; @@ -236,7 +237,7 @@ LDLT& LDLT::compute(const MatrixType& a) // Finish early if the matrix is not full rank. if(biggest_in_corner < cutoff) { - for(int i = j; i < size; i++) m_transpositions.coeffRef(i) = i; + for(Index i = j; i < size; i++) m_transpositions.coeffRef(i) = i; break; } @@ -256,7 +257,7 @@ LDLT& LDLT::compute(const MatrixType& a) RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).head(j).dot(m_matrix.col(j).head(j))); m_matrix.coeffRef(j,j) = Djj; - int endSize = size - j - 1; + Index endSize = size - j - 1; if (endSize > 0) { m_temporary.tail(endSize).noalias() = m_matrix.block(j+1,0, endSize, j) * m_matrix.col(j).head(j).conjugate(); @@ -272,8 +273,8 @@ LDLT& LDLT::compute(const MatrixType& a) } // Reverse applied swaps to get P matrix. 
- for(int k = 0; k < size; ++k) m_p.coeffRef(k) = k; - for(int k = size-1; k >= 0; --k) { + for(Index k = 0; k < size; ++k) m_p.coeffRef(k) = k; + for(Index k = size-1; k >= 0; --k) { std::swap(m_p.coeffRef(k), m_p.coeffRef(m_transpositions.coeff(k))); } @@ -310,11 +311,11 @@ template bool LDLT::solveInPlace(MatrixBase &bAndX) const { ei_assert(m_isInitialized && "LDLT is not initialized."); - const int size = m_matrix.rows(); + const Index size = m_matrix.rows(); ei_assert(size == bAndX.rows()); // z = P b - for(int i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i)); + for(Index i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i)); // y = L^-1 z //matrixL().solveInPlace(bAndX); @@ -327,7 +328,7 @@ bool LDLT::solveInPlace(MatrixBase &bAndX) const m_matrix.adjoint().template triangularView().solveInPlace(bAndX); // x = P^T u - for (int i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i)); + for (Index i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i)); return true; } @@ -339,12 +340,12 @@ template MatrixType LDLT::reconstructedMatrix() const { ei_assert(m_isInitialized && "LDLT is not initialized."); - const int size = m_matrix.rows(); + const Index size = m_matrix.rows(); MatrixType res(size,size); res.setIdentity(); // PI - for(int i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i)); + for(Index i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i)); // L^* P res = matrixL().adjoint() * res; // D(L^*P) @@ -352,7 +353,7 @@ MatrixType LDLT::reconstructedMatrix() const // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) - for (int i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i)); + for (Index i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i)); return res; } diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 22d0c91c8..29fa465e1 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -65,6 +65,7 @@ template class LLT }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; enum { PacketSize = ei_packet_traits::size, @@ -88,7 +89,7 @@ template class LLT * according to the specified problem \a size. 
* \sa LLT() */ - LLT(int size) : m_matrix(size, size), + LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} LLT(const MatrixType& matrix) @@ -149,8 +150,8 @@ template class LLT MatrixType reconstructedMatrix() const; - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } protected: /** \internal @@ -171,11 +172,12 @@ template<> struct ei_llt_inplace { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; ei_assert(mat.rows()==mat.cols()); - const int size = mat.rows(); - for(int k = 0; k < size; ++k) + const Index size = mat.rows(); + for(Index k = 0; k < size; ++k) { - int rs = size-k-1; // remaining size + Index rs = size-k-1; // remaining size Block A21(mat,k+1,k,rs,1); Block A10(mat,k,0,1,k); @@ -195,19 +197,20 @@ template<> struct ei_llt_inplace template static bool blocked(MatrixType& m) { + typedef typename MatrixType::Index Index; ei_assert(m.rows()==m.cols()); - int size = m.rows(); + Index size = m.rows(); if(size<32) return unblocked(m); - int blockSize = size/8; + Index blockSize = size/8; blockSize = (blockSize/16)*16; - blockSize = std::min(std::max(blockSize,8), 128); + blockSize = std::min(std::max(blockSize,Index(8)), Index(128)); - for (int k=0; k A11(m,k, k, bs,bs); Block A21(m,k+bs,k, rs,bs); @@ -266,7 +269,7 @@ template LLT& LLT::compute(const MatrixType& a) { assert(a.rows()==a.cols()); - const int size = a.rows(); + const Index size = a.rows(); m_matrix.resize(size, size); m_matrix = a; diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h index eb7bca1da..494df7bd5 100644 --- a/Eigen/src/Core/Assign.h +++ b/Eigen/src/Core/Assign.h @@ -254,12 +254,13 @@ struct ei_assign_impl; template struct ei_assign_impl { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - const int innerSize = dst.innerSize(); - const int outerSize = dst.outerSize(); - for(int outer = 0; outer < outerSize; ++outer) - for(int inner = 0; inner < innerSize; ++inner) + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; ++inner) dst.copyCoeffByOuterInner(outer, inner, src); } }; @@ -277,10 +278,11 @@ struct ei_assign_impl template struct ei_assign_impl { + typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - const int outerSize = dst.outerSize(); - for(int outer = 0; outer < outerSize; ++outer) + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) ei_assign_DefaultTraversal_InnerUnrolling ::run(dst, src, outer); } @@ -293,10 +295,11 @@ struct ei_assign_impl template struct ei_assign_impl { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - const int size = dst.size(); - for(int i = 0; i < size; ++i) + const Index size = dst.size(); + for(Index i = 0; i < size; ++i) dst.copyCoeff(i, src); } }; @@ -318,13 +321,14 @@ struct ei_assign_impl template struct ei_assign_impl { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - const int innerSize = dst.innerSize(); - const int outerSize = dst.outerSize(); - const int packetSize = ei_packet_traits::size; - 
for(int outer = 0; outer < outerSize; ++outer) - for(int inner = 0; inner < innerSize; inner+=packetSize) + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index packetSize = ei_packet_traits::size; + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; inner+=packetSize) dst.template copyPacketByOuterInner(outer, inner, src); } }; @@ -342,10 +346,11 @@ struct ei_assign_impl struct ei_assign_impl { + typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - const int outerSize = dst.outerSize(); - for(int outer = 0; outer < outerSize; ++outer) + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) ei_assign_innervec_InnerUnrolling ::run(dst, src, outer); } @@ -359,7 +364,7 @@ template struct ei_unaligned_assign_impl { template - static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, int, int) {} + static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {} }; template <> @@ -369,13 +374,13 @@ struct ei_unaligned_assign_impl // packet access path. #ifdef _MSC_VER template - static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end) + static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) #else template - static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end) + static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) #endif { - for (int index = start; index < end; ++index) + for (typename Derived::Index index = start; index < end; ++index) dst.copyCoeff(index, src); } }; @@ -383,17 +388,18 @@ struct ei_unaligned_assign_impl template struct ei_assign_impl { + typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - const int size = dst.size(); - const int packetSize = ei_packet_traits::size; - const int alignedStart = ei_assign_traits::DstIsAligned ? 0 - : ei_first_aligned(&dst.coeffRef(0), size); - const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; + const Index size = dst.size(); + const Index packetSize = ei_packet_traits::size; + const Index alignedStart = ei_assign_traits::DstIsAligned ? 
0 + : ei_first_aligned(&dst.coeffRef(0), size); + const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; ei_unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); - for(int index = alignedStart; index < alignedEnd; index += packetSize) + for(Index index = alignedStart; index < alignedEnd; index += packetSize) { dst.template copyPacket::JointAlignment>(index, src); } @@ -405,11 +411,12 @@ struct ei_assign_impl struct ei_assign_impl { + typedef typename Derived1::Index Index; EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) { - const int size = Derived1::SizeAtCompileTime; - const int packetSize = ei_packet_traits::size; - const int alignedSize = (size/packetSize)*packetSize; + const Index size = Derived1::SizeAtCompileTime; + const Index packetSize = ei_packet_traits::size; + const Index alignedSize = (size/packetSize)*packetSize; ei_assign_innervec_CompleteUnrolling::run(dst, src); ei_assign_DefaultTraversal_CompleteUnrolling::run(dst, src); @@ -423,32 +430,33 @@ struct ei_assign_impl struct ei_assign_impl { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - const int packetSize = ei_packet_traits::size; - const int packetAlignedMask = packetSize - 1; - const int innerSize = dst.innerSize(); - const int outerSize = dst.outerSize(); - const int alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask; - int alignedStart = ei_assign_traits::DstIsAligned ? 0 - : ei_first_aligned(&dst.coeffRef(0,0), innerSize); - - for(int outer = 0; outer < outerSize; ++outer) + const Index packetSize = ei_packet_traits::size; + const Index packetAlignedMask = packetSize - 1; + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask; + Index alignedStart = ei_assign_traits::DstIsAligned ? 
0 + : ei_first_aligned(&dst.coeffRef(0,0), innerSize); + + for(Index outer = 0; outer < outerSize; ++outer) { - const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); + const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); // do the non-vectorizable part of the assignment - for(int inner = 0; inner(outer, inner, src); // do the non-vectorizable part of the assignment - for(int inner = alignedEnd; inner((alignedStart+alignedStep)%packetSize, innerSize); + alignedStart = std::min((alignedStart+alignedStep)%packetSize, innerSize); } } }; diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index 432df0b34..fbe7e394b 100644 --- a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -46,6 +46,7 @@ template > { typedef _Scalar Scalar; + typedef Dense StorageKind; enum { CoeffReadCost = NumTraits::ReadCost, RowsAtCompileTime = Rows, @@ -71,6 +72,7 @@ class BandMatrix : public EigenBase::Scalar Scalar; typedef Matrix DenseMatrixType; + typedef typename DenseMatrixType::Index Index; protected: enum { @@ -83,7 +85,7 @@ class BandMatrix : public EigenBase col(int i) + inline Block col(Index i) { EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); - int start = 0; - int len = m_data.rows(); + Index start = 0; + Index len = m_data.rows(); if (i<=supers()) { start = supers()-i; - len = std::min(rows(),std::max(0,m_data.rows() - (supers()-i))); + len = std::min(rows(),std::max(0,m_data.rows() - (supers()-i))); } else if (i>=rows()-subs()) - len = std::max(0,m_data.rows() - (i + 1 - rows() + subs())); + len = std::max(0,m_data.rows() - (i + 1 - rows() + subs())); return Block(m_data, start, i, len, 1); } @@ -146,30 +148,30 @@ class BandMatrix : public EigenBase::ret Type; }; - /** \returns a vector expression of the \a Index -th sub or super diagonal */ - template inline typename DiagonalIntReturnType::Type diagonal() + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline typename DiagonalIntReturnType::Type diagonal() { - return typename DiagonalIntReturnType::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index)); + return typename DiagonalIntReturnType::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N)); } - /** \returns a vector expression of the \a Index -th sub or super diagonal */ - template inline const typename DiagonalIntReturnType::Type diagonal() const + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline const typename DiagonalIntReturnType::Type diagonal() const { - return typename DiagonalIntReturnType::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index)); + return typename DiagonalIntReturnType::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ - inline Block diagonal(int i) + inline Block diagonal(Index i) { ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); - return Block(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i)); + return Block(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ - inline const Block diagonal(int i) const + inline const Block diagonal(Index i) const { ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); - return Block(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i)); + return Block(m_data, 
supers()-i, std::max(0,i), 1, diagonalLength(i)); } template inline void evalTo(Dest& dst) const @@ -177,9 +179,9 @@ class BandMatrix : public EigenBase m_rows; - ei_int_if_dynamic m_supers; - ei_int_if_dynamic m_subs; + ei_variable_if_dynamic m_rows; + ei_variable_if_dynamic m_supers; + ei_variable_if_dynamic m_subs; }; /** \nonstableyet @@ -216,8 +218,9 @@ template class TridiagonalMatrix : public BandMatrix { typedef BandMatrix Base; + typedef typename Base::Index Index; public: - TridiagonalMatrix(int size = Size) : Base(size,size,1,1) {} + TridiagonalMatrix(Index size = Size) : Base(size,size,1,1) {} inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index 79c9dd421..bb1b8a6b9 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -36,7 +36,7 @@ * \param _DirectAccessStatus \internal used for partial specialization * * This class represents an expression of either a fixed-size or dynamic-size block. It is the return - * type of DenseBase::block(int,int,int,int) and DenseBase::block(int,int) and + * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block(Index,Index) and * most of the time this is the only way it is used. * * However, if you want to directly maniputate block expressions, @@ -55,7 +55,7 @@ * \include class_FixedBlock.cpp * Output: \verbinclude class_FixedBlock.out * - * \sa DenseBase::block(int,int,int,int), DenseBase::block(int,int), class VectorBlock + * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock */ template struct ei_traits > : ei_traits @@ -110,7 +110,7 @@ template c /** Column or Row constructor */ - inline Block(const XprType& xpr, int i) + inline Block(const XprType& xpr, Index i) : m_xpr(xpr), // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, @@ -128,7 +128,7 @@ template c /** Fixed-size constructor */ - inline Block(const XprType& xpr, int startRow, int startCol) + inline Block(const XprType& xpr, Index startRow, Index startCol) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) { @@ -140,8 +140,8 @@ template c /** Dynamic-size constructor */ inline Block(const XprType& xpr, - int startRow, int startCol, - int blockRows, int blockCols) + Index startRow, Index startCol, + Index blockRows, Index blockCols) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) { @@ -153,28 +153,28 @@ template c EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) - inline int rows() const { return m_blockRows.value(); } - inline int cols() const { return m_blockCols.value(); } + inline Index rows() const { return m_blockRows.value(); } + inline Index cols() const { return m_blockCols.value(); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_xpr.const_cast_derived() .coeffRef(row + m_startRow.value(), col + m_startCol.value()); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value()); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_xpr.const_cast_derived() .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 
0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } - inline const CoeffReturnType coeff(int index) const + inline const CoeffReturnType coeff(Index index) const { return m_xpr .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), @@ -182,21 +182,21 @@ template c } template - inline PacketScalar packet(int row, int col) const + inline PacketScalar packet(Index row, Index col) const { return m_xpr.template packet (row + m_startRow.value(), col + m_startCol.value()); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_xpr.const_cast_derived().template writePacket (row + m_startRow.value(), col + m_startCol.value(), x); } template - inline PacketScalar packet(int index) const + inline PacketScalar packet(Index index) const { return m_xpr.template packet (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), @@ -204,7 +204,7 @@ template c } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_xpr.const_cast_derived().template writePacket (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), @@ -214,17 +214,17 @@ template c #ifdef EIGEN_PARSED_BY_DOXYGEN /** \sa MapBase::data() */ inline const Scalar* data() const; - inline int innerStride() const; - inline int outerStride() const; + inline Index innerStride() const; + inline Index outerStride() const; #endif protected: const typename XprType::Nested m_xpr; - const ei_int_if_dynamic m_startRow; - const ei_int_if_dynamic m_startCol; - const ei_int_if_dynamic m_blockRows; - const ei_int_if_dynamic m_blockCols; + const ei_variable_if_dynamic m_startRow; + const ei_variable_if_dynamic m_startCol; + const ei_variable_if_dynamic m_blockRows; + const ei_variable_if_dynamic m_blockCols; }; /** \internal */ @@ -241,7 +241,7 @@ class Block /** Column or Row constructor */ - inline Block(const XprType& xpr, int i) + inline Block(const XprType& xpr, Index i) : Base(&xpr.const_cast_derived().coeffRef( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0, (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), @@ -257,7 +257,7 @@ class Block /** Fixed-size constructor */ - inline Block(const XprType& xpr, int startRow, int startCol) + inline Block(const XprType& xpr, Index startRow, Index startCol) : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol)), m_xpr(xpr) { ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() @@ -268,8 +268,8 @@ class Block /** Dynamic-size constructor */ inline Block(const XprType& xpr, - int startRow, int startCol, - int blockRows, int blockCols) + Index startRow, Index startCol, + Index blockRows, Index blockCols) : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols), m_xpr(xpr) { @@ -281,7 +281,7 @@ class Block } /** \sa MapBase::innerStride() */ - inline int innerStride() const + inline Index innerStride() const { return ei_traits::HasSameStorageOrderAsXprType ? 
m_xpr.innerStride() @@ -289,7 +289,7 @@ class Block } /** \sa MapBase::outerStride() */ - inline int outerStride() const + inline Index outerStride() const { return m_outerStride; } @@ -302,7 +302,7 @@ class Block #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal used by allowAligned() */ - inline Block(const XprType& xpr, const Scalar* data, int blockRows, int blockCols) + inline Block(const XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) : Base(data, blockRows, blockCols), m_xpr(xpr) { init(); @@ -335,19 +335,19 @@ class Block * when it is applied to a fixed-size matrix, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * - * \sa class Block, block(int,int) + * \sa class Block, block(Index,Index) */ template inline Block DenseBase - ::block(int startRow, int startCol, int blockRows, int blockCols) + ::block(Index startRow, Index startCol, Index blockRows, Index blockCols) { return Block(derived(), startRow, startCol, blockRows, blockCols); } -/** This is the const version of block(int,int,int,int). */ +/** This is the const version of block(Index,Index,Index,Index). */ template inline const Block DenseBase - ::block(int startRow, int startCol, int blockRows, int blockCols) const + ::block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { return Block(derived(), startRow, startCol, blockRows, blockCols); } @@ -363,19 +363,19 @@ inline const Block DenseBase * Example: \include MatrixBase_topRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_topRightCorner_int_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline Block DenseBase - ::topRightCorner(int cRows, int cCols) + ::topRightCorner(Index cRows, Index cCols) { return Block(derived(), 0, cols() - cCols, cRows, cCols); } -/** This is the const version of topRightCorner(int, int).*/ +/** This is the const version of topRightCorner(Index, Index).*/ template inline const Block -DenseBase::topRightCorner(int cRows, int cCols) const +DenseBase::topRightCorner(Index cRows, Index cCols) const { return Block(derived(), 0, cols() - cCols, cRows, cCols); } @@ -387,7 +387,7 @@ DenseBase::topRightCorner(int cRows, int cCols) const * Example: \include MatrixBase_template_int_int_topRightCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -417,19 +417,19 @@ DenseBase::topRightCorner() const * Example: \include MatrixBase_topLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_topLeftCorner_int_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline Block DenseBase - ::topLeftCorner(int cRows, int cCols) + ::topLeftCorner(Index cRows, Index cCols) { return Block(derived(), 0, 0, cRows, cCols); } -/** This is the const version of topLeftCorner(int, int).*/ +/** This is the const version of topLeftCorner(Index, Index).*/ template inline const Block -DenseBase::topLeftCorner(int cRows, int cCols) const +DenseBase::topLeftCorner(Index cRows, Index cCols) const { return Block(derived(), 0, 0, cRows, cCols); } @@ -441,7 +441,7 @@ DenseBase::topLeftCorner(int cRows, int cCols) const * Example: \include MatrixBase_template_int_int_topLeftCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out * - * \sa class Block, block(int,int,int,int) + * 
\sa class Block, block(Index,Index,Index,Index) */ template template @@ -473,19 +473,19 @@ DenseBase::topLeftCorner() const * Example: \include MatrixBase_bottomRightCorner_int_int.cpp * Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline Block DenseBase - ::bottomRightCorner(int cRows, int cCols) + ::bottomRightCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } -/** This is the const version of bottomRightCorner(int, int).*/ +/** This is the const version of bottomRightCorner(Index, Index).*/ template inline const Block -DenseBase::bottomRightCorner(int cRows, int cCols) const +DenseBase::bottomRightCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } @@ -497,7 +497,7 @@ DenseBase::bottomRightCorner(int cRows, int cCols) const * Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -527,19 +527,19 @@ DenseBase::bottomRightCorner() const * Example: \include MatrixBase_bottomLeftCorner_int_int.cpp * Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline Block DenseBase - ::bottomLeftCorner(int cRows, int cCols) + ::bottomLeftCorner(Index cRows, Index cCols) { return Block(derived(), rows() - cRows, 0, cRows, cCols); } -/** This is the const version of bottomLeftCorner(int, int).*/ +/** This is the const version of bottomLeftCorner(Index, Index).*/ template inline const Block -DenseBase::bottomLeftCorner(int cRows, int cCols) const +DenseBase::bottomLeftCorner(Index cRows, Index cCols) const { return Block(derived(), rows() - cRows, 0, cRows, cCols); } @@ -551,7 +551,7 @@ DenseBase::bottomLeftCorner(int cRows, int cCols) const * Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -579,19 +579,19 @@ DenseBase::bottomLeftCorner() const * Example: \include MatrixBase_topRows_int.cpp * Output: \verbinclude MatrixBase_topRows_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline typename DenseBase::RowsBlockXpr DenseBase - ::topRows(int n) + ::topRows(Index n) { return RowsBlockXpr(derived(), 0, 0, n, cols()); } -/** This is the const version of topRows(int).*/ +/** This is the const version of topRows(Index).*/ template inline const typename DenseBase::RowsBlockXpr -DenseBase::topRows(int n) const +DenseBase::topRows(Index n) const { return RowsBlockXpr(derived(), 0, 0, n, cols()); } @@ -603,7 +603,7 @@ DenseBase::topRows(int n) const * Example: \include MatrixBase_template_int_topRows.cpp * Output: \verbinclude MatrixBase_template_int_topRows.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -633,19 +633,19 @@ DenseBase::topRows() const * Example: \include MatrixBase_bottomRows_int.cpp * Output: \verbinclude MatrixBase_bottomRows_int.out * - * \sa class Block, 
block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline typename DenseBase::RowsBlockXpr DenseBase - ::bottomRows(int n) + ::bottomRows(Index n) { return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); } -/** This is the const version of bottomRows(int).*/ +/** This is the const version of bottomRows(Index).*/ template inline const typename DenseBase::RowsBlockXpr -DenseBase::bottomRows(int n) const +DenseBase::bottomRows(Index n) const { return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); } @@ -657,7 +657,7 @@ DenseBase::bottomRows(int n) const * Example: \include MatrixBase_template_int_bottomRows.cpp * Output: \verbinclude MatrixBase_template_int_bottomRows.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -687,19 +687,19 @@ DenseBase::bottomRows() const * Example: \include MatrixBase_leftCols_int.cpp * Output: \verbinclude MatrixBase_leftCols_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline typename DenseBase::ColsBlockXpr DenseBase - ::leftCols(int n) + ::leftCols(Index n) { return ColsBlockXpr(derived(), 0, 0, rows(), n); } -/** This is the const version of leftCols(int).*/ +/** This is the const version of leftCols(Index).*/ template inline const typename DenseBase::ColsBlockXpr -DenseBase::leftCols(int n) const +DenseBase::leftCols(Index n) const { return ColsBlockXpr(derived(), 0, 0, rows(), n); } @@ -711,7 +711,7 @@ DenseBase::leftCols(int n) const * Example: \include MatrixBase_template_int_leftCols.cpp * Output: \verbinclude MatrixBase_template_int_leftCols.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -741,19 +741,19 @@ DenseBase::leftCols() const * Example: \include MatrixBase_rightCols_int.cpp * Output: \verbinclude MatrixBase_rightCols_int.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline typename DenseBase::ColsBlockXpr DenseBase - ::rightCols(int n) + ::rightCols(Index n) { return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); } -/** This is the const version of rightCols(int).*/ +/** This is the const version of rightCols(Index).*/ template inline const typename DenseBase::ColsBlockXpr -DenseBase::rightCols(int n) const +DenseBase::rightCols(Index n) const { return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); } @@ -765,7 +765,7 @@ DenseBase::rightCols(int n) const * Example: \include MatrixBase_template_int_rightCols.cpp * Output: \verbinclude MatrixBase_template_int_rightCols.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template @@ -802,21 +802,21 @@ DenseBase::rightCols() const * \note since block is a templated member, the keyword template has to be used * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template inline Block -DenseBase::block(int startRow, int startCol) +DenseBase::block(Index startRow, Index startCol) { return Block(derived(), startRow, startCol); } -/** This is the const version of block<>(int, int). */ +/** This is the const version of block<>(Index, Index). 
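// Illustrative sketch, not part of the patch: after this change the dynamic-size
// block, corner and row/column-panel helpers take and return the expression's Index
// type instead of int, so user code should size and offset blocks with Index
// (ptrdiff_t by default). Assumes only <Eigen/Dense> and the post-change API shown
// in the hunks above.
#include <Eigen/Dense>

void block_and_corner_example()
{
  typedef Eigen::MatrixXd::Index Index;           // the new index type
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(6, 8);

  Index r = 2, c = 3;
  m.block(1, 1, r, c).setZero();                  // dynamic-size block, Index arguments
  m.topRightCorner(r, c) *= 2.0;                  // corner helpers, Index arguments
  m.bottomLeftCorner(r, c) = m.topLeftCorner(r, c);
  m.topRows(r).setOnes();                         // row/column panels, Index argument
  m.leftCols(c) = m.rightCols(c);
  m.row(0) += m.row(1);                           // row()/col() also take an Index
  m.block<2, 2>(3, 3).setIdentity();              // fixed sizes stay template parameters,
}                                                 // but the offsets are Index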
*/ template template inline const Block -DenseBase::block(int startRow, int startCol) const +DenseBase::block(Index startRow, Index startCol) const { return Block(derived(), startRow, startCol); } @@ -829,7 +829,7 @@ DenseBase::block(int startRow, int startCol) const * \sa row(), class Block */ template inline typename DenseBase::ColXpr -DenseBase::col(int i) +DenseBase::col(Index i) { return ColXpr(derived(), i); } @@ -837,7 +837,7 @@ DenseBase::col(int i) /** This is the const version of col(). */ template inline const typename DenseBase::ColXpr -DenseBase::col(int i) const +DenseBase::col(Index i) const { return ColXpr(derived(), i); } @@ -850,7 +850,7 @@ DenseBase::col(int i) const * \sa col(), class Block */ template inline typename DenseBase::RowXpr -DenseBase::row(int i) +DenseBase::row(Index i) { return RowXpr(derived(), i); } @@ -858,7 +858,7 @@ DenseBase::row(int i) /** This is the const version of row(). */ template inline const typename DenseBase::RowXpr -DenseBase::row(int i) const +DenseBase::row(Index i) const { return RowXpr(derived(), i); } diff --git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h index adfca4f9a..311c90304 100644 --- a/Eigen/src/Core/CommaInitializer.h +++ b/Eigen/src/Core/CommaInitializer.h @@ -39,7 +39,9 @@ template struct CommaInitializer { - typedef typename ei_traits::Scalar Scalar; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Index Index; + inline CommaInitializer(XprType& xpr, const Scalar& s) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { @@ -113,9 +115,9 @@ struct CommaInitializer inline XprType& finished() { return m_xpr; } XprType& m_xpr; // target expression - int m_row; // current row id - int m_col; // current col id - int m_currentBlockRows; // current block height + Index m_row; // current row id + Index m_col; // current col id + Index m_currentBlockRows; // current block height }; /** \anchor MatrixBaseCommaInitRef diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index e0617e312..530777577 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -123,14 +123,14 @@ class CwiseBinaryOp : ei_no_assignment_operator, ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); } - EIGEN_STRONG_INLINE int rows() const { + EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time optimizations if (ei_traits::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); else return m_lhs.rows(); } - EIGEN_STRONG_INLINE int cols() const { + EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time optimizations if (ei_traits::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); @@ -161,27 +161,27 @@ class CwiseBinaryOpImpl typedef typename ei_dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) - EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return derived().functor()(derived().lhs().coeff(row, col), derived().rhs().coeff(row, col)); } template - EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { return derived().functor().packetOp(derived().lhs().template packet(row, col), derived().rhs().template packet(row, col)); } - EIGEN_STRONG_INLINE const Scalar coeff(int index) const + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { return 
derived().functor()(derived().lhs().coeff(index), derived().rhs().coeff(index)); } template - EIGEN_STRONG_INLINE PacketScalar packet(int index) const + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return derived().functor().packetOp(derived().lhs().template packet(index), derived().rhs().template packet(index)); diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h index bb44703cf..af1643273 100644 --- a/Eigen/src/Core/CwiseNullaryOp.h +++ b/Eigen/src/Core/CwiseNullaryOp.h @@ -63,7 +63,7 @@ class CwiseNullaryOp : ei_no_assignment_operator, typedef typename ei_dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) - CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp()) + CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) : m_rows(rows), m_cols(cols), m_functor(func) { ei_assert(rows >= 0 @@ -72,34 +72,34 @@ class CwiseNullaryOp : ei_no_assignment_operator, && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); } - EIGEN_STRONG_INLINE int rows() const { return m_rows.value(); } - EIGEN_STRONG_INLINE int cols() const { return m_cols.value(); } + EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); } + EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); } - EIGEN_STRONG_INLINE const Scalar coeff(int rows, int cols) const + EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const { return m_functor(rows, cols); } template - EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { return m_functor.packetOp(row, col); } - EIGEN_STRONG_INLINE const Scalar coeff(int index) const + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { return m_functor(index); } template - EIGEN_STRONG_INLINE PacketScalar packet(int index) const + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return m_functor.packetOp(index); } protected: - const ei_int_if_dynamic m_rows; - const ei_int_if_dynamic m_cols; + const ei_variable_if_dynamic m_rows; + const ei_variable_if_dynamic m_cols; const NullaryOp m_functor; }; @@ -120,7 +120,7 @@ class CwiseNullaryOp : ei_no_assignment_operator, template template EIGEN_STRONG_INLINE const CwiseNullaryOp -DenseBase::NullaryExpr(int rows, int cols, const CustomNullaryOp& func) +DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) { return CwiseNullaryOp(rows, cols, func); } @@ -143,7 +143,7 @@ DenseBase::NullaryExpr(int rows, int cols, const CustomNullaryOp& func) template template EIGEN_STRONG_INLINE const CwiseNullaryOp -DenseBase::NullaryExpr(int size, const CustomNullaryOp& func) +DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); @@ -182,7 +182,7 @@ DenseBase::NullaryExpr(const CustomNullaryOp& func) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Constant(int rows, int cols, const Scalar& value) +DenseBase::Constant(Index rows, Index cols, const Scalar& value) { return DenseBase::NullaryExpr(rows, cols, ei_scalar_constant_op(value)); } @@ -204,7 +204,7 @@ DenseBase::Constant(int rows, int cols, const Scalar& value) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Constant(int size, const Scalar& value) +DenseBase::Constant(Index size, const Scalar& value) { return DenseBase::NullaryExpr(size, 
ei_scalar_constant_op(value)); } @@ -239,11 +239,11 @@ DenseBase::Constant(const Scalar& value) * Example: \include DenseBase_LinSpaced_seq.cpp * Output: \verbinclude DenseBase_LinSpaced_seq.out * - * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Scalar,Scalar,int), CwiseNullaryOp + * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Scalar,Scalar,Index), CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturnType -DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size) +DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, ei_linspaced_op(low,high,size)); @@ -259,11 +259,11 @@ DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig * Example: \include DenseBase_LinSpaced.cpp * Output: \verbinclude DenseBase_LinSpaced.out * - * \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Sequential_t,const Scalar&,const Scalar&,int), CwiseNullaryOp + * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Sequential_t,const Scalar&,const Scalar&,Index), CwiseNullaryOp */ template EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType -DenseBase::LinSpaced(const Scalar& low, const Scalar& high, int size) +DenseBase::LinSpaced(const Scalar& low, const Scalar& high, Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase::NullaryExpr(size, ei_linspaced_op(low,high,size)); @@ -274,8 +274,8 @@ template bool DenseBase::isApproxToConstant (const Scalar& value, RealScalar prec) const { - for(int j = 0; j < cols(); ++j) - for(int i = 0; i < rows(); ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) if(!ei_isApprox(this->coeff(i, j), value, prec)) return false; return true; @@ -303,7 +303,7 @@ EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& value) /** Sets all coefficients in this expression to \a value. 
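// Illustrative sketch, not part of the patch: the CwiseNullaryOp-based factories now
// take their sizes as Index, and a user-supplied nullary functor passed to
// NullaryExpr() is invoked with Index arguments. The (low, high, size) argument order
// of LinSpaced follows the hunks above; checkerboard_op is a hypothetical functor.
#include <Eigen/Dense>

struct checkerboard_op
{
  double operator()(Eigen::MatrixXd::Index row, Eigen::MatrixXd::Index col) const
  { return ((row + col) % 2 == 0) ? 1.0 : 0.0; }
};

void nullary_example()
{
  typedef Eigen::MatrixXd::Index Index;
  Index rows = 3, cols = 4;
  Eigen::MatrixXd a = Eigen::MatrixXd::Constant(rows, cols, 0.5);
  Eigen::MatrixXd b = Eigen::MatrixXd::NullaryExpr(rows, cols, checkerboard_op());
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(0.0, 1.0, 5);   // 0, 0.25, 0.5, 0.75, 1
  a.fill(1.0);                                                   // same as a.setConstant(1.0)
  (void)b; (void)v;
}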
* - * \sa fill(), setConstant(int,const Scalar&), setConstant(int,int,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() + * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() */ template EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value) @@ -318,11 +318,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value * Example: \include Matrix_setConstant_int.cpp * Output: \verbinclude Matrix_setConstant_int.out * - * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setConstant(int size, const Scalar& value) +DenseStorageBase::setConstant(Index size, const Scalar& value) { resize(size); return setConstant(value); @@ -336,11 +336,11 @@ DenseStorageBase::setConstant(int size, const Scalar& value) * Example: \include Matrix_setConstant_int_int.cpp * Output: \verbinclude Matrix_setConstant_int_int.out * - * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setConstant(int rows, int cols, const Scalar& value) +DenseStorageBase::setConstant(Index rows, Index cols, const Scalar& value) { resize(rows, cols); return setConstant(value); @@ -359,7 +359,7 @@ DenseStorageBase::setConstant(int rows, int cols, const Scalar& value) * \sa CwiseNullaryOp */ template -EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high, int size) +EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high, Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return derived() = Derived::NullaryExpr(size, ei_linspaced_op(low,high,size)); @@ -379,11 +379,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, * Example: \include MatrixBase_zero_int_int.cpp * Output: \verbinclude MatrixBase_zero_int_int.out * - * \sa Zero(), Zero(int) + * \sa Zero(), Zero(Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Zero(int rows, int cols) +DenseBase::Zero(Index rows, Index cols) { return Constant(rows, cols, Scalar(0)); } @@ -402,11 +402,11 @@ DenseBase::Zero(int rows, int cols) * Example: \include MatrixBase_zero_int.cpp * Output: \verbinclude MatrixBase_zero_int.out * - * \sa Zero(), Zero(int,int) + * \sa Zero(), Zero(Index,Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Zero(int size) +DenseBase::Zero(Index size) { return Constant(size, Scalar(0)); } @@ -419,7 +419,7 @@ DenseBase::Zero(int size) * Example: \include MatrixBase_zero.cpp * Output: \verbinclude MatrixBase_zero.out * - * \sa Zero(int), Zero(int,int) + * \sa Zero(Index), Zero(Index,Index) */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType @@ -439,8 +439,8 @@ DenseBase::Zero() template bool DenseBase::isZero(RealScalar prec) const { - for(int j = 0; j < cols(); ++j) - for(int i = 0; i < rows(); ++i) + for(Index 
j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) return false; return true; @@ -466,11 +466,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setZero() * Example: \include Matrix_setZero_int.cpp * Output: \verbinclude Matrix_setZero_int.out * - * \sa DenseBase::setZero(), setZero(int,int), class CwiseNullaryOp, DenseBase::Zero() + * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setZero(int size) +DenseStorageBase::setZero(Index size) { resize(size); return setConstant(Scalar(0)); @@ -484,11 +484,11 @@ DenseStorageBase::setZero(int size) * Example: \include Matrix_setZero_int_int.cpp * Output: \verbinclude Matrix_setZero_int_int.out * - * \sa DenseBase::setZero(), setZero(int), class CwiseNullaryOp, DenseBase::Zero() + * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setZero(int rows, int cols) +DenseStorageBase::setZero(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(0)); @@ -508,11 +508,11 @@ DenseStorageBase::setZero(int rows, int cols) * Example: \include MatrixBase_ones_int_int.cpp * Output: \verbinclude MatrixBase_ones_int_int.out * - * \sa Ones(), Ones(int), isOnes(), class Ones + * \sa Ones(), Ones(Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Ones(int rows, int cols) +DenseBase::Ones(Index rows, Index cols) { return Constant(rows, cols, Scalar(1)); } @@ -531,11 +531,11 @@ DenseBase::Ones(int rows, int cols) * Example: \include MatrixBase_ones_int.cpp * Output: \verbinclude MatrixBase_ones_int.out * - * \sa Ones(), Ones(int,int), isOnes(), class Ones + * \sa Ones(), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType -DenseBase::Ones(int size) +DenseBase::Ones(Index size) { return Constant(size, Scalar(1)); } @@ -548,7 +548,7 @@ DenseBase::Ones(int size) * Example: \include MatrixBase_ones.cpp * Output: \verbinclude MatrixBase_ones.out * - * \sa Ones(int), Ones(int,int), isOnes(), class Ones + * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones */ template EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType @@ -592,11 +592,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() * Example: \include Matrix_setOnes_int.cpp * Output: \verbinclude Matrix_setOnes_int.out * - * \sa MatrixBase::setOnes(), setOnes(int,int), class CwiseNullaryOp, MatrixBase::Ones() + * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setOnes(int size) +DenseStorageBase::setOnes(Index size) { resize(size); return setConstant(Scalar(1)); @@ -610,11 +610,11 @@ DenseStorageBase::setOnes(int size) * Example: \include Matrix_setOnes_int_int.cpp * Output: \verbinclude Matrix_setOnes_int_int.out * - * \sa MatrixBase::setOnes(), setOnes(int), class CwiseNullaryOp, MatrixBase::Ones() + * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() */ template EIGEN_STRONG_INLINE Derived& -DenseStorageBase::setOnes(int rows, int cols) +DenseStorageBase::setOnes(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(1)); @@ -638,7 +638,7 @@ DenseStorageBase::setOnes(int rows, int cols) */ template EIGEN_STRONG_INLINE const typename 
MatrixBase::IdentityReturnType -MatrixBase::Identity(int rows, int cols) +MatrixBase::Identity(Index rows, Index cols) { return DenseBase::NullaryExpr(rows, cols, ei_scalar_identity_op()); } @@ -651,7 +651,7 @@ MatrixBase::Identity(int rows, int cols) * Example: \include MatrixBase_identity.cpp * Output: \verbinclude MatrixBase_identity.out * - * \sa Identity(int,int), setIdentity(), isIdentity() + * \sa Identity(Index,Index), setIdentity(), isIdentity() */ template EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType @@ -668,15 +668,15 @@ MatrixBase::Identity() * Example: \include MatrixBase_isIdentity.cpp * Output: \verbinclude MatrixBase_isIdentity.out * - * \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity() + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() */ template bool MatrixBase::isIdentity (RealScalar prec) const { - for(int j = 0; j < cols(); ++j) + for(Index j = 0; j < cols(); ++j) { - for(int i = 0; i < rows(); ++i) + for(Index i = 0; i < rows(); ++i) { if(i == j) { @@ -705,11 +705,12 @@ struct ei_setIdentity_impl template struct ei_setIdentity_impl { + typedef typename Derived::Index Index; static EIGEN_STRONG_INLINE Derived& run(Derived& m) { m.setZero(); - const int size = std::min(m.rows(), m.cols()); - for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); + const Index size = std::min(m.rows(), m.cols()); + for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } }; @@ -719,7 +720,7 @@ struct ei_setIdentity_impl * Example: \include MatrixBase_setIdentity.cpp * Output: \verbinclude MatrixBase_setIdentity.out * - * \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity() + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() */ template EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() @@ -738,7 +739,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() */ template -EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(int rows, int cols) +EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) { derived().resize(rows, cols); return setIdentity(); @@ -748,10 +749,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(int rows, int cols * * \only_for_vectors * - * \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template -EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(int size, int i) +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index size, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(size,size), i); @@ -763,10 +764,10 @@ EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBa * * This variant is for fixed-size vector only. 
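// Illustrative sketch, not part of the patch: the sized Zero/Ones/Identity factories,
// the resizing setZero/setOnes/setIdentity overloads and the Unit basis factory all
// take Index arguments after this change.
#include <Eigen/Dense>

void zero_ones_identity_example()
{
  typedef Eigen::MatrixXd::Index Index;
  Index n = 4;
  Eigen::MatrixXd a = Eigen::MatrixXd::Zero(n, n);
  Eigen::VectorXd v = Eigen::VectorXd::Ones(n);
  Eigen::MatrixXd b;
  b.setOnes(n, 2 * n);                               // resizes to n x 2n, then fills with 1
  b.setIdentity(n, n);                               // resizes, then sets to identity
  Eigen::VectorXd e2 = Eigen::VectorXd::Unit(n, 2);  // n-vector with a single 1 at index 2
  (void)a; (void)v; (void)e2;
}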
* - * \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template -EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(int i) +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(),i); @@ -776,7 +777,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBa * * \only_for_vectors * - * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() @@ -786,7 +787,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBa * * \only_for_vectors * - * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() @@ -796,7 +797,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBa * * \only_for_vectors * - * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() @@ -806,7 +807,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBa * * \only_for_vectors * - * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index 8f95b69b0..da398d131 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -76,8 +76,8 @@ class CwiseUnaryOp : ei_no_assignment_operator, inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) : m_xpr(xpr), m_functor(func) {} - EIGEN_STRONG_INLINE int rows() const { return m_xpr.rows(); } - EIGEN_STRONG_INLINE int cols() const { return m_xpr.cols(); } + EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); } /** \returns the functor representing the unary operation */ const UnaryOp& functor() const { return m_functor; } @@ -100,32 +100,31 @@ class CwiseUnaryOp : ei_no_assignment_operator, template class CwiseUnaryOpImpl : public ei_dense_xpr_base >::type - { - typedef CwiseUnaryOp Derived; - +{ public: + typedef CwiseUnaryOp Derived; typedef typename ei_dense_xpr_base >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) - EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return 
derived().functor()(derived().nestedExpression().coeff(row, col)); } template - EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { return derived().functor().packetOp(derived().nestedExpression().template packet(row, col)); } - EIGEN_STRONG_INLINE const Scalar coeff(int index) const + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { return derived().functor()(derived().nestedExpression().coeff(index)); } template - EIGEN_STRONG_INLINE PacketScalar packet(int index) const + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return derived().functor().packetOp(derived().nestedExpression().template packet(index)); } diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h index 9cdd03477..11a23c66a 100644 --- a/Eigen/src/Core/CwiseUnaryView.h +++ b/Eigen/src/Core/CwiseUnaryView.h @@ -74,8 +74,8 @@ class CwiseUnaryView : ei_no_assignment_operator, EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) - EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); } - EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); } + EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } /** \returns the functor representing unary operation */ const ViewOp& functor() const { return m_functor; } @@ -98,40 +98,39 @@ template class CwiseUnaryViewImpl : public ei_dense_xpr_base< CwiseUnaryView >::type { - typedef CwiseUnaryView Derived; - public: + typedef CwiseUnaryView Derived; typedef typename ei_dense_xpr_base< CwiseUnaryView >::type Base; - inline int innerStride() const + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + + inline Index innerStride() const { return derived().nestedExpression().innerStride() * sizeof(typename ei_traits::Scalar) / sizeof(Scalar); } - inline int outerStride() const + inline Index outerStride() const { return derived().nestedExpression().outerStride(); } - EIGEN_DENSE_PUBLIC_INTERFACE(Derived) - - EIGEN_STRONG_INLINE CoeffReturnType coeff(int row, int col) const + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return derived().functor()(derived().nestedExpression().coeff(row, col)); } - EIGEN_STRONG_INLINE CoeffReturnType coeff(int index) const + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return derived().functor()(derived().nestedExpression().coeff(index)); } - EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col)); } - EIGEN_STRONG_INLINE Scalar& coeffRef(int index) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index)); } diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h index e2429be93..c4b4057a4 100644 --- a/Eigen/src/Core/DenseBase.h +++ b/Eigen/src/Core/DenseBase.h @@ -50,8 +50,12 @@ template class DenseBase class InnerIterator; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef DenseCoeffsBase Base; using Base::derived; using Base::const_cast_derived; @@ -168,19 +172,9 @@ template class DenseBase OuterStrideAtCompileTime = ei_outer_stride_at_compile_time::ret }; -#ifndef 
EIGEN_PARSED_BY_DOXYGEN - /** This is the "real scalar" type; if the \a Scalar type is already real numbers - * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If - * \a Scalar is \a std::complex then RealScalar is \a T. - * - * \sa class NumTraits - */ - typedef typename NumTraits::Real RealScalar; -#endif // not EIGEN_PARSED_BY_DOXYGEN - /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ - inline int nonZeros() const { return size(); } + inline Index nonZeros() const { return size(); } /** \returns true if either the number of rows or the number of columns is equal to 1. * In other words, this function returns * \code rows()==1 || cols()==1 \endcode @@ -191,7 +185,7 @@ template class DenseBase * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * with respect to the storage order, i.e., the number of columns for a column-major matrix, * and the number of rows for a row-major matrix. */ - int outerSize() const + Index outerSize() const { return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); @@ -202,7 +196,7 @@ template class DenseBase * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * with respect to the storage order, i.e., the number of rows for a column-major matrix, * and the number of columns for a row-major matrix. */ - int innerSize() const + Index innerSize() const { return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows(); @@ -212,7 +206,7 @@ template class DenseBase * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ - void resize(int size) + void resize(Index size) { EIGEN_ONLY_USED_FOR_DEBUG(size); ei_assert(size == this->size() @@ -222,7 +216,7 @@ template class DenseBase * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. 
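// Illustrative sketch, not part of the patch: DenseBase now exposes the Index typedef
// (deduced from the storage kind), so generic code should loop with
// typename Derived::Index rather than int; rows(), cols(), size(), outerSize(),
// innerSize() and nonZeros() all return it.
#include <Eigen/Dense>

template<typename Derived>
typename Derived::Index count_nonzeros(const Eigen::DenseBase<Derived>& m)
{
  typedef typename Derived::Index Index;
  Index n = 0;
  for (Index j = 0; j < m.cols(); ++j)
    for (Index i = 0; i < m.rows(); ++i)
      if (m.coeff(i, j) != typename Derived::Scalar(0))
        ++n;
  return n;                                    // at most m.nonZeros() == m.size()
}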
*/ - void resize(int rows, int cols) + void resize(Index rows, Index cols) { EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); @@ -301,41 +295,41 @@ template class DenseBase public: #endif - RowXpr row(int i); - const RowXpr row(int i) const; + RowXpr row(Index i); + const RowXpr row(Index i) const; - ColXpr col(int i); - const ColXpr col(int i) const; + ColXpr col(Index i); + const ColXpr col(Index i) const; - Block block(int startRow, int startCol, int blockRows, int blockCols); - const Block block(int startRow, int startCol, int blockRows, int blockCols) const; + Block block(Index startRow, Index startCol, Index blockRows, Index blockCols); + const Block block(Index startRow, Index startCol, Index blockRows, Index blockCols) const; - VectorBlock segment(int start, int size); - const VectorBlock segment(int start, int size) const; + VectorBlock segment(Index start, Index size); + const VectorBlock segment(Index start, Index size) const; - VectorBlock head(int size); - const VectorBlock head(int size) const; + VectorBlock head(Index size); + const VectorBlock head(Index size) const; - VectorBlock tail(int size); - const VectorBlock tail(int size) const; + VectorBlock tail(Index size); + const VectorBlock tail(Index size) const; - Block topLeftCorner(int cRows, int cCols); - const Block topLeftCorner(int cRows, int cCols) const; - Block topRightCorner(int cRows, int cCols); - const Block topRightCorner(int cRows, int cCols) const; - Block bottomLeftCorner(int cRows, int cCols); - const Block bottomLeftCorner(int cRows, int cCols) const; - Block bottomRightCorner(int cRows, int cCols); - const Block bottomRightCorner(int cRows, int cCols) const; + Block topLeftCorner(Index cRows, Index cCols); + const Block topLeftCorner(Index cRows, Index cCols) const; + Block topRightCorner(Index cRows, Index cCols); + const Block topRightCorner(Index cRows, Index cCols) const; + Block bottomLeftCorner(Index cRows, Index cCols); + const Block bottomLeftCorner(Index cRows, Index cCols) const; + Block bottomRightCorner(Index cRows, Index cCols); + const Block bottomRightCorner(Index cRows, Index cCols) const; - RowsBlockXpr topRows(int n); - const RowsBlockXpr topRows(int n) const; - RowsBlockXpr bottomRows(int n); - const RowsBlockXpr bottomRows(int n) const; - ColsBlockXpr leftCols(int n); - const ColsBlockXpr leftCols(int n) const; - ColsBlockXpr rightCols(int n); - const ColsBlockXpr rightCols(int n) const; + RowsBlockXpr topRows(Index n); + const RowsBlockXpr topRows(Index n) const; + RowsBlockXpr bottomRows(Index n); + const RowsBlockXpr bottomRows(Index n) const; + ColsBlockXpr leftCols(Index n); + const ColsBlockXpr leftCols(Index n) const; + ColsBlockXpr rightCols(Index n); + const ColsBlockXpr rightCols(Index n) const; template Block topLeftCorner(); template const Block topLeftCorner() const; @@ -356,9 +350,9 @@ template class DenseBase template const typename NColsBlockXpr::Type rightCols() const; template - Block block(int startRow, int startCol); + Block block(Index startRow, Index startCol); template - const Block block(int startRow, int startCol) const; + const Block block(Index startRow, Index startCol) const; template VectorBlock head(void); template const VectorBlock head() const; @@ -366,8 +360,8 @@ template class DenseBase template VectorBlock tail(); template const VectorBlock tail() const; - template VectorBlock segment(int start); - template const VectorBlock segment(int start) const; + template VectorBlock segment(Index start); + template const VectorBlock 
segment(Index start) const; Diagonal diagonal(); const Diagonal diagonal() const; @@ -375,8 +369,8 @@ template class DenseBase template Diagonal diagonal(); template const Diagonal diagonal() const; - Diagonal diagonal(int index); - const Diagonal diagonal(int index) const; + Diagonal diagonal(Index index); + const Diagonal diagonal(Index index) const; template TriangularView part(); template const TriangularView part() const; @@ -388,37 +382,37 @@ template class DenseBase template const SelfAdjointView selfadjointView() const; static const ConstantReturnType - Constant(int rows, int cols, const Scalar& value); + Constant(Index rows, Index cols, const Scalar& value); static const ConstantReturnType - Constant(int size, const Scalar& value); + Constant(Index size, const Scalar& value); static const ConstantReturnType Constant(const Scalar& value); static const SequentialLinSpacedReturnType - LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size); + LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size); static const RandomAccessLinSpacedReturnType - LinSpaced(const Scalar& low, const Scalar& high, int size); + LinSpaced(const Scalar& low, const Scalar& high, Index size); template static const CwiseNullaryOp - NullaryExpr(int rows, int cols, const CustomNullaryOp& func); + NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); template static const CwiseNullaryOp - NullaryExpr(int size, const CustomNullaryOp& func); + NullaryExpr(Index size, const CustomNullaryOp& func); template static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); - static const ConstantReturnType Zero(int rows, int cols); - static const ConstantReturnType Zero(int size); + static const ConstantReturnType Zero(Index rows, Index cols); + static const ConstantReturnType Zero(Index size); static const ConstantReturnType Zero(); - static const ConstantReturnType Ones(int rows, int cols); - static const ConstantReturnType Ones(int size); + static const ConstantReturnType Ones(Index rows, Index cols); + static const ConstantReturnType Ones(Index size); static const ConstantReturnType Ones(); void fill(const Scalar& value); Derived& setConstant(const Scalar& value); - Derived& setLinSpaced(const Scalar& low, const Scalar& high, int size); + Derived& setLinSpaced(const Scalar& low, const Scalar& high, Index size); Derived& setZero(); Derived& setOnes(); Derived& setRandom(); @@ -471,11 +465,11 @@ template class DenseBase typename ei_traits::Scalar minCoeff() const; typename ei_traits::Scalar maxCoeff() const; - typename ei_traits::Scalar minCoeff(int* row, int* col) const; - typename ei_traits::Scalar maxCoeff(int* row, int* col) const; + typename ei_traits::Scalar minCoeff(Index* row, Index* col) const; + typename ei_traits::Scalar maxCoeff(Index* row, Index* col) const; - typename ei_traits::Scalar minCoeff(int* index) const; - typename ei_traits::Scalar maxCoeff(int* index) const; + typename ei_traits::Scalar minCoeff(Index* index) const; + typename ei_traits::Scalar maxCoeff(Index* index) const; template typename ei_result_of::Scalar)>::type @@ -490,15 +484,15 @@ template class DenseBase bool all(void) const; bool any(void) const; - int count() const; + Index count() const; const VectorwiseOp rowwise() const; VectorwiseOp rowwise(); const VectorwiseOp colwise() const; VectorwiseOp colwise(); - static const CwiseNullaryOp,Derived> Random(int rows, int cols); - static const CwiseNullaryOp,Derived> Random(int size); + static const CwiseNullaryOp,Derived> 
Random(Index rows, Index cols); + static const CwiseNullaryOp,Derived> Random(Index size); static const CwiseNullaryOp,Derived> Random(); template @@ -518,7 +512,7 @@ template class DenseBase template const Replicate replicate() const; - const Replicate replicate(int rowFacor,int colFactor) const; + const Replicate replicate(Index rowFacor,Index colFactor) const; Eigen::Reverse reverse(); const Eigen::Reverse reverse() const; @@ -526,8 +520,8 @@ template class DenseBase #ifdef EIGEN2_SUPPORT - Block corner(CornerType type, int cRows, int cCols); - const Block corner(CornerType type, int cRows, int cCols) const; + Block corner(CornerType type, Index cRows, Index cCols); + const Block corner(CornerType type, Index cRows, Index cCols) const; template Block corner(CornerType type); template diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h index ccf959b86..7026bbe34 100644 --- a/Eigen/src/Core/DenseCoeffsBase.h +++ b/Eigen/src/Core/DenseCoeffsBase.h @@ -29,7 +29,10 @@ template class DenseCoeffsBase : public EigenBase { public: + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; typedef typename ei_meta_if::ret, const Scalar&, Scalar>::ret CoeffReturnType; typedef EigenBase Base; @@ -38,7 +41,7 @@ class DenseCoeffsBase : public EigenBase using Base::size; using Base::derived; - EIGEN_STRONG_INLINE int rowIndexByOuterInner(int outer, int inner) const + EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const { return int(Derived::RowsAtCompileTime) == 1 ? 0 : int(Derived::ColsAtCompileTime) == 1 ? inner @@ -46,7 +49,7 @@ class DenseCoeffsBase : public EigenBase : inner; } - EIGEN_STRONG_INLINE int colIndexByOuterInner(int outer, int inner) const + EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const { return int(Derived::ColsAtCompileTime) == 1 ? 0 : int(Derived::RowsAtCompileTime) == 1 ? inner @@ -55,27 +58,27 @@ class DenseCoeffsBase : public EigenBase } /** Short version: don't use this function, use - * \link operator()(int,int) const \endlink instead. + * \link operator()(Index,Index) const \endlink instead. * * Long version: this function is similar to - * \link operator()(int,int) const \endlink, but without the assertion. + * \link operator()(Index,Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this - * function equivalent to \link operator()(int,int) const \endlink. + * function equivalent to \link operator()(Index,Index) const \endlink. 
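// Illustrative sketch, not part of the patch: read-only coefficient access now uses
// Index throughout; operator() checks its arguments with ei_assert, while coeff()
// only does so when EIGEN_INTERNAL_DEBUGGING is defined.
#include <Eigen/Dense>

void read_access_example()
{
  typedef Eigen::MatrixXd::Index Index;
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  Eigen::VectorXd v = Eigen::VectorXd::Random(5);
  Index i = 1, j = 2;
  double a = m(i, j);        // range-checked (assertion) access
  double b = m.coeff(i, j);  // unchecked access for hot loops
  double c = v[i];           // linear access, vectors only
  double d = v(i);           // synonymous with v[i]
  (void)a; (void)b; (void)c; (void)d;
}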
* - * \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const + * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const */ - EIGEN_STRONG_INLINE const CoeffReturnType coeff(int row, int col) const + EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const { ei_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return derived().coeff(row, col); } - EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(int outer, int inner) const + EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); @@ -83,9 +86,9 @@ class DenseCoeffsBase : public EigenBase /** \returns the coefficient at given the given row and column. * - * \sa operator()(int,int), operator[](int) + * \sa operator()(Index,Index), operator[](Index) */ - EIGEN_STRONG_INLINE const CoeffReturnType operator()(int row, int col) const + EIGEN_STRONG_INLINE const CoeffReturnType operator()(Index row, Index col) const { ei_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -93,22 +96,22 @@ class DenseCoeffsBase : public EigenBase } /** Short version: don't use this function, use - * \link operator[](int) const \endlink instead. + * \link operator[](Index) const \endlink instead. * * Long version: this function is similar to - * \link operator[](int) const \endlink, but without the assertion. + * \link operator[](Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameter \a index is in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this - * function equivalent to \link operator[](int) const \endlink. + * function equivalent to \link operator[](Index) const \endlink. * - * \sa operator[](int) const, coeffRef(int), coeff(int,int) const + * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const */ EIGEN_STRONG_INLINE const CoeffReturnType - coeff(int index) const + coeff(Index index) const { ei_internal_assert(index >= 0 && index < size()); return derived().coeff(index); @@ -119,12 +122,12 @@ class DenseCoeffsBase : public EigenBase * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * - * \sa operator[](int), operator()(int,int) const, x() const, y() const, + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_STRONG_INLINE const CoeffReturnType - operator[](int index) const + operator[](Index index) const { EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) @@ -134,16 +137,16 @@ class DenseCoeffsBase : public EigenBase /** \returns the coefficient at given index. * - * This is synonymous to operator[](int) const. + * This is synonymous to operator[](Index) const. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. 
* - * \sa operator[](int), operator()(int,int) const, x() const, y() const, + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_STRONG_INLINE const CoeffReturnType - operator()(int index) const + operator()(Index index) const { ei_assert(index >= 0 && index < size()); return derived().coeff(index); @@ -180,17 +183,17 @@ class DenseCoeffsBase : public EigenBase template EIGEN_STRONG_INLINE typename ei_packet_traits::type - packet(int row, int col) const + packet(Index row, Index col) const { ei_internal_assert(row >= 0 && row < rows() - && col >= 0 && col < cols()); + && col >= 0 && col < cols()); return derived().template packet(row,col); } template EIGEN_STRONG_INLINE typename ei_packet_traits::type - packetByOuterInner(int outer, int inner) const + packetByOuterInner(Index outer, Index inner) const { return packet(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); @@ -207,7 +210,7 @@ class DenseCoeffsBase : public EigenBase template EIGEN_STRONG_INLINE typename ei_packet_traits::type - packet(int index) const + packet(Index index) const { ei_internal_assert(index >= 0 && index < size()); return derived().template packet(index); @@ -240,8 +243,14 @@ class DenseCoeffsBase : public DenseCoeffsBase public: typedef DenseCoeffsBase Base; + + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; - using typename Base::CoeffReturnType; + typedef typename ei_packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + using Base::coeff; using Base::rows; using Base::cols; @@ -257,20 +266,20 @@ class DenseCoeffsBase : public DenseCoeffsBase using Base::w; /** Short version: don't use this function, use - * \link operator()(int,int) \endlink instead. + * \link operator()(Index,Index) \endlink instead. * * Long version: this function is similar to - * \link operator()(int,int) \endlink, but without the assertion. + * \link operator()(Index,Index) \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this - * function equivalent to \link operator()(int,int) \endlink. + * function equivalent to \link operator()(Index,Index) \endlink. * - * \sa operator()(int,int), coeff(int, int) const, coeffRef(int) + * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) */ - EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { ei_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -278,7 +287,7 @@ class DenseCoeffsBase : public DenseCoeffsBase } EIGEN_STRONG_INLINE Scalar& - coeffRefByOuterInner(int outer, int inner) + coeffRefByOuterInner(Index outer, Index inner) { return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); @@ -286,11 +295,11 @@ class DenseCoeffsBase : public DenseCoeffsBase /** \returns a reference to the coefficient at given the given row and column. 
* - * \sa operator[](int) + * \sa operator[](Index) */ EIGEN_STRONG_INLINE Scalar& - operator()(int row, int col) + operator()(Index row, Index col) { ei_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -299,22 +308,22 @@ class DenseCoeffsBase : public DenseCoeffsBase /** Short version: don't use this function, use - * \link operator[](int) \endlink instead. + * \link operator[](Index) \endlink instead. * * Long version: this function is similar to - * \link operator[](int) \endlink, but without the assertion. + * \link operator[](Index) \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this - * function equivalent to \link operator[](int) \endlink. + * function equivalent to \link operator[](Index) \endlink. * - * \sa operator[](int), coeff(int) const, coeffRef(int,int) + * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) */ EIGEN_STRONG_INLINE Scalar& - coeffRef(int index) + coeffRef(Index index) { ei_internal_assert(index >= 0 && index < size()); return derived().coeffRef(index); @@ -324,11 +333,11 @@ class DenseCoeffsBase : public DenseCoeffsBase * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * - * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_STRONG_INLINE Scalar& - operator[](int index) + operator[](Index index) { EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) @@ -338,15 +347,15 @@ class DenseCoeffsBase : public DenseCoeffsBase /** \returns a reference to the coefficient at given index. * - * This is synonymous to operator[](int). + * This is synonymous to operator[](Index). * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. 
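// Illustrative sketch, not part of the patch: the writable counterparts follow the
// same pattern; coeffRef() is the unchecked version of the non-const operator() and
// operator[], all of which now take Index.
#include <Eigen/Dense>

void write_access_example()
{
  typedef Eigen::MatrixXd::Index Index;
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(3, 3);
  Eigen::VectorXd v = Eigen::VectorXd::Zero(5);
  Index i = 1, j = 2;
  m(i, j) = 1.0;           // range-checked write
  m.coeffRef(i, j) = 2.0;  // unchecked write for hot loops
  v[i] = 3.0;              // linear write, vectors only
  v(i + 1) = 4.0;          // synonymous with v[i + 1]
}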
* - * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_STRONG_INLINE Scalar& - operator()(int index) + operator()(Index index) { ei_assert(index >= 0 && index < size()); return derived().coeffRef(index); @@ -383,7 +392,7 @@ class DenseCoeffsBase : public DenseCoeffsBase template EIGEN_STRONG_INLINE void writePacket - (int row, int col, const typename ei_packet_traits::type& x) + (Index row, Index col, const typename ei_packet_traits::type& x) { ei_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -393,7 +402,7 @@ class DenseCoeffsBase : public DenseCoeffsBase template EIGEN_STRONG_INLINE void writePacketByOuterInner - (int outer, int inner, const typename ei_packet_traits::type& x) + (Index outer, Index inner, const typename ei_packet_traits::type& x) { writePacket(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner), @@ -411,7 +420,7 @@ class DenseCoeffsBase : public DenseCoeffsBase template EIGEN_STRONG_INLINE void writePacket - (int index, const typename ei_packet_traits::type& x) + (Index index, const typename ei_packet_traits::type& x) { ei_internal_assert(index >= 0 && index < size()); derived().template writePacket(index,x); @@ -428,7 +437,7 @@ class DenseCoeffsBase : public DenseCoeffsBase */ template - EIGEN_STRONG_INLINE void copyCoeff(int row, int col, const DenseBase& other) + EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase& other) { ei_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -444,7 +453,7 @@ class DenseCoeffsBase : public DenseCoeffsBase */ template - EIGEN_STRONG_INLINE void copyCoeff(int index, const DenseBase& other) + EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase& other) { ei_internal_assert(index >= 0 && index < size()); derived().coeffRef(index) = other.derived().coeff(index); @@ -452,10 +461,10 @@ class DenseCoeffsBase : public DenseCoeffsBase template - EIGEN_STRONG_INLINE void copyCoeffByOuterInner(int outer, int inner, const DenseBase& other) + EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase& other) { - const int row = rowIndexByOuterInner(outer,inner); - const int col = colIndexByOuterInner(outer,inner); + const Index row = rowIndexByOuterInner(outer,inner); + const Index col = colIndexByOuterInner(outer,inner); // derived() is important here: copyCoeff() may be reimplemented in Derived! 
derived().copyCoeff(row, col, other); } @@ -469,7 +478,7 @@ class DenseCoeffsBase : public DenseCoeffsBase */ template - EIGEN_STRONG_INLINE void copyPacket(int row, int col, const DenseBase& other) + EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase& other) { ei_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); @@ -486,7 +495,7 @@ class DenseCoeffsBase : public DenseCoeffsBase */ template - EIGEN_STRONG_INLINE void copyPacket(int index, const DenseBase& other) + EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase& other) { ei_internal_assert(index >= 0 && index < size()); derived().template writePacket(index, @@ -494,10 +503,10 @@ class DenseCoeffsBase : public DenseCoeffsBase } template - EIGEN_STRONG_INLINE void copyPacketByOuterInner(int outer, int inner, const DenseBase& other) + EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase& other) { - const int row = rowIndexByOuterInner(outer,inner); - const int col = colIndexByOuterInner(outer,inner); + const Index row = rowIndexByOuterInner(outer,inner); + const Index col = colIndexByOuterInner(outer,inner); // derived() is important here: copyCoeff() may be reimplemented in Derived! derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other); } @@ -507,7 +516,7 @@ class DenseCoeffsBase : public DenseCoeffsBase * * \sa outerStride(), rowStride(), colStride() */ - inline int innerStride() const + inline Index innerStride() const { return derived().innerStride(); } @@ -517,12 +526,12 @@ class DenseCoeffsBase : public DenseCoeffsBase * * \sa innerStride(), rowStride(), colStride() */ - inline int outerStride() const + inline Index outerStride() const { return derived().outerStride(); } - inline int stride() const + inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } @@ -531,7 +540,7 @@ class DenseCoeffsBase : public DenseCoeffsBase * * \sa innerStride(), outerStride(), colStride() */ - inline int rowStride() const + inline Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); } @@ -540,7 +549,7 @@ class DenseCoeffsBase : public DenseCoeffsBase * * \sa innerStride(), outerStride(), rowStride() */ - inline int colStride() const + inline Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); } @@ -549,14 +558,14 @@ class DenseCoeffsBase : public DenseCoeffsBase template struct ei_first_aligned_impl { - inline static int run(const Derived&) + inline static typename Derived::Index run(const Derived&) { return 0; } }; template struct ei_first_aligned_impl { - inline static int run(const Derived& m) + inline static typename Derived::Index run(const Derived& m) { return ei_first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); } @@ -568,7 +577,7 @@ struct ei_first_aligned_impl * documentation. 
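// Illustrative sketch, not part of the patch: the stride accessors return Index. For a
// plain column-major matrix the inner stride is 1 and the outer stride is the number
// of rows; rowStride()/colStride() map onto them according to the storage order.
#include <Eigen/Dense>

void stride_example()
{
  typedef Eigen::MatrixXd::Index Index;
  Eigen::MatrixXd m(5, 3);                 // column-major by default
  Index inner = m.innerStride();           // 1: consecutive coefficients within a column
  Index outer = m.outerStride();           // 5: distance between the starts of two columns
  Index rs = m.rowStride();                // == innerStride() for column-major storage
  Index cs = m.colStride();                // == outerStride() for column-major storage
  (void)inner; (void)outer; (void)rs; (void)cs;
}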
*/ template -inline static int ei_first_aligned(const Derived& m) +inline static typename Derived::Index ei_first_aligned(const Derived& m) { return ei_first_aligned_impl diff --git a/Eigen/src/Core/DenseStorageBase.h b/Eigen/src/Core/DenseStorageBase.h index d2bbb07f7..15f3988ea 100644 --- a/Eigen/src/Core/DenseStorageBase.h +++ b/Eigen/src/Core/DenseStorageBase.h @@ -44,9 +44,13 @@ class DenseStorageBase : public ei_dense_xpr_base::type public: enum { Options = ei_traits::Options }; typedef typename ei_dense_xpr_base::type Base; - typedef typename Base::PlainObject PlainObject; - typedef typename Base::Scalar Scalar; - typedef typename Base::PacketScalar PacketScalar; + + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; @@ -72,10 +76,10 @@ class DenseStorageBase : public ei_dense_xpr_base::type Base& base() { return *static_cast(this); } const Base& base() const { return *static_cast(this); } - EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); } - EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); } + EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } - EIGEN_STRONG_INLINE const Scalar& coeff(int row, int col) const + EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const { if(Flags & RowMajorBit) return m_storage.data()[col + row * m_storage.cols()]; @@ -83,12 +87,12 @@ class DenseStorageBase : public ei_dense_xpr_base::type return m_storage.data()[row + col * m_storage.rows()]; } - EIGEN_STRONG_INLINE const Scalar& coeff(int index) const + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; } - EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { if(Flags & RowMajorBit) return m_storage.data()[col + row * m_storage.cols()]; @@ -96,13 +100,13 @@ class DenseStorageBase : public ei_dense_xpr_base::type return m_storage.data()[row + col * m_storage.rows()]; } - EIGEN_STRONG_INLINE Scalar& coeffRef(int index) + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; } template - EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { return ei_ploadt (m_storage.data() + (Flags & RowMajorBit @@ -111,13 +115,13 @@ class DenseStorageBase : public ei_dense_xpr_base::type } template - EIGEN_STRONG_INLINE PacketScalar packet(int index) const + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return ei_ploadt(m_storage.data() + index); } template - EIGEN_STRONG_INLINE void writePacket(int row, int col, const PacketScalar& x) + EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x) { ei_pstoret (m_storage.data() + (Flags & RowMajorBit @@ -126,7 +130,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type } template - EIGEN_STRONG_INLINE void writePacket(int index, const PacketScalar& x) + EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x) { ei_pstoret(m_storage.data() + index, x); } @@ -143,7 +147,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type * * This method is intended for dynamic-size matrices, 
although it is legal to call it on any * matrix as long as fixed dimensions are left unchanged. If you only want to change the number - * of rows and/or of columns, you can use resize(NoChange_t, int), resize(int, NoChange_t). + * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). * * If the current number of coefficients of \c *this exactly matches the * product \a rows * \a cols, then no memory allocation is performed and @@ -153,12 +157,12 @@ class DenseStorageBase : public ei_dense_xpr_base::type * Example: \include Matrix_resize_int_int.cpp * Output: \verbinclude Matrix_resize_int_int.out * - * \sa resize(int) for vectors, resize(NoChange_t, int), resize(int, NoChange_t) + * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ - inline void resize(int rows, int cols) + inline void resize(Index rows, Index cols) { #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO - int size = rows*cols; + Index size = rows*cols; bool size_changed = size != this->size(); m_storage.resize(size, rows, cols); if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED @@ -176,9 +180,9 @@ class DenseStorageBase : public ei_dense_xpr_base::type * Example: \include Matrix_resize_int.cpp * Output: \verbinclude Matrix_resize_int.out * - * \sa resize(int,int), resize(NoChange_t, int), resize(int, NoChange_t) + * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) */ - inline void resize(int size) + inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseStorageBase) ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); @@ -200,9 +204,9 @@ class DenseStorageBase : public ei_dense_xpr_base::type * Example: \include Matrix_resize_NoChange_int.cpp * Output: \verbinclude Matrix_resize_NoChange_int.out * - * \sa resize(int,int) + * \sa resize(Index,Index) */ - inline void resize(NoChange_t, int cols) + inline void resize(NoChange_t, Index cols) { resize(rows(), cols); } @@ -213,9 +217,9 @@ class DenseStorageBase : public ei_dense_xpr_base::type * Example: \include Matrix_resize_int_NoChange.cpp * Output: \verbinclude Matrix_resize_int_NoChange.out * - * \sa resize(int,int) + * \sa resize(Index,Index) */ - inline void resize(int rows, NoChange_t) + inline void resize(Index rows, NoChange_t) { resize(rows, cols()); } @@ -231,7 +235,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) { const OtherDerived& other = _other.derived(); - const int othersize = other.rows()*other.cols(); + const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { ei_assert(other.rows() == 1 || other.cols() == 1); @@ -248,26 +252,26 @@ class DenseStorageBase : public ei_dense_xpr_base::type /** Resizes \c *this to a \a rows x \a cols matrix while leaving old values of \c *this untouched. * * This method is intended for dynamic-size matrices. If you only want to change the number - * of rows and/or of columns, you can use conservativeResize(NoChange_t, int), - * conservativeResize(int, NoChange_t). + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index), + * conservativeResize(Index, NoChange_t). * * The top-left part of the resized matrix will be the same as the overlapping top-left corner * of \c *this. In case values need to be appended to the matrix they will be uninitialized. 
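As a usage sketch of the semantics described above (illustrative only, assuming <Eigen/Dense>): the overlapping top-left block survives a conservativeResize, and anything appended is left uninitialized.

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m(2, 2);
      m << 1, 2,
           3, 4;
      m.conservativeResize(3, 3);                     // old 2x2 block kept, new row/column uninitialized
      std::cout << m.block(0, 0, 2, 2) << std::endl;  // still prints the original 2x2 block
      return 0;
    }
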
*/ - EIGEN_STRONG_INLINE void conservativeResize(int rows, int cols) + EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) { ei_conservative_resize_like_impl::run(*this, rows, cols); } - EIGEN_STRONG_INLINE void conservativeResize(int rows, NoChange_t) + EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) { - // Note: see the comment in conservativeResize(int,int) + // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows, cols()); } - EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, int cols) + EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) { - // Note: see the comment in conservativeResize(int,int) + // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), cols); } @@ -279,7 +283,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type * * When values are appended, they will be uninitialized. */ - EIGEN_STRONG_INLINE void conservativeResize(int size) + EIGEN_STRONG_INLINE void conservativeResize(Index size) { ei_conservative_resize_like_impl::run(*this, size); } @@ -329,7 +333,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type } #endif - EIGEN_STRONG_INLINE DenseStorageBase(int size, int rows, int cols) + EIGEN_STRONG_INLINE DenseStorageBase(Index size, Index rows, Index cols) : m_storage(size, rows, cols) { // _check_template_params(); @@ -370,44 +374,44 @@ class DenseStorageBase : public ei_dense_xpr_base::type { return UnalignedMapType(data); } inline static UnalignedMapType Map(Scalar* data) { return UnalignedMapType(data); } - inline static const UnalignedMapType Map(const Scalar* data, int size) + inline static const UnalignedMapType Map(const Scalar* data, Index size) { return UnalignedMapType(data, size); } - inline static UnalignedMapType Map(Scalar* data, int size) + inline static UnalignedMapType Map(Scalar* data, Index size) { return UnalignedMapType(data, size); } - inline static const UnalignedMapType Map(const Scalar* data, int rows, int cols) + inline static const UnalignedMapType Map(const Scalar* data, Index rows, Index cols) { return UnalignedMapType(data, rows, cols); } - inline static UnalignedMapType Map(Scalar* data, int rows, int cols) + inline static UnalignedMapType Map(Scalar* data, Index rows, Index cols) { return UnalignedMapType(data, rows, cols); } inline static const AlignedMapType MapAligned(const Scalar* data) { return AlignedMapType(data); } inline static AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } - inline static const AlignedMapType MapAligned(const Scalar* data, int size) + inline static const AlignedMapType MapAligned(const Scalar* data, Index size) { return AlignedMapType(data, size); } - inline static AlignedMapType MapAligned(Scalar* data, int size) + inline static AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } - inline static const AlignedMapType MapAligned(const Scalar* data, int rows, int cols) + inline static const AlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } - inline static AlignedMapType MapAligned(Scalar* data, int rows, int cols) + inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } //@} using Base::setConstant; - Derived& setConstant(int size, const Scalar& value); - Derived& setConstant(int rows, int cols, const Scalar& value); + Derived& setConstant(Index size, const Scalar& value); + Derived& 
setConstant(Index rows, Index cols, const Scalar& value); using Base::setZero; - Derived& setZero(int size); - Derived& setZero(int rows, int cols); + Derived& setZero(Index size); + Derived& setZero(Index rows, Index cols); using Base::setOnes; - Derived& setOnes(int size); - Derived& setOnes(int rows, int cols); + Derived& setOnes(Index size); + Derived& setOnes(Index rows, Index cols); using Base::setRandom; - Derived& setRandom(int size); - Derived& setRandom(int rows, int cols); + Derived& setRandom(Index size); + Derived& setRandom(Index rows, Index cols); #ifdef EIGEN_DENSESTORAGEBASE_PLUGIN #include EIGEN_DENSESTORAGEBASE_PLUGIN @@ -474,7 +478,7 @@ class DenseStorageBase : public ei_dense_xpr_base::type } template - EIGEN_STRONG_INLINE void _init2(int rows, int cols, typename ei_enable_if::type* = 0) + EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename ei_enable_if::type* = 0) { ei_assert(rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); @@ -526,7 +530,8 @@ class DenseStorageBase : public ei_dense_xpr_base::type template struct ei_conservative_resize_like_impl { - static void run(DenseBase& _this, int rows, int cols) + typedef typename Derived::Index Index; + static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) @@ -540,8 +545,8 @@ struct ei_conservative_resize_like_impl { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(rows,cols); - const int common_rows = std::min(rows, _this.rows()); - const int common_cols = std::min(cols, _this.cols()); + const Index common_rows = std::min(rows, _this.rows()); + const Index common_cols = std::min(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } @@ -551,10 +556,10 @@ struct ei_conservative_resize_like_impl { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; - // Note: Here is space for improvement. Basically, for conservativeResize(int,int), + // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the - // dimensions is dynamic, one could use either conservativeResize(int rows, NoChange_t) or - // conservativeResize(NoChange_t, int cols). For these methods new static asserts like + // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or + // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. 
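The NoChange_t overloads referred to in the comment above resize a single dimension while keeping existing coefficients. A minimal sketch (illustrative only, assuming <Eigen/Dense> and the Eigen::NoChange constant):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
      m.conservativeResize(Eigen::NoChange, 6);  // keep 4 rows, grow to 6 columns
      m.conservativeResize(2, Eigen::NoChange);  // shrink to 2 rows, keep 6 columns
      return 0;
    }
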
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) @@ -562,8 +567,8 @@ struct ei_conservative_resize_like_impl if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns { - const int new_rows = other.rows() - _this.rows(); - const int new_cols = other.cols() - _this.cols(); + const Index new_rows = other.rows() - _this.rows(); + const Index new_cols = other.cols() - _this.cols(); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); if (new_rows>0) _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); @@ -574,8 +579,8 @@ struct ei_conservative_resize_like_impl { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(other); - const int common_rows = std::min(tmp.rows(), _this.rows()); - const int common_cols = std::min(tmp.cols(), _this.cols()); + const Index common_rows = std::min(tmp.rows(), _this.rows()); + const Index common_cols = std::min(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } @@ -585,10 +590,11 @@ struct ei_conservative_resize_like_impl template struct ei_conservative_resize_like_impl { - static void run(DenseBase& _this, int size) + typedef typename Derived::Index Index; + static void run(DenseBase& _this, Index size) { - const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; - const int new_cols = Derived::RowsAtCompileTime==1 ? size : 1; + const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; + const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); } @@ -596,10 +602,10 @@ struct ei_conservative_resize_like_impl { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; - const int num_new_elements = other.size() - _this.size(); + const Index num_new_elements = other.size() - _this.size(); - const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); - const int new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; + const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); + const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); if (num_new_elements > 0) diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h index 9ae7d79ce..a4326a299 100644 --- a/Eigen/src/Core/Diagonal.h +++ b/Eigen/src/Core/Diagonal.h @@ -30,33 +30,34 @@ * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix * * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal - * \param Index the index of the sub/super diagonal. The default is 0 and it means the main diagonal. + * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. * A positive value means a superdiagonal, a negative value means a subdiagonal. * You can also use Dynamic so the index can be set at runtime. * * The matrix is not required to be square. * * This class represents an expression of the main diagonal, or any sub/super diagonal - * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(int) and most of the + * of a square matrix. 
It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the * time this is the only way it is used. * - * \sa MatrixBase::diagonal(), MatrixBase::diagonal(int) + * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) */ -template -struct ei_traits > +template +struct ei_traits > : ei_traits { typedef typename ei_nested::type MatrixTypeNested; typedef typename ei_unref::type _MatrixTypeNested; + typedef typename MatrixType::StorageKind StorageKind; enum { - AbsIndex = Index<0 ? -Index : Index, // only used if Index != Dynamic - RowsAtCompileTime = (int(Index) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic + AbsDiagIndex = DiagIndex<0 ? -DiagIndex : DiagIndex, // only used if DiagIndex != Dynamic + RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic : (EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, - MatrixType::ColsAtCompileTime) - AbsIndex), + MatrixType::ColsAtCompileTime) - AbsDiagIndex), ColsAtCompileTime = 1, MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic - : Index == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - : (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsIndex), + : DiagIndex == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) + : (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsDiagIndex), MaxColsAtCompileTime = 1, Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit) & ~RowMajorBit, CoeffReadCost = _MatrixTypeNested::CoeffReadCost, @@ -66,61 +67,62 @@ struct ei_traits > }; }; -template class Diagonal - : public ei_dense_xpr_base< Diagonal >::type +template class Diagonal + : public ei_dense_xpr_base< Diagonal >::type { - // some compilers may fail to optimize std::max etc in case of compile-time constants... - EIGEN_STRONG_INLINE int absIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } - EIGEN_STRONG_INLINE int rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } - EIGEN_STRONG_INLINE int colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } - public: typedef typename ei_dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) - inline Diagonal(const MatrixType& matrix, int index = Index) : m_matrix(matrix), m_index(index) {} + inline Diagonal(const MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) - inline int rows() const + inline Index rows() const { return m_index.value()<0 ? 
std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); } - inline int cols() const { return 1; } + inline Index cols() const { return 1; } - inline int innerStride() const + inline Index innerStride() const { return m_matrix.outerStride() + 1; } - inline int outerStride() const + inline Index outerStride() const { return 0; } - inline Scalar& coeffRef(int row, int) + inline Scalar& coeffRef(Index row, Index) { return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); } - inline CoeffReturnType coeff(int row, int) const + inline CoeffReturnType coeff(Index row, Index) const { return m_matrix.coeff(row+rowOffset(), row+colOffset()); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); } - inline CoeffReturnType coeff(int index) const + inline CoeffReturnType coeff(Index index) const { return m_matrix.coeff(index+rowOffset(), index+colOffset()); } protected: const typename MatrixType::Nested m_matrix; - const ei_int_if_dynamic m_index; + const ei_variable_if_dynamic m_index; + + private: + // some compilers may fail to optimize std::max etc in case of compile-time constants... + EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } }; /** \returns an expression of the main diagonal of the matrix \c *this @@ -146,12 +148,12 @@ MatrixBase::diagonal() const return Diagonal(derived()); } -/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * - * The template parameter \a Index represent a super diagonal if \a Index > 0 - * and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal. + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. * * Example: \include MatrixBase_diagonal_int.cpp * Output: \verbinclude MatrixBase_diagonal_int.out @@ -159,45 +161,45 @@ MatrixBase::diagonal() const * \sa MatrixBase::diagonal(), class Diagonal */ template inline Diagonal -MatrixBase::diagonal(int index) +MatrixBase::diagonal(Index index) { return Diagonal(derived(), index); } -/** This is the const version of diagonal(int). */ +/** This is the const version of diagonal(Index). */ template inline const Diagonal -MatrixBase::diagonal(int index) const +MatrixBase::diagonal(Index index) const { return Diagonal(derived(), index); } -/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * - * The template parameter \a Index represent a super diagonal if \a Index > 0 - * and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal. + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. 
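Both the runtime and the compile-time forms of diagonal() touched here keep their meaning; only the index type and template parameter name change. A usage sketch (illustrative only, assuming <Eigen/Dense>):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      std::cout << m.diagonal().transpose()    << "\n";  // main diagonal: 1 5 9
      std::cout << m.diagonal(1).transpose()   << "\n";  // first superdiagonal: 2 6
      std::cout << m.diagonal(-1).transpose()  << "\n";  // first subdiagonal: 4 8
      std::cout << m.diagonal<1>().transpose() << "\n";  // compile-time form of the same superdiagonal
      return 0;
    }
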
* * Example: \include MatrixBase_diagonal_template_int.cpp * Output: \verbinclude MatrixBase_diagonal_template_int.out * * \sa MatrixBase::diagonal(), class Diagonal */ template -template -inline Diagonal +template +inline Diagonal MatrixBase::diagonal() { - return Diagonal(derived()); + return Diagonal(derived()); } /** This is the const version of diagonal(). */ template -template -inline const Diagonal +template +inline const Diagonal MatrixBase::diagonal() const { - return Diagonal(derived()); + return Diagonal(derived()); } #endif // EIGEN_DIAGONAL_H diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index 774b0d7ae..8d3b458a9 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -33,6 +33,8 @@ class DiagonalBase : public EigenBase public: typedef typename ei_traits::DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, @@ -61,8 +63,8 @@ class DiagonalBase : public EigenBase inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } inline DiagonalVectorType& diagonal() { return derived().diagonal(); } - inline int rows() const { return diagonal().size(); } - inline int cols() const { return diagonal().size(); } + inline Index rows() const { return diagonal().size(); } + inline Index cols() const { return diagonal().size(); } template const DiagonalProduct @@ -100,6 +102,7 @@ struct ei_traits : ei_traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; + typedef Dense StorageKind; }; template @@ -111,6 +114,8 @@ class DiagonalMatrix typedef typename ei_traits::DiagonalVectorType DiagonalVectorType; typedef const DiagonalMatrix& Nested; typedef _Scalar Scalar; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; #endif protected: @@ -128,7 +133,7 @@ class DiagonalMatrix inline DiagonalMatrix() {} /** Constructs a diagonal matrix with given dimension */ - inline DiagonalMatrix(int dim) : m_diagonal(dim) {} + inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} /** 2D constructor. */ inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} @@ -170,15 +175,15 @@ class DiagonalMatrix #endif /** Resizes to given size. */ - inline void resize(int size) { m_diagonal.resize(size); } + inline void resize(Index size) { m_diagonal.resize(size); } /** Sets all coefficients to zero. */ inline void setZero() { m_diagonal.setZero(); } /** Resizes and sets all coefficients to zero. */ - inline void setZero(int size) { m_diagonal.setZero(size); } + inline void setZero(Index size) { m_diagonal.setZero(size); } /** Sets this matrix to be the identity matrix of the current size. */ inline void setIdentity() { m_diagonal.setOnes(); } /** Sets this matrix to be the identity matrix of the given size. 
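A short sketch of the DiagonalMatrix sizing calls whose parameters become Index in this file (illustrative only, assuming <Eigen/Dense>):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::DiagonalMatrix<double, Eigen::Dynamic> d(3);  // DiagonalMatrix(Index dim): 3x3
      d.setIdentity(4);        // resize to 4x4 and set the stored diagonal to ones
      d.diagonal()[2] = 5.0;   // the diagonal is stored as a plain vector
      return 0;
    }
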
*/ - inline void setIdentity(int size) { m_diagonal.setOnes(size); } + inline void setIdentity(Index size) { m_diagonal.setOnes(size); } }; /** \class DiagonalWrapper @@ -198,6 +203,7 @@ struct ei_traits > { typedef _DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::StorageKind StorageKind; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, @@ -257,13 +263,13 @@ bool MatrixBase::isDiagonal { if(cols() != rows()) return false; RealScalar maxAbsOnDiagonal = static_cast(-1); - for(int j = 0; j < cols(); ++j) + for(Index j = 0; j < cols(); ++j) { RealScalar absOnDiagonal = ei_abs(coeff(j,j)); if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; } - for(int j = 0; j < cols(); ++j) - for(int i = 0; i < j; ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < j; ++i) { if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h index 868b4419a..f3af814f8 100644 --- a/Eigen/src/Core/DiagonalProduct.h +++ b/Eigen/src/Core/DiagonalProduct.h @@ -57,23 +57,23 @@ class DiagonalProduct : ei_no_assignment_operator, ei_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); } - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } - const Scalar coeff(int row, int col) const + const Scalar coeff(Index row, Index col) const { return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col); } template - EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const { enum { StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor, InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned }; - const int indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col; + const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? 
row : col; if((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)) diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h index 4bd81872d..6e54dac3c 100644 --- a/Eigen/src/Core/Dot.h +++ b/Eigen/src/Core/Dot.h @@ -159,11 +159,11 @@ template bool MatrixBase::isUnitary(RealScalar prec) const { typename Derived::Nested nested(derived()); - for(int i = 0; i < cols(); ++i) + for(Index i = 0; i < cols(); ++i) { if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) return false; - for(int j = 0; j < i; ++j) + for(Index j = 0; j < i; ++j) if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) return false; } diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h index e583fddc3..c9d3bd875 100644 --- a/Eigen/src/Core/EigenBase.h +++ b/Eigen/src/Core/EigenBase.h @@ -39,6 +39,9 @@ template struct EigenBase { // typedef typename ei_plain_matrix_type::type PlainObject; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + /** \returns a reference to the derived object */ Derived& derived() { return *static_cast(this); } /** \returns a const reference to the derived object */ @@ -48,12 +51,12 @@ template struct EigenBase { return *static_cast(const_cast(this)); } /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ - inline int rows() const { return derived().rows(); } + inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ - inline int cols() const { return derived().cols(); } + inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. 
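The loops converted to Index in the isDiagonal() and isUnitary() hunks above are behaviour-preserving. For instance (sketch, assuming <Eigen/Dense>):

    #include <Eigen/Dense>
    #include <cassert>

    int main()
    {
      Eigen::Matrix3d m;
      m << 2, 0, 0,
           0, 3, 0,
           0, 0, 4;
      assert(m.isDiagonal());    // only the diagonal is nonzero
      assert(!m.isUnitary());    // diagonal, but the columns are not unit vectors
      assert(Eigen::Matrix3d::Identity().isUnitary());
      return 0;
    }
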
*/ - inline int size() const { return rows() * cols(); } + inline Index size() const { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template inline void evalTo(Dest& dst) const diff --git a/Eigen/src/Core/Flagged.h b/Eigen/src/Core/Flagged.h index 9413b74fa..7936f9dcf 100644 --- a/Eigen/src/Core/Flagged.h +++ b/Eigen/src/Core/Flagged.h @@ -58,51 +58,51 @@ template clas inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {} - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } - inline int outerStride() const { return m_matrix.outerStride(); } - inline int innerStride() const { return m_matrix.innerStride(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index outerStride() const { return m_matrix.outerStride(); } + inline Index innerStride() const { return m_matrix.innerStride(); } - inline const Scalar coeff(int row, int col) const + inline const Scalar coeff(Index row, Index col) const { return m_matrix.coeff(row, col); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_matrix.const_cast_derived().coeffRef(row, col); } - inline const Scalar coeff(int index) const + inline const Scalar coeff(Index index) const { return m_matrix.coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_matrix.const_cast_derived().coeffRef(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return m_matrix.template packet(row, col); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_matrix.const_cast_derived().template writePacket(row, col, x); } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) const { return m_matrix.template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_matrix.const_cast_derived().template writePacket(index, x); } diff --git a/Eigen/src/Core/ForceAlignedAccess.h b/Eigen/src/Core/ForceAlignedAccess.h index eedd57751..7db138b41 100644 --- a/Eigen/src/Core/ForceAlignedAccess.h +++ b/Eigen/src/Core/ForceAlignedAccess.h @@ -50,51 +50,51 @@ template class ForceAlignedAccess inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} - inline int rows() const { return m_expression.rows(); } - inline int cols() const { return m_expression.cols(); } - inline int outerStride() const { return m_expression.outerStride(); } - inline int innerStride() const { return m_expression.innerStride(); } + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(int index) const + 
inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet(row, col); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(row, col, x); } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(index, x); } diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h index a42f36b1b..d559deedf 100644 --- a/Eigen/src/Core/Functors.h +++ b/Eigen/src/Core/Functors.h @@ -464,8 +464,10 @@ struct ei_scalar_constant_op { typedef typename ei_packet_traits::type PacketScalar; EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { } - EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; } - EIGEN_STRONG_INLINE const PacketScalar packetOp(int, int = 0) const { return ei_pset1(m_other); } + template + EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(Index, Index = 0) const { return ei_pset1(m_other); } const Scalar m_other; }; template @@ -474,7 +476,8 @@ struct ei_functor_traits > template struct ei_scalar_identity_op { EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_identity_op) - EIGEN_STRONG_INLINE const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); } + template + EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? 
Scalar(1) : Scalar(0); } }; template struct ei_functor_traits > @@ -497,8 +500,10 @@ struct ei_linspaced_op_impl m_packetStep(ei_pset1(ei_packet_traits::size*step)), m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset(-ei_packet_traits::size)))) {} - EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; } - EIGEN_STRONG_INLINE const PacketScalar packetOp(int) const { return m_base = ei_padd(m_base,m_packetStep); } + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(Index) const { return m_base = ei_padd(m_base,m_packetStep); } const Scalar m_low; const Scalar m_step; @@ -518,8 +523,10 @@ struct ei_linspaced_op_impl m_low(low), m_step(step), m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset(0)) {} - EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; } - EIGEN_STRONG_INLINE const PacketScalar packetOp(int i) const + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i) const { return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1(i),m_interPacket))); } const Scalar m_low; @@ -541,8 +548,10 @@ template struct ei_linspaced_op { typedef typename ei_packet_traits::type PacketScalar; ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {} - EIGEN_STRONG_INLINE const Scalar operator() (int i, int = 0) const { return impl(i); } - EIGEN_STRONG_INLINE const PacketScalar packetOp(int i, int = 0) const { return impl.packetOp(i); } + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i, Index = 0) const { return impl(i); } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i, Index = 0) const { return impl.packetOp(i); } // This proxy object handles the actual required temporaries, the different // implementations (random vs. sequential access) as well as the piping // correct piping to size 2/4 packet operations. 
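These nullary functors back the Constant, Identity and LinSpaced expressions, so templating their call operators on Index keeps the user-facing calls unchanged. A quick sketch of the front-end counterparts (illustrative only, assuming <Eigen/Dense>):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf c = Eigen::MatrixXf::Constant(2, 2, 3.f);  // backed by ei_scalar_constant_op
      Eigen::MatrixXf i = Eigen::MatrixXf::Identity(3, 3);       // backed by ei_scalar_identity_op
      std::cout << c << "\n\n" << i << std::endl;
      return 0;
    }
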
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h index 432da4288..299640911 100644 --- a/Eigen/src/Core/Fuzzy.h +++ b/Eigen/src/Core/Fuzzy.h @@ -201,13 +201,14 @@ template struct ei_fuzzy_selector { typedef typename Derived::RealScalar RealScalar; + typedef typename Derived::Index Index; static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) { EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); typename Derived::Nested nested(self); typename OtherDerived::Nested otherNested(other); - for(int i = 0; i < self.cols(); ++i) + for(Index i = 0; i < self.cols(); ++i) if((nested.col(i) - otherNested.col(i)).squaredNorm() > std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec) return false; @@ -216,7 +217,7 @@ struct ei_fuzzy_selector static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) { typename Derived::Nested nested(self); - for(int i = 0; i < self.cols(); ++i) + for(Index i = 0; i < self.cols(); ++i) if(nested.col(i).squaredNorm() > ei_abs2(other * prec)) return false; return true; @@ -227,7 +228,7 @@ struct ei_fuzzy_selector ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); typename Derived::Nested nested(self); typename OtherDerived::Nested otherNested(other); - for(int i = 0; i < self.cols(); ++i) + for(Index i = 0; i < self.cols(); ++i) if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec) return false; return true; diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h index 3da92d21a..f9b2f083e 100644 --- a/Eigen/src/Core/IO.h +++ b/Eigen/src/Core/IO.h @@ -157,8 +157,9 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm { const typename Derived::Nested m = _m; typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; - int width = 0; + Index width = 0; std::streamsize explicit_precision; if(fmt.precision == StreamPrecision) @@ -185,26 +186,26 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm if(align_cols) { // compute the largest width - for(int j = 1; j < m.cols(); ++j) - for(int i = 0; i < m.rows(); ++i) + for(Index j = 1; j < m.cols(); ++j) + for(Index i = 0; i < m.rows(); ++i) { std::stringstream sstr; if(explicit_precision) sstr.precision(explicit_precision); sstr << m.coeff(i,j); - width = std::max(width, int(sstr.str().length())); + width = std::max(width, Index(sstr.str().length())); } } std::streamsize old_precision = 0; if(explicit_precision) old_precision = s.precision(explicit_precision); s << fmt.matPrefix; - for(int i = 0; i < m.rows(); ++i) + for(Index i = 0; i < m.rows(); ++i) { if (i) s << fmt.rowSpacer; s << fmt.rowPrefix; if(width) s.width(width); s << m.coeff(i, 0); - for(int j = 1; j < m.cols(); ++j) + for(Index j = 1; j < m.cols(); ++j) { s << fmt.coeffSeparator; if (width) s.width(width); diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h index a22779bf9..6ca24b77d 100644 --- a/Eigen/src/Core/Map.h +++ b/Eigen/src/Core/Map.h @@ -109,12 +109,12 @@ template class Ma EIGEN_DENSE_PUBLIC_INTERFACE(Map) - inline int innerStride() const + inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } - inline int outerStride() const + inline Index outerStride() const { return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() : IsVectorAtCompileTime ? 
this->size() @@ -139,7 +139,7 @@ template class Ma * \param size the size of the vector expression * \param stride optional Stride object, passing the strides. */ - inline Map(const Scalar* data, int size, const StrideType& stride = StrideType()) + inline Map(const Scalar* data, Index size, const StrideType& stride = StrideType()) : Base(data, size), m_stride(stride) { PlainObjectType::Base::_check_template_params(); @@ -152,7 +152,7 @@ template class Ma * \param cols the number of columns of the matrix expression * \param stride optional Stride object, passing the strides. */ - inline Map(const Scalar* data, int rows, int cols, const StrideType& stride = StrideType()) + inline Map(const Scalar* data, Index rows, Index cols, const StrideType& stride = StrideType()) : Base(data, rows, cols), m_stride(stride) { PlainObjectType::Base::_check_template_params(); diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 08b81c134..8cdd452ac 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -44,8 +44,13 @@ template class MapBase SizeAtCompileTime = Base::SizeAtCompileTime }; + + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; - typedef typename Base::PacketScalar PacketScalar; + typedef typename ei_packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + using Base::derived; // using Base::RowsAtCompileTime; // using Base::ColsAtCompileTime; @@ -82,8 +87,8 @@ template class MapBase typedef typename Base::CoeffReturnType CoeffReturnType; - inline int rows() const { return m_rows.value(); } - inline int cols() const { return m_cols.value(); } + inline Index rows() const { return m_rows.value(); } + inline Index cols() const { return m_cols.value(); } /** Returns a pointer to the first coefficient of the matrix or vector. 
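A short sketch of the Map constructors whose size arguments are switched to Index above; Map views existing memory without copying it (illustrative only, assuming <Eigen/Dense>):

    #include <Eigen/Dense>

    int main()
    {
      double raw[6] = {1, 2, 3, 4, 5, 6};
      Eigen::Map<Eigen::VectorXd> v(raw, 6);     // Map(Scalar* data, Index size)
      Eigen::Map<Eigen::MatrixXd> m(raw, 2, 3);  // Map(Scalar* data, Index rows, Index cols)
      v[0] = 10.0;                               // writes through to raw[0]
      (void)m;
      return 0;
    }
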
* @@ -93,50 +98,50 @@ template class MapBase */ inline const Scalar* data() const { return m_data; } - inline const Scalar& coeff(int row, int col) const + inline const Scalar& coeff(Index row, Index col) const { return m_data[col * colStride() + row * rowStride()]; } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return const_cast(m_data)[col * colStride() + row * rowStride()]; } - inline const Scalar& coeff(int index) const + inline const Scalar& coeff(Index index) const { ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); return m_data[index * innerStride()]; } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); return const_cast(m_data)[index * innerStride()]; } template - inline PacketScalar packet(int row, int col) const + inline PacketScalar packet(Index row, Index col) const { return ei_ploadt (m_data + (col * colStride() + row * rowStride())); } template - inline PacketScalar packet(int index) const + inline PacketScalar packet(Index index) const { return ei_ploadt(m_data + index * innerStride()); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { ei_pstoret (const_cast(m_data) + (col * colStride() + row * rowStride()), x); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { ei_pstoret (const_cast(m_data) + index * innerStride(), x); @@ -148,10 +153,10 @@ template class MapBase checkSanity(); } - inline MapBase(const Scalar* data, int size) + inline MapBase(const Scalar* data, Index size) : m_data(data), - m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime), - m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime) + m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)), + m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime)) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) ei_assert(size >= 0); @@ -159,7 +164,7 @@ template class MapBase checkSanity(); } - inline MapBase(const Scalar* data, int rows, int cols) + inline MapBase(const Scalar* data, Index rows, Index cols) : m_data(data), m_rows(rows), m_cols(cols) { ei_assert( (data == 0) @@ -187,8 +192,8 @@ template class MapBase } const Scalar* EIGEN_RESTRICT m_data; - const ei_int_if_dynamic m_rows; - const ei_int_if_dynamic m_cols; + const ei_variable_if_dynamic m_rows; + const ei_variable_if_dynamic m_cols; }; #endif // EIGEN_MAPBASE_H diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h index cc77799d7..53e576258 100644 --- a/Eigen/src/Core/MathFunctions.h +++ b/Eigen/src/Core/MathFunctions.h @@ -657,7 +657,7 @@ struct ei_pow_default_impl { static inline Scalar run(Scalar x, Scalar y) { - int res = 1; + Scalar res = 1; ei_assert(!NumTraits::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index c61a3f3f5..4407b0db1 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -206,7 +206,7 @@ class Matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. 
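For context on the Matrix constructors retyped here (sketch, assuming <Eigen/Dense>): the default constructor of a dynamic matrix yields the 0x0 null matrix, which is then given a size with resize().

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd a;        // null matrix, 0x0
      Eigen::VectorXd v(10);    // Matrix(Index dim): size-10 vector, coefficients uninitialized
      Eigen::MatrixXd b(3, 4);  // Matrix(Index rows, Index cols), uninitialized
      a.resize(2, 2);           // the documented way to give a null matrix a size
      (void)v; (void)b;
      return 0;
    }
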
* - * \sa resize(int,int) + * \sa resize(Index,Index) */ EIGEN_STRONG_INLINE explicit Matrix() : Base() { @@ -225,7 +225,7 @@ class Matrix * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Matrix() instead. */ - EIGEN_STRONG_INLINE explicit Matrix(int dim) + EIGEN_STRONG_INLINE explicit Matrix(Index dim) : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) { Base::_check_template_params(); @@ -248,7 +248,7 @@ class Matrix * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. */ - Matrix(int rows, int cols); + Matrix(Index rows, Index cols); /** \brief Constructs an initialized 2D vector with given coefficients */ Matrix(const Scalar& x, const Scalar& y); #endif @@ -321,8 +321,8 @@ class Matrix void swap(MatrixBase EIGEN_REF_TO_TEMPORARY other) { this->_swap(other.derived()); } - inline int innerStride() const { return 1; } - inline int outerStride() const { return this->innerSize(); } + inline Index innerStride() const { return 1; } + inline Index outerStride() const { return this->innerSize(); } /////////// Geometry module /////////// diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 9e2afe7e4..633b0106e 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -56,14 +56,14 @@ template class MatrixBase { public: #ifndef EIGEN_PARSED_BY_DOXYGEN - /** The base class for a given storage type. */ typedef MatrixBase StorageBaseType; - + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; typedef typename ei_traits::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; - + typedef typename NumTraits::Real RealScalar; + typedef DenseBase Base; - using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; @@ -97,14 +97,6 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN - /** This is the "real scalar" type; if the \a Scalar type is already real numbers - * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If - * \a Scalar is \a std::complex then RealScalar is \a T. - * - * \sa class NumTraits - */ - typedef typename NumTraits::Real RealScalar; - /** type of the equivalent square matrix */ typedef Matrix SquareMatrixType; @@ -112,7 +104,7 @@ template class MatrixBase /** \returns the size of the main diagonal, which is min(rows(),cols()). * \sa rows(), cols(), SizeAtCompileTime. */ - inline int diagonalSize() const { return std::min(rows(),cols()); } + inline Index diagonalSize() const { return std::min(rows(),cols()); } /** \brief The plain matrix type corresponding to this expression. 
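What the relocated typedefs resolve to for a concrete type such as MatrixXcf (sketch, assuming <Eigen/Dense>): RealScalar is the real type underlying the scalar, and Index is the new index type replacing plain int.

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXcf::RealScalar r = 0.5f;  // float, the real type underlying std::complex<float>
      Eigen::MatrixXcf::Index n = 3;          // the new index type introduced by this change
      Eigen::MatrixXcf m(n, n);
      (void)r; (void)m;
      return 0;
    }
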
* @@ -211,8 +203,8 @@ template class MatrixBase template Diagonal diagonal(); template const Diagonal diagonal() const; - Diagonal diagonal(int index); - const Diagonal diagonal(int index) const; + Diagonal diagonal(Index index); + const Diagonal diagonal(Index index) const; template TriangularView part(); template const TriangularView part() const; @@ -224,9 +216,9 @@ template class MatrixBase template const SelfAdjointView selfadjointView() const; static const IdentityReturnType Identity(); - static const IdentityReturnType Identity(int rows, int cols); - static const BasisReturnType Unit(int size, int i); - static const BasisReturnType Unit(int i); + static const IdentityReturnType Identity(Index rows, Index cols); + static const BasisReturnType Unit(Index size, Index i); + static const BasisReturnType Unit(Index i); static const BasisReturnType UnitX(); static const BasisReturnType UnitY(); static const BasisReturnType UnitZ(); @@ -235,7 +227,7 @@ template class MatrixBase const DiagonalWrapper asDiagonal() const; Derived& setIdentity(); - Derived& setIdentity(int rows, int cols); + Derived& setIdentity(Index rows, Index cols); bool isIdentity(RealScalar prec = NumTraits::dummy_precision()) const; bool isDiagonal(RealScalar prec = NumTraits::dummy_precision()) const; @@ -329,7 +321,7 @@ template class MatrixBase template PlainObject cross3(const MatrixBase& other) const; PlainObject unitOrthogonal(void) const; - Matrix eulerAngles(int a0, int a1, int a2) const; + Matrix eulerAngles(Index a0, Index a1, Index a2) const; const ScalarMultipleReturnType operator*(const UniformScaling& s) const; enum { SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 @@ -362,9 +354,9 @@ template class MatrixBase ///////// Jacobi module ///////// template - void applyOnTheLeft(int p, int q, const PlanarRotation& j); + void applyOnTheLeft(Index p, Index q, const PlanarRotation& j); template - void applyOnTheRight(int p, int q, const PlanarRotation& j); + void applyOnTheRight(Index p, Index q, const PlanarRotation& j); ///////// MatrixFunctions module ///////// @@ -398,17 +390,17 @@ template class MatrixBase inline const Cwise cwise() const; inline Cwise cwise(); - VectorBlock start(int size); - const VectorBlock start(int size) const; - VectorBlock end(int size); - const VectorBlock end(int size) const; + VectorBlock start(Index size); + const VectorBlock start(Index size) const; + VectorBlock end(Index size); + const VectorBlock end(Index size) const; template VectorBlock start(); template const VectorBlock start() const; template VectorBlock end(); template const VectorBlock end() const; - Minor minor(int row, int col); - const Minor minor(int row, int col) const; + Minor minor(Index row, Index col); + const Minor minor(Index row, Index col) const; #endif protected: diff --git a/Eigen/src/Core/MatrixStorage.h b/Eigen/src/Core/MatrixStorage.h index f1b92ae13..aff83a64c 100644 --- a/Eigen/src/Core/MatrixStorage.h +++ b/Eigen/src/Core/MatrixStorage.h @@ -97,12 +97,12 @@ template class ei_matr inline explicit ei_matrix_storage() {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(ei_constructor_without_unaligned_array_assert()) {} - inline ei_matrix_storage(int,int,int) {} + inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {} inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); } - inline static int rows(void) {return _Rows;} - inline static int cols(void) {return _Cols;} - inline void conservativeResize(int,int,int) {} - 
inline void resize(int,int,int) {} + inline static DenseIndex rows(void) {return _Rows;} + inline static DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + inline void resize(DenseIndex,DenseIndex,DenseIndex) {} inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; @@ -113,12 +113,12 @@ template class ei_matrix_storage public: inline explicit ei_matrix_storage() {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) {} - inline ei_matrix_storage(int,int,int) {} + inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {} inline void swap(ei_matrix_storage& ) {} - inline static int rows(void) {return _Rows;} - inline static int cols(void) {return _Cols;} - inline void conservativeResize(int,int,int) {} - inline void resize(int,int,int) {} + inline static DenseIndex rows(void) {return _Rows;} + inline static DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + inline void resize(DenseIndex,DenseIndex,DenseIndex) {} inline const T *data() const { return 0; } inline T *data() { return 0; } }; @@ -127,19 +127,19 @@ template class ei_matrix_storage template class ei_matrix_storage { ei_matrix_array m_data; - int m_rows; - int m_cols; + DenseIndex m_rows; + DenseIndex m_cols; public: inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} - inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {} + inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {} inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } - inline int rows(void) const {return m_rows;} - inline int cols(void) const {return m_cols;} - inline void conservativeResize(int, int rows, int cols) { m_rows = rows; m_cols = cols; } - inline void resize(int, int rows, int cols) { m_rows = rows; m_cols = cols; } + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } + inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; @@ -148,17 +148,17 @@ template class ei_matrix_storage class ei_matrix_storage { ei_matrix_array m_data; - int m_rows; + DenseIndex m_rows; public: inline explicit ei_matrix_storage() : m_rows(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {} - inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {} + inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {} inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } - inline int rows(void) const {return m_rows;} - inline int cols(void) const {return _Cols;} - inline void conservativeResize(int, int rows, int) { m_rows = rows; } - inline void resize(int, int rows, int) { m_rows = rows; } + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return _Cols;} + 
inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } + inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; @@ -167,17 +167,17 @@ template class ei_matrix_storage< template class ei_matrix_storage { ei_matrix_array m_data; - int m_cols; + DenseIndex m_cols; public: inline explicit ei_matrix_storage() : m_cols(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {} - inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {} + inline ei_matrix_storage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {} inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - inline int rows(void) const {return _Rows;} - inline int cols(void) const {return m_cols;} - inline void conservativeResize(int, int, int cols) { m_cols = cols; } - inline void resize(int, int, int cols) { m_cols = cols; } + inline DenseIndex rows(void) const {return _Rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } + inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } inline const T *data() const { return m_data.array; } inline T *data() { return m_data.array; } }; @@ -186,27 +186,27 @@ template class ei_matrix_storage< template class ei_matrix_storage { T *m_data; - int m_rows; - int m_cols; + DenseIndex m_rows; + DenseIndex m_cols; public: inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} - inline ei_matrix_storage(int size, int rows, int cols) + inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex cols) : m_data(ei_conditional_aligned_new(size)), m_rows(rows), m_cols(cols) { EIGEN_INT_DEBUG_MATRIX_CTOR } inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, m_rows*m_cols); } inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } - inline int rows(void) const {return m_rows;} - inline int cols(void) const {return m_cols;} - inline void conservativeResize(int size, int rows, int cols) + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols) { m_data = ei_conditional_aligned_realloc_new(m_data, size, m_rows*m_cols); m_rows = rows; m_cols = cols; } - void resize(int size, int rows, int cols) + void resize(DenseIndex size, DenseIndex rows, DenseIndex cols) { if(size != m_rows*m_cols) { @@ -228,22 +228,22 @@ template class ei_matrix_storage class ei_matrix_storage { T *m_data; - int m_cols; + DenseIndex m_cols; public: inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} - inline ei_matrix_storage(int size, int, int cols) : m_data(ei_conditional_aligned_new(size)), m_cols(cols) + inline ei_matrix_storage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(ei_conditional_aligned_new(size)), m_cols(cols) { EIGEN_INT_DEBUG_MATRIX_CTOR } inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, 
_Rows*m_cols); } inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - inline static int rows(void) {return _Rows;} - inline int cols(void) const {return m_cols;} - inline void conservativeResize(int size, int, int cols) + inline static DenseIndex rows(void) {return _Rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) { m_data = ei_conditional_aligned_realloc_new(m_data, size, _Rows*m_cols); m_cols = cols; } - void resize(int size, int, int cols) + void resize(DenseIndex size, DenseIndex, DenseIndex cols) { if(size != _Rows*m_cols) { @@ -264,22 +264,22 @@ template class ei_matrix_storage class ei_matrix_storage { T *m_data; - int m_rows; + DenseIndex m_rows; public: inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {} inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} - inline ei_matrix_storage(int size, int rows, int) : m_data(ei_conditional_aligned_new(size)), m_rows(rows) + inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(ei_conditional_aligned_new(size)), m_rows(rows) { EIGEN_INT_DEBUG_MATRIX_CTOR } inline ~ei_matrix_storage() { ei_conditional_aligned_delete(m_data, _Cols*m_rows); } inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } - inline int rows(void) const {return m_rows;} - inline static int cols(void) {return _Cols;} - inline void conservativeResize(int size, int rows, int) + inline DenseIndex rows(void) const {return m_rows;} + inline static DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) { m_data = ei_conditional_aligned_realloc_new(m_data, size, m_rows*_Cols); m_rows = rows; } - void resize(int size, int rows, int) + void resize(DenseIndex size, DenseIndex rows, DenseIndex) { if(size != m_rows*_Cols) { diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h index a8ca28e0a..ececf2734 100644 --- a/Eigen/src/Core/NestByValue.h +++ b/Eigen/src/Core/NestByValue.h @@ -51,51 +51,51 @@ template class NestByValue inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} - inline int rows() const { return m_expression.rows(); } - inline int cols() const { return m_expression.cols(); } - inline int outerStride() const { return m_expression.outerStride(); } - inline int innerStride() const { return m_expression.innerStride(); } + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } - inline const CoeffReturnType coeff(int index) const + inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index 
row, Index col) const { return m_expression.template packet(row, col); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(row, col, x); } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) const { return m_expression.template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket(index, x); } diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 156f043b7..93e978779 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -216,10 +216,11 @@ class GeneralProduct template<> struct ei_outer_product_selector { template EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; // FIXME make sure lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too - const int cols = dest.cols(); - for (int j=0; j struct ei_outer_product_selector { template<> struct ei_outer_product_selector { template EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; // FIXME make sure rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too - const int rows = dest.rows(); - for (int i=0; i struct ei_gemv_selector template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; // TODO makes sure dest is sequentially stored in memory, otherwise use a temp - const int size = prod.rhs().rows(); - for(int k=0; k struct ei_gemv_selector template static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp - const int rows = prod.rows(); - for(int i=0; i && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - inline int rows() const { return m_lhs.rows(); } - inline int cols() const { return m_rhs.cols(); } + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } template inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } @@ -133,7 +133,7 @@ class ProductBase : public MatrixBase const Diagonal diagonal() const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } - const Diagonal diagonal(int index) const + const Diagonal diagonal(Index index) const { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } protected: @@ -146,10 +146,10 @@ class ProductBase : public MatrixBase private: // discard coeff methods - void coeff(int,int) const; - void coeffRef(int,int); - void coeff(int) const; - void coeffRef(int); + void coeff(Index,Index) const; + void coeffRef(Index,Index); + void coeff(Index) const; + void coeffRef(Index); }; // here we need to overload the nested rule for products diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h index ef0f76043..3fd5de74c 100644 --- a/Eigen/src/Core/Redux.h +++ b/Eigen/src/Core/Redux.h @@ -176,15 +176,16 @@ template struct ei_redux_impl { typedef typename Derived::Scalar 
Scalar; + typedef typename Derived::Index Index; static Scalar run(const Derived& mat, const Func& func) { ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix"); Scalar res; res = mat.coeffByOuterInner(0, 0); - for(int i = 1; i < mat.innerSize(); ++i) + for(Index i = 1; i < mat.innerSize(); ++i) res = func(res, mat.coeffByOuterInner(0, i)); - for(int i = 1; i < mat.outerSize(); ++i) - for(int j = 0; j < mat.innerSize(); ++j) + for(Index i = 1; i < mat.outerSize(); ++i) + for(Index j = 0; j < mat.innerSize(); ++j) res = func(res, mat.coeffByOuterInner(i, j)); return res; } @@ -200,37 +201,38 @@ struct ei_redux_impl { typedef typename Derived::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; + typedef typename Derived::Index Index; static Scalar run(const Derived& mat, const Func& func) { - const int size = mat.size(); - const int packetSize = ei_packet_traits::size; - const int alignedStart = ei_first_aligned(mat); + const Index size = mat.size(); + const Index packetSize = ei_packet_traits::size; + const Index alignedStart = ei_first_aligned(mat); enum { alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit) ? Aligned : Unaligned }; - const int alignedSize = ((size-alignedStart)/packetSize)*packetSize; - const int alignedEnd = alignedStart + alignedSize; + const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize; + const Index alignedEnd = alignedStart + alignedSize; Scalar res; if(alignedSize) { PacketScalar packet_res = mat.template packet(alignedStart); - for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize) + for(Index index = alignedStart + packetSize; index < alignedEnd; index += packetSize) packet_res = func.packetOp(packet_res, mat.template packet(index)); res = func.predux(packet_res); - for(int index = 0; index < alignedStart; ++index) + for(Index index = 0; index < alignedStart; ++index) res = func(res,mat.coeff(index)); - for(int index = alignedEnd; index < size; ++index) + for(Index index = alignedEnd; index < size; ++index) res = func(res,mat.coeff(index)); } else // too small to vectorize anything. // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. 
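For reference, the linear vectorized redux path above splits the traversal into an unaligned head, a run of whole packets starting at the first aligned coefficient, and a scalar tail, with every bound now computed in the Index type. The following stand-alone sketch shows the same peeling pattern on a plain float array with ordinary scalar adds in place of packet ops; the Index typedef and the helper name are made up for illustration and none of Eigen's ei_* internals are used.

#include <cstddef>
#include <cstdint>

typedef std::ptrdiff_t Index;   // stand-in for Derived::Index

// Sums `size` floats (size > 0 assumed): peel to the first 16-byte aligned
// element, add four at a time over the aligned middle, then fold in the
// unaligned head and the leftover tail.
float sum_with_alignment_peeling(const float* data, Index size)
{
  const Index packetSize   = 4;
  const Index misaligned   = Index((reinterpret_cast<std::uintptr_t>(data) % 16) / sizeof(float));
  const Index alignedStart = (packetSize - misaligned) % packetSize;
  const Index alignedSize  = size > alignedStart ? ((size - alignedStart) / packetSize) * packetSize : 0;
  const Index alignedEnd   = alignedStart + alignedSize;

  float res;
  if (alignedSize)
  {
    res = 0;
    for (Index i = alignedStart; i < alignedEnd; i += packetSize)   // "packet" loop
      res += data[i] + data[i+1] + data[i+2] + data[i+3];
    for (Index i = 0; i < alignedStart; ++i)                        // unaligned head
      res += data[i];
    for (Index i = alignedEnd; i < size; ++i)                       // scalar tail
      res += data[i];
  }
  else   // too small to peel anything: plain scalar loop
  {
    res = data[0];
    for (Index i = 1; i < size; ++i)
      res += data[i];
  }
  return res;
}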
{ res = mat.coeff(0); - for(int index = 1; index < size; ++index) + for(Index index = 1; index < size; ++index) res = func(res,mat.coeff(index)); } @@ -243,26 +245,27 @@ struct ei_redux_impl { typedef typename Derived::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; + typedef typename Derived::Index Index; static Scalar run(const Derived& mat, const Func& func) { - const int innerSize = mat.innerSize(); - const int outerSize = mat.outerSize(); + const Index innerSize = mat.innerSize(); + const Index outerSize = mat.outerSize(); enum { packetSize = ei_packet_traits::size }; - const int packetedInnerSize = ((innerSize)/packetSize)*packetSize; + const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; Scalar res; if(packetedInnerSize) { PacketScalar packet_res = mat.template packet(0,0); - for(int j=0; j(j,i)); res = func.predux(packet_res); - for(int j=0; j class ReturnByValue { public: typedef typename ei_traits::ReturnType ReturnType; + typedef typename ei_dense_xpr_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) template inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } - inline int rows() const { return static_cast(this)->rows(); } - inline int cols() const { return static_cast(this)->cols(); } + inline Index rows() const { return static_cast(this)->rows(); } + inline Index cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT @@ -72,10 +73,10 @@ template class ReturnByValue Unusable(const Unusable&) {} Unusable& operator=(const Unusable&) {return *this;} }; - const Unusable& coeff(int) const { return *reinterpret_cast(this); } - const Unusable& coeff(int,int) const { return *reinterpret_cast(this); } - Unusable& coeffRef(int) { return *reinterpret_cast(this); } - Unusable& coeffRef(int,int) { return *reinterpret_cast(this); } + const Unusable& coeff(Index) const { return *reinterpret_cast(this); } + const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } + Unusable& coeffRef(Index) { return *reinterpret_cast(this); } + Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } #endif }; diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h index 277108dd4..eed3f9336 100644 --- a/Eigen/src/Core/SelfAdjointView.h +++ b/Eigen/src/Core/SelfAdjointView.h @@ -65,6 +65,8 @@ template class SelfAdjointView typedef TriangularBase Base; typedef typename ei_traits::Scalar Scalar; + typedef typename MatrixType::Index Index; + enum { Mode = ei_traits::Mode }; @@ -73,15 +75,15 @@ template class SelfAdjointView inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) { ei_assert(ei_are_flags_consistent::ret); } - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } - inline int outerStride() const { return m_matrix.outerStride(); } - inline int innerStride() const { return m_matrix.innerStride(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index outerStride() const { return m_matrix.outerStride(); } + inline Index innerStride() const { return m_matrix.innerStride(); } /** \sa MatrixBase::coeff() * \warning the coordinates must fit into the referenced triangular part */ - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { 
Base::check_coordinates_internal(row, col); return m_matrix.coeff(row, col); @@ -90,7 +92,7 @@ template class SelfAdjointView /** \sa MatrixBase::coeffRef() * \warning the coordinates must fit into the referenced triangular part */ - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { Base::check_coordinates_internal(row, col); return m_matrix.const_cast_derived().coeffRef(row, col); @@ -230,11 +232,12 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - for(int i = 0; i < j; ++i) + for(Index i = 0; i < j; ++i) { dst.copyCoeff(i, j, src); dst.coeffRef(j,i) = ei_conj(dst.coeff(i,j)); @@ -249,9 +252,10 @@ struct ei_triangular_assignment_selector class SelfCwiseBinaryOp inline SelfCwiseBinaryOp(MatrixType& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {} - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } - inline int outerStride() const { return m_matrix.outerStride(); } - inline int innerStride() const { return m_matrix.innerStride(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index outerStride() const { return m_matrix.outerStride(); } + inline Index innerStride() const { return m_matrix.innerStride(); } inline const Scalar* data() const { return m_matrix.data(); } // note that this function is needed by assign to correctly align loads/stores // TODO make Assign use .data() - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_matrix.const_cast_derived().coeffRef(row, col); } // note that this function is needed by assign to correctly align loads/stores // TODO make Assign use .data() - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return m_matrix.const_cast_derived().coeffRef(index); } template - void copyCoeff(int row, int col, const DenseBase& other) + void copyCoeff(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(row >= 0 && row < rows() @@ -86,7 +86,7 @@ template class SelfCwiseBinaryOp } template - void copyCoeff(int index, const DenseBase& other) + void copyCoeff(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(index >= 0 && index < m_matrix.size()); @@ -95,7 +95,7 @@ template class SelfCwiseBinaryOp } template - void copyPacket(int row, int col, const DenseBase& other) + void copyPacket(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(row >= 0 && row < rows() @@ -105,7 +105,7 @@ template class SelfCwiseBinaryOp } template - void copyPacket(int index, const DenseBase& other) + void copyPacket(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(index >= 0 && index < m_matrix.size()); diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h index f74c6eef1..083c9cea2 100644 --- a/Eigen/src/Core/SolveTriangular.h +++ b/Eigen/src/Core/SolveTriangular.h @@ -56,29 +56,30 @@ struct ei_triangular_solver_selector LhsProductTraits; typedef typename LhsProductTraits::ExtractType ActualLhsType; + typedef typename Lhs::Index 
Index; enum { IsLower = ((Mode&Lower)==Lower) }; static void run(const Lhs& lhs, Rhs& other) { - static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; ActualLhsType actualLhs = LhsProductTraits::extract(lhs); - const int size = lhs.cols(); - for(int pi=IsLower ? 0 : size; + const Index size = lhs.cols(); + for(Index pi=IsLower ? 0 : size; IsLower ? pi0; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { - int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); - int r = IsLower ? pi : size - pi; // remaining size + Index r = IsLower ? pi : size - pi; // remaining size if (r > 0) { // let's directly call the low level product function because: // 1 - it is faster to compile // 2 - it is slighlty faster at runtime - int startRow = IsLower ? pi : pi-actualPanelWidth; - int startCol = IsLower ? 0 : pi; + Index startRow = IsLower ? pi : pi-actualPanelWidth; + Index startCol = IsLower ? 0 : pi; VectorBlock target(other,startRow,actualPanelWidth); ei_cache_friendly_product_rowmajor_times_vector( @@ -87,10 +88,10 @@ struct ei_triangular_solver_selector0) other.coeffRef(i) -= (lhs.row(i).segment(s,k).transpose().cwiseProduct(other.segment(s,k))).sum(); @@ -109,6 +110,7 @@ struct ei_triangular_solver_selector::type Packet; typedef ei_blas_traits LhsProductTraits; typedef typename LhsProductTraits::ExtractType ActualLhsType; + typedef typename Lhs::Index Index; enum { PacketSize = ei_packet_traits::size, IsLower = ((Mode&Lower)==Lower) @@ -116,30 +118,30 @@ struct ei_triangular_solver_selector0; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { - int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); - int startBlock = IsLower ? pi : pi-actualPanelWidth; - int endBlock = IsLower ? pi + actualPanelWidth : 0; + Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); + Index startBlock = IsLower ? pi : pi-actualPanelWidth; + Index endBlock = IsLower ? pi + actualPanelWidth : 0; - for(int k=0; k0) other.segment(s,r) -= other.coeffRef(i) * Block(lhs, s, i, r, 1); } - int r = IsLower ? size - endBlock : startBlock; // remaining size + Index r = IsLower ? size - endBlock : startBlock; // remaining size if (r > 0) { // let's directly call the low level product function because: @@ -168,7 +170,7 @@ struct ei_triangular_solver_selector +template struct ei_triangular_solve_matrix; // the rhs is a matrix @@ -176,12 +178,13 @@ template struct ei_triangular_solver_selector { typedef typename Rhs::Scalar Scalar; + typedef typename Rhs::Index Index; typedef ei_blas_traits LhsProductTraits; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; static void run(const Lhs& lhs, Rhs& rhs) { const ActualLhsType actualLhs = LhsProductTraits::extract(lhs); - ei_triangular_solve_matrix ::run(lhs.rows(), Side==OnTheLeft? 
rhs.cols() : rhs.rows(), &actualLhs.coeff(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride()); } diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index c2ce93779..d2bed929b 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h @@ -54,15 +54,15 @@ template inline typename NumTraits::Scalar>::Real MatrixBase::stableNorm() const { - const int blockSize = 4096; + const Index blockSize = 4096; RealScalar scale = 0; RealScalar invScale = 1; RealScalar ssq = 0; // sum of square enum { Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0 }; - int n = size(); - int bi = ei_first_aligned(derived()); + Index n = size(); + Index bi = ei_first_aligned(derived()); if (bi>0) ei_stable_norm_kernel(this->head(bi), ssq, scale, invScale); for (; bi inline typename NumTraits::Scalar>::Real MatrixBase::blueNorm() const { - static int nmax = -1; + static Index nmax = -1; static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr; if(nmax <= 0) { - int nbig, ibeta, it, iemin, iemax, iexp; + Index nbig, ibeta, it, iemin, iemax, iexp; RealScalar abig, eps; // This program calculates the machine-dependent constants // bl, b2, slm, s2m, relerr overfl, nmax @@ -97,7 +97,7 @@ MatrixBase::blueNorm() const // For portability, the PORT subprograms "ilmaeh" and "rlmach" // are used. For any specific computer, each of the assignment // statements can be replaced - nbig = std::numeric_limits::max(); // largest integer + nbig = std::numeric_limits::max(); // largest integer ibeta = std::numeric_limits::radix; // base for floating-point numbers it = std::numeric_limits::digits; // number of base-beta digits in mantissa iemin = std::numeric_limits::min_exponent; // minimum exponent @@ -121,12 +121,12 @@ MatrixBase::blueNorm() const if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n else nmax = nbig; } - int n = size(); + Index n = size(); RealScalar ab2 = b2 / RealScalar(n); RealScalar asml = RealScalar(0); RealScalar amed = RealScalar(0); RealScalar abig = RealScalar(0); - for(int j=0; j ab2) abig += ei_abs2(ax*s2m); diff --git a/Eigen/src/Core/Stride.h b/Eigen/src/Core/Stride.h index d960dd2fc..afae0345e 100644 --- a/Eigen/src/Core/Stride.h +++ b/Eigen/src/Core/Stride.h @@ -86,8 +86,8 @@ class Stride inline int inner() const { return m_inner.value(); } protected: - ei_int_if_dynamic m_outer; - ei_int_if_dynamic m_inner; + ei_variable_if_dynamic m_outer; + ei_variable_if_dynamic m_inner; }; /** \brief Convenience specialization of Stride to specify only an inner stride */ diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h index 02ff8adc5..8e5994aa9 100644 --- a/Eigen/src/Core/Swap.h +++ b/Eigen/src/Core/Swap.h @@ -45,23 +45,23 @@ template class SwapWrapper inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {} - inline int rows() const { return m_expression.rows(); } - inline int cols() const { return m_expression.cols(); } - inline int outerStride() const { return m_expression.outerStride(); } - inline int innerStride() const { return m_expression.innerStride(); } + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } - inline Scalar& coeffRef(int index) 
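Stepping back to the StableNorm.h hunk above: the block size, the first-aligned offset and every loop counter in stableNorm() and blueNorm() move to Index as well. The overflow-safe accumulation behind them is the classic rescaled sum of squares; here is a one-pass stand-alone sketch of that idea (no blocking, a stand-in Index typedef, an illustrative helper name; the real kernel uses a scale/invScale pair and processes the vector in blocks).

#include <cmath>

typedef long long Index;   // stand-in for MatrixBase<Derived>::Index

// Maintains the invariant scale^2 * ssq == sum of squares seen so far,
// rescaling whenever a larger magnitude appears, so neither the squares
// nor their sum can overflow or underflow prematurely.
double rescaled_norm(const double* x, Index n)
{
  double scale = 0, ssq = 1;
  for (Index i = 0; i < n; ++i)
  {
    const double ax = std::abs(x[i]);
    if (ax == 0) continue;
    if (ax > scale)
    {
      ssq   = 1 + ssq * (scale / ax) * (scale / ax);
      scale = ax;
    }
    else
      ssq += (ax / scale) * (ax / scale);
  }
  return scale * std::sqrt(ssq);
}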
+ inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template - void copyCoeff(int row, int col, const DenseBase& other) + void copyCoeff(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(row >= 0 && row < rows() @@ -72,7 +72,7 @@ template class SwapWrapper } template - void copyCoeff(int index, const DenseBase& other) + void copyCoeff(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(index >= 0 && index < m_expression.size()); @@ -82,7 +82,7 @@ template class SwapWrapper } template - void copyPacket(int row, int col, const DenseBase& other) + void copyPacket(Index row, Index col, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(row >= 0 && row < rows() @@ -95,7 +95,7 @@ template class SwapWrapper } template - void copyPacket(int index, const DenseBase& other) + void copyPacket(Index index, const DenseBase& other) { OtherDerived& _other = other.const_cast_derived(); ei_internal_assert(index >= 0 && index < m_expression.size()); diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index 6cb86426a..38d942e04 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -72,8 +72,8 @@ template class Transpose EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) - inline int rows() const { return m_matrix.cols(); } - inline int cols() const { return m_matrix.rows(); } + inline Index rows() const { return m_matrix.cols(); } + inline Index cols() const { return m_matrix.rows(); } /** \returns the nested expression */ const typename ei_cleantype::type& @@ -107,51 +107,51 @@ template class TransposeImpl typedef typename ei_TransposeImpl_base::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Transpose) - inline int innerStride() const { return derived().nestedExpression().innerStride(); } - inline int outerStride() const { return derived().nestedExpression().outerStride(); } + inline Index innerStride() const { return derived().nestedExpression().innerStride(); } + inline Index outerStride() const { return derived().nestedExpression().outerStride(); } inline Scalar* data() { return derived().nestedExpression().data(); } inline const Scalar* data() const { return derived().nestedExpression().data(); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return const_cast_derived().nestedExpression().coeffRef(col, row); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return const_cast_derived().nestedExpression().coeffRef(index); } - inline const CoeffReturnType coeff(int row, int col) const + inline const CoeffReturnType coeff(Index row, Index col) const { return derived().nestedExpression().coeff(col, row); } - inline const CoeffReturnType coeff(int index) const + inline const CoeffReturnType coeff(Index index) const { return derived().nestedExpression().coeff(index); } template - inline const PacketScalar packet(int row, int col) const + inline const PacketScalar packet(Index row, Index col) const { return derived().nestedExpression().template packet(col, row); } template - inline void writePacket(int row, int col, const PacketScalar& x) + inline void writePacket(Index row, Index col, const PacketScalar& x) { const_cast_derived().nestedExpression().template writePacket(col, row, x); } template - inline const PacketScalar packet(int index) const + inline const PacketScalar packet(Index index) 
const { return derived().nestedExpression().template packet(index); } template - inline void writePacket(int index, const PacketScalar& x) + inline void writePacket(Index index, const PacketScalar& x) { const_cast_derived().nestedExpression().template writePacket(index, x); } diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index 3eb52a5bb..47c11ceb6 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -45,31 +45,33 @@ template class TriangularBase : public EigenBase MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime }; typedef typename ei_traits::Scalar Scalar; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } - inline int rows() const { return derived().rows(); } - inline int cols() const { return derived().cols(); } - inline int outerStride() const { return derived().outerStride(); } - inline int innerStride() const { return derived().innerStride(); } + inline Index rows() const { return derived().rows(); } + inline Index cols() const { return derived().cols(); } + inline Index outerStride() const { return derived().outerStride(); } + inline Index innerStride() const { return derived().innerStride(); } - inline Scalar coeff(int row, int col) const { return derived().coeff(row,col); } - inline Scalar& coeffRef(int row, int col) { return derived().coeffRef(row,col); } + inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); } + inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); } /** \see MatrixBase::copyCoeff(row,col) */ template - EIGEN_STRONG_INLINE void copyCoeff(int row, int col, Other& other) + EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other) { derived().coeffRef(row, col) = other.coeff(row, col); } - inline Scalar operator()(int row, int col) const + inline Scalar operator()(Index row, Index col) const { check_coordinates(row, col); return coeff(row,col); } - inline Scalar& operator()(int row, int col) + inline Scalar& operator()(Index row, Index col) { check_coordinates(row, col); return coeffRef(row,col); @@ -87,7 +89,7 @@ template class TriangularBase : public EigenBase protected: - void check_coordinates(int row, int col) + void check_coordinates(Index row, Index col) { EIGEN_ONLY_USED_FOR_DEBUG(row); EIGEN_ONLY_USED_FOR_DEBUG(col); @@ -99,12 +101,12 @@ template class TriangularBase : public EigenBase } #ifdef EIGEN_INTERNAL_DEBUGGING - void check_coordinates_internal(int row, int col) + void check_coordinates_internal(Index row, Index col) { check_coordinates(row, col); } #else - void check_coordinates_internal(int , int ) {} + void check_coordinates_internal(Index , Index ) {} #endif }; @@ -156,6 +158,9 @@ template class TriangularView typedef typename ei_cleantype::type _MatrixTypeNested; using TriangularBase >::evalToLazy; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + enum { Mode = _Mode, TransposeMode = (Mode & Upper ? 
Lower : 0) @@ -167,10 +172,10 @@ template class TriangularView inline TriangularView(const MatrixType& matrix) : m_matrix(matrix) { ei_assert(ei_are_flags_consistent::ret); } - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } - inline int outerStride() const { return m_matrix.outerStride(); } - inline int innerStride() const { return m_matrix.innerStride(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index outerStride() const { return m_matrix.outerStride(); } + inline Index innerStride() const { return m_matrix.innerStride(); } /** \sa MatrixBase::operator+=() */ template TriangularView& operator+=(const Other& other) { return *this = m_matrix + other; } @@ -194,7 +199,7 @@ template class TriangularView /** \sa MatrixBase::coeff() * \warning the coordinates must fit into the referenced triangular part */ - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { Base::check_coordinates_internal(row, col); return m_matrix.coeff(row, col); @@ -203,7 +208,7 @@ template class TriangularView /** \sa MatrixBase::coeffRef() * \warning the coordinates must fit into the referenced triangular part */ - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { Base::check_coordinates_internal(row, col); return m_matrix.const_cast_derived().coeffRef(row, col); @@ -371,15 +376,16 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - int maxi = std::min(j, dst.rows()-1); - for(int i = 0; i <= maxi; ++i) + Index maxi = std::min(j, dst.rows()-1); + for(Index i = 0; i <= maxi; ++i) dst.copyCoeff(i, j, src); if (ClearOpposite) - for(int i = maxi+1; i < dst.rows(); ++i) + for(Index i = maxi+1; i < dst.rows(); ++i) dst.coeffRef(i, j) = 0; } } @@ -388,15 +394,16 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - for(int i = j; i < dst.rows(); ++i) + for(Index i = j; i < dst.rows(); ++i) dst.copyCoeff(i, j, src); - int maxi = std::min(j, dst.rows()); + Index maxi = std::min(j, dst.rows()); if (ClearOpposite) - for(int i = 0; i < maxi; ++i) + for(Index i = 0; i < maxi; ++i) dst.coeffRef(i, j) = 0; } } @@ -405,15 +412,16 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - int maxi = std::min(j, dst.rows()); - for(int i = 0; i < maxi; ++i) + Index maxi = std::min(j, dst.rows()); + for(Index i = 0; i < maxi; ++i) dst.copyCoeff(i, j, src); if (ClearOpposite) - for(int i = maxi; i < dst.rows(); ++i) + for(Index i = maxi; i < dst.rows(); ++i) dst.coeffRef(i, j) = 0; } } @@ -422,15 +430,16 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - for(int i 
= j+1; i < dst.rows(); ++i) + for(Index i = j+1; i < dst.rows(); ++i) dst.copyCoeff(i, j, src); - int maxi = std::min(j, dst.rows()-1); + Index maxi = std::min(j, dst.rows()-1); if (ClearOpposite) - for(int i = 0; i <= maxi; ++i) + for(Index i = 0; i <= maxi; ++i) dst.coeffRef(i, j) = 0; } } @@ -439,16 +448,17 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - int maxi = std::min(j, dst.rows()); - for(int i = 0; i < maxi; ++i) + Index maxi = std::min(j, dst.rows()); + for(Index i = 0; i < maxi; ++i) dst.copyCoeff(i, j, src); if (ClearOpposite) { - for(int i = maxi+1; i < dst.rows(); ++i) + for(Index i = maxi+1; i < dst.rows(); ++i) dst.coeffRef(i, j) = 0; } } @@ -458,16 +468,17 @@ struct ei_triangular_assignment_selector struct ei_triangular_assignment_selector { + typedef typename Derived1::Index Index; inline static void run(Derived1 &dst, const Derived2 &src) { - for(int j = 0; j < dst.cols(); ++j) + for(Index j = 0; j < dst.cols(); ++j) { - int maxi = std::min(j, dst.rows()); - for(int i = maxi+1; i < dst.rows(); ++i) + Index maxi = std::min(j, dst.rows()); + for(Index i = maxi+1; i < dst.rows(); ++i) dst.copyCoeff(i, j, src); if (ClearOpposite) { - for(int i = 0; i < maxi; ++i) + for(Index i = 0; i < maxi; ++i) dst.coeffRef(i, j) = 0; } } @@ -638,18 +649,18 @@ template bool MatrixBase::isUpperTriangular(RealScalar prec) const { RealScalar maxAbsOnUpperPart = static_cast(-1); - for(int j = 0; j < cols(); ++j) + for(Index j = 0; j < cols(); ++j) { - int maxi = std::min(j, rows()-1); - for(int i = 0; i <= maxi; ++i) + Index maxi = std::min(j, rows()-1); + for(Index i = 0; i <= maxi; ++i) { RealScalar absValue = ei_abs(coeff(i,j)); if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; } } RealScalar threshold = maxAbsOnUpperPart * prec; - for(int j = 0; j < cols(); ++j) - for(int i = j+1; i < rows(); ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = j+1; i < rows(); ++i) if(ei_abs(coeff(i, j)) > threshold) return false; return true; } @@ -663,17 +674,17 @@ template bool MatrixBase::isLowerTriangular(RealScalar prec) const { RealScalar maxAbsOnLowerPart = static_cast(-1); - for(int j = 0; j < cols(); ++j) - for(int i = j; i < rows(); ++i) + for(Index j = 0; j < cols(); ++j) + for(Index i = j; i < rows(); ++i) { RealScalar absValue = ei_abs(coeff(i,j)); if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; } RealScalar threshold = maxAbsOnLowerPart * prec; - for(int j = 1; j < cols(); ++j) + for(Index j = 1; j < cols(); ++j) { - int maxi = std::min(j, rows()-1); - for(int i = 0; i < maxi; ++i) + Index maxi = std::min(j, rows()-1); + for(Index i = 0; i < maxi; ++i) if(ei_abs(coeff(i, j)) > threshold) return false; } return true; diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h index adb69b6b4..c3212b825 100644 --- a/Eigen/src/Core/VectorBlock.h +++ b/Eigen/src/Core/VectorBlock.h @@ -34,7 +34,7 @@ * \param Size size of the sub-vector we are taking at compile time (optional) * * This class represents an expression of either a fixed-size or dynamic-size sub-vector. - * It is the return type of DenseBase::segment(int,int) and DenseBase::segment(int) and + * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment(Index) and * most of the time this is the only way it is used. 
* * However, if you want to directly maniputate sub-vector expressions, @@ -53,7 +53,7 @@ * \include class_FixedVectorBlock.cpp * Output: \verbinclude class_FixedVectorBlock.out * - * \sa class Block, DenseBase::segment(int,int,int,int), DenseBase::segment(int,int) + * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index) */ template struct ei_traits > @@ -81,7 +81,7 @@ template class VectorBlock /** Dynamic-size constructor */ - inline VectorBlock(const VectorType& vector, int start, int size) + inline VectorBlock(const VectorType& vector, Index start, Index size) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start, IsColVector ? size : 1, IsColVector ? 1 : size) @@ -91,7 +91,7 @@ template class VectorBlock /** Fixed-size constructor */ - inline VectorBlock(const VectorType& vector, int start) + inline VectorBlock(const VectorType& vector, Index start) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); @@ -113,20 +113,20 @@ template class VectorBlock * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * - * \sa class Block, segment(int) + * \sa class Block, segment(Index) */ template inline VectorBlock DenseBase - ::segment(int start, int size) + ::segment(Index start, Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), start, size); } -/** This is the const version of segment(int,int).*/ +/** This is the const version of segment(Index,Index).*/ template inline const VectorBlock -DenseBase::segment(int start, int size) const +DenseBase::segment(Index start, Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), start, size); @@ -145,20 +145,20 @@ DenseBase::segment(int start, int size) const * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * - * \sa class Block, block(int,int) + * \sa class Block, block(Index,Index) */ template inline VectorBlock -DenseBase::head(int size) +DenseBase::head(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), 0, size); } -/** This is the const version of head(int).*/ +/** This is the const version of head(Index).*/ template inline const VectorBlock -DenseBase::head(int size) const +DenseBase::head(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), 0, size); @@ -177,20 +177,20 @@ DenseBase::head(int size) const * when it is applied to a fixed-size vector, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. 
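On the caller's side the segment()/head()/tail() signature change is transparent as long as offsets and sizes are held in the matrix's Index type. A usage sketch against the public API (the function name is illustrative, and it assumes the Index typedef is exposed on the plain VectorXd type, as the `typename Derived::Index` usage throughout this patch suggests):

#include <Eigen/Dense>

void trim(Eigen::VectorXd& v)
{
  typedef Eigen::VectorXd::Index Index;    // resolves to DenseIndex after this change
  const Index n = v.size();                // Index rather than int
  if (n < 4) return;
  v.head(2).setZero();                     // dynamic-size head(Index)
  v.segment(2, n - 4).setConstant(1.0);    // dynamic-size segment(Index start, Index size)
  v.segment<2>(n - 2).setOnes();           // fixed-size segment<Size>(Index start)
}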
* - * \sa class Block, block(int,int) + * \sa class Block, block(Index,Index) */ template inline VectorBlock -DenseBase::tail(int size) +DenseBase::tail(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), this->size() - size, size); } -/** This is the const version of tail(int).*/ +/** This is the const version of tail(Index).*/ template inline const VectorBlock -DenseBase::tail(int size) const +DenseBase::tail(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), this->size() - size, size); @@ -212,17 +212,17 @@ DenseBase::tail(int size) const template template inline VectorBlock -DenseBase::segment(int start) +DenseBase::segment(Index start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), start); } -/** This is the const version of segment(int).*/ +/** This is the const version of segment(Index).*/ template template inline const VectorBlock -DenseBase::segment(int start) const +DenseBase::segment(Index start) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), start); diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h index e6f02b79b..2e96cfe6b 100644 --- a/Eigen/src/Core/Visitor.h +++ b/Eigen/src/Core/Visitor.h @@ -52,13 +52,14 @@ struct ei_visitor_impl template struct ei_visitor_impl { + typedef typename Derived::Index Index; inline static void run(const Derived& mat, Visitor& visitor) { visitor.init(mat.coeff(0,0), 0, 0); - for(int i = 1; i < mat.rows(); ++i) + for(Index i = 1; i < mat.rows(); ++i) visitor(mat.coeff(i, 0), i, 0); - for(int j = 1; j < mat.cols(); ++j) - for(int i = 0; i < mat.rows(); ++i) + for(Index j = 1; j < mat.cols(); ++j) + for(Index i = 0; i < mat.rows(); ++i) visitor(mat.coeff(i, j), i, j); } }; @@ -70,16 +71,16 @@ struct ei_visitor_impl * \code * struct MyVisitor { * // called for the first coefficient - * void init(const Scalar& value, int i, int j); + * void init(const Scalar& value, Index i, Index j); * // called for all other coefficients - * void operator() (const Scalar& value, int i, int j); + * void operator() (const Scalar& value, Index i, Index j); * }; * \endcode * * \note compared to one or two \em for \em loops, visitors offer automatic * unrolling for small fixed size matrix. 
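A minimal visitor matching the interface documented just above, now receiving Index coordinates; this is a sketch against the public API with illustrative struct and function names, assuming the standard MatrixXf typedef.

#include <Eigen/Dense>

// Records the location of the last strictly negative coefficient seen.
struct LastNegative
{
  typedef Eigen::MatrixXf::Index Index;
  Index row, col;
  bool found;
  void init(const float& value, Index i, Index j) { found = false; (*this)(value, i, j); }
  void operator()(const float& value, Index i, Index j)
  { if (value < 0) { found = true; row = i; col = j; } }
};

bool hasNegative(const Eigen::MatrixXf& m)
{
  LastNegative visitor;
  m.visit(visitor);    // DenseBase::visit calls init() once, then operator() per coefficient
  return visitor.found;
}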
* - * \sa minCoeff(int*,int*), maxCoeff(int*,int*), DenseBase::redux() + * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux() */ template template @@ -96,12 +97,14 @@ void DenseBase::visit(Visitor& visitor) const /** \internal * \brief Base class to implement min and max visitors */ -template +template struct ei_coeff_visitor { - int row, col; + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + Index row, col; Scalar res; - inline void init(const Scalar& value, int i, int j) + inline void init(const Scalar& value, Index i, Index j) { res = value; row = i; @@ -112,12 +115,14 @@ struct ei_coeff_visitor /** \internal * \brief Visitor computing the min coefficient with its value and coordinates * - * \sa DenseBase::minCoeff(int*, int*) + * \sa DenseBase::minCoeff(Index*, Index*) */ -template -struct ei_min_coeff_visitor : ei_coeff_visitor +template +struct ei_min_coeff_visitor : ei_coeff_visitor { - void operator() (const Scalar& value, int i, int j) + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + void operator() (const Scalar& value, Index i, Index j) { if(value < this->res) { @@ -138,12 +143,14 @@ struct ei_functor_traits > { /** \internal * \brief Visitor computing the max coefficient with its value and coordinates * - * \sa DenseBase::maxCoeff(int*, int*) + * \sa DenseBase::maxCoeff(Index*, Index*) */ -template -struct ei_max_coeff_visitor : ei_coeff_visitor +template +struct ei_max_coeff_visitor : ei_coeff_visitor { - void operator() (const Scalar& value, int i, int j) + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + void operator() (const Scalar& value, Index i, Index j) { if(value > this->res) { @@ -164,13 +171,13 @@ struct ei_functor_traits > { /** \returns the minimum of all coefficients of *this * and puts in *row and *col its location. * - * \sa DenseBase::minCoeff(int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff() + * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff() */ template typename ei_traits::Scalar -DenseBase::minCoeff(int* row, int* col) const +DenseBase::minCoeff(Index* row, Index* col) const { - ei_min_coeff_visitor minVisitor; + ei_min_coeff_visitor minVisitor; this->visit(minVisitor); *row = minVisitor.row; if (col) *col = minVisitor.col; @@ -180,14 +187,14 @@ DenseBase::minCoeff(int* row, int* col) const /** \returns the minimum of all coefficients of *this * and puts in *index its location. * - * \sa DenseBase::minCoeff(int*,int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff() + * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff() */ template typename ei_traits::Scalar -DenseBase::minCoeff(int* index) const +DenseBase::minCoeff(Index* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - ei_min_coeff_visitor minVisitor; + ei_min_coeff_visitor minVisitor; this->visit(minVisitor); *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row; return minVisitor.res; @@ -196,13 +203,13 @@ DenseBase::minCoeff(int* index) const /** \returns the maximum of all coefficients of *this * and puts in *row and *col its location. 
* - * \sa DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff() + * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff() */ template typename ei_traits::Scalar -DenseBase::maxCoeff(int* row, int* col) const +DenseBase::maxCoeff(Index* row, Index* col) const { - ei_max_coeff_visitor maxVisitor; + ei_max_coeff_visitor maxVisitor; this->visit(maxVisitor); *row = maxVisitor.row; if (col) *col = maxVisitor.col; @@ -212,14 +219,14 @@ DenseBase::maxCoeff(int* row, int* col) const /** \returns the maximum of all coefficients of *this * and puts in *index its location. * - * \sa DenseBase::maxCoeff(int*,int*), DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff() + * \sa DenseBase::maxCoeff(Index*,Index*), DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff() */ template typename ei_traits::Scalar -DenseBase::maxCoeff(int* index) const +DenseBase::maxCoeff(Index* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - ei_max_coeff_visitor maxVisitor; + ei_max_coeff_visitor maxVisitor; this->visit(maxVisitor); *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row; return maxVisitor.res; diff --git a/Eigen/src/Core/products/CoeffBasedProduct.h b/Eigen/src/Core/products/CoeffBasedProduct.h index 2f7b32c65..0c39cbd84 100644 --- a/Eigen/src/Core/products/CoeffBasedProduct.h +++ b/Eigen/src/Core/products/CoeffBasedProduct.h @@ -39,10 +39,10 @@ * Note that here the inner-loops should always be unrolled. */ -template +template struct ei_product_coeff_impl; -template +template struct ei_product_packet_impl; template @@ -159,10 +159,10 @@ class CoeffBasedProduct && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } - EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); } + EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } - EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { Scalar res; ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); @@ -172,17 +172,17 @@ class CoeffBasedProduct /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, * which is why we don't set the LinearAccessBit. */ - EIGEN_STRONG_INLINE const Scalar coeff(int index) const + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { Scalar res; - const int row = RowsAtCompileTime == 1 ? 0 : index; - const int col = RowsAtCompileTime == 1 ? index : 0; + const Index row = RowsAtCompileTime == 1 ? 0 : index; + const Index col = RowsAtCompileTime == 1 ? 
index : 0; ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); return res; } template - EIGEN_STRONG_INLINE const PacketScalar packet(int row, int col) const + EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const { PacketScalar res; ei_product_packet_impl diagonal() const { return reinterpret_cast(*this); } - template - const Diagonal diagonal() const + template + const Diagonal diagonal() const { return reinterpret_cast(*this); } - const Diagonal diagonal(int index) const + const Diagonal diagonal(Index index) const { return reinterpret_cast(*this).diagonal(index); } protected: @@ -235,20 +235,22 @@ struct ei_nested -struct ei_product_coeff_impl +template +struct ei_product_coeff_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { - ei_product_coeff_impl::run(row, col, lhs, rhs, res); - res += lhs.coeff(row, Index) * rhs.coeff(Index, col); + ei_product_coeff_impl::run(row, col, lhs, rhs, res); + res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); } }; template struct ei_product_coeff_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { res = lhs.coeff(row, 0) * rhs.coeff(0, col); } @@ -257,11 +259,12 @@ struct ei_product_coeff_impl template struct ei_product_coeff_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) { ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = lhs.coeff(row, 0) * rhs.coeff(0, col); - for(int i = 1; i < lhs.cols(); ++i) + for(Index i = 1; i < lhs.cols(); ++i) res += lhs.coeff(row, i) * rhs.coeff(i, col); } }; @@ -270,43 +273,47 @@ struct ei_product_coeff_impl template struct ei_product_coeff_impl { - EIGEN_STRONG_INLINE static void run(int, int, const Lhs&, const Rhs&, RetScalar&) {} + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index, Index, const Lhs&, const Rhs&, RetScalar&) {} }; /******************************************* *** Scalar path with inner vectorization *** *******************************************/ -template +template struct ei_product_coeff_vectorized_unroller { + typedef typename Lhs::Index Index; enum { PacketSize = ei_packet_traits::size }; - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { - ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - pres = ei_padd(pres, ei_pmul( lhs.template packet(row, Index) , rhs.template packet(Index, col) )); + ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + pres = ei_padd(pres, ei_pmul( lhs.template packet(row, UnrollingIndex) , rhs.template packet(UnrollingIndex, col) )); } }; template struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar> { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + 
typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) { pres = ei_pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); } }; -template -struct ei_product_coeff_impl +template +struct ei_product_coeff_impl { typedef typename Lhs::PacketScalar PacketScalar; + typedef typename Lhs::Index Index; enum { PacketSize = ei_packet_traits::size }; - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { PacketScalar pres; - ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); - ei_product_coeff_impl::run(row, col, lhs, rhs, res); + ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + ei_product_coeff_impl::run(row, col, lhs, rhs, res); res = ei_predux(pres); } }; @@ -314,7 +321,8 @@ struct ei_product_coeff_impl struct ei_product_coeff_vectorized_dyn_selector { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.row(row).cwiseProduct(rhs.col(col)).sum(); } @@ -325,7 +333,8 @@ struct ei_product_coeff_vectorized_dyn_selector template struct ei_product_coeff_vectorized_dyn_selector { - EIGEN_STRONG_INLINE static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.cwiseProduct(rhs.col(col)).sum(); } @@ -334,7 +343,8 @@ struct ei_product_coeff_vectorized_dyn_selector template struct ei_product_coeff_vectorized_dyn_selector { - EIGEN_STRONG_INLINE static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.row(row).cwiseProduct(rhs).sum(); } @@ -343,7 +353,8 @@ struct ei_product_coeff_vectorized_dyn_selector template struct ei_product_coeff_vectorized_dyn_selector { - EIGEN_STRONG_INLINE static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.cwiseProduct(rhs).sum(); } @@ -352,7 +363,8 @@ struct ei_product_coeff_vectorized_dyn_selector template struct ei_product_coeff_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { ei_product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); } @@ -362,30 +374,33 @@ struct ei_product_coeff_impl -struct ei_product_packet_impl +template +struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& 
lhs, const Rhs& rhs, PacketScalar &res) { - ei_product_packet_impl::run(row, col, lhs, rhs, res); - res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet(Index, col), res); + ei_product_packet_impl::run(row, col, lhs, rhs, res); + res = ei_pmadd(ei_pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); } }; -template -struct ei_product_packet_impl +template +struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { - ei_product_packet_impl::run(row, col, lhs, rhs, res); - res = ei_pmadd(lhs.template packet(row, Index), ei_pset1(rhs.coeff(Index, col)), res); + ei_product_packet_impl::run(row, col, lhs, rhs, res); + res = ei_pmadd(lhs.template packet(row, UnrollingIndex), ei_pset1(rhs.coeff(UnrollingIndex, col)), res); } }; template struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); } @@ -394,7 +409,8 @@ struct ei_product_packet_impl template struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); } @@ -403,11 +419,12 @@ struct ei_product_packet_impl template struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) { ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); - for(int i = 1; i < lhs.cols(); ++i) + for(Index i = 1; i < lhs.cols(); ++i) res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); } }; @@ -415,11 +432,12 @@ struct ei_product_packet_impl struct ei_product_packet_impl { - EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) + typedef typename Lhs::Index Index; + EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) { ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); - for(int i = 1; i < lhs.cols(); ++i) + for(Index i = 1; i < lhs.cols(); ++i) res = ei_pmadd(lhs.template packet(row, i), ei_pset1(rhs.coeff(i, col)), res); } }; diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h index bc697cef5..d81715528 100644 --- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -35,40 +35,40 @@ #endif // optimized GEneral packed Block * packed Panel product kernel -template +template struct ei_gebp_kernel { - void operator()(Scalar* res, int resStride, const 
Scalar* blockA, const Scalar* blockB, int rows, int depth, int cols, - int strideA=-1, int strideB=-1, int offsetA=0, int offsetB=0, Scalar* unpackedB = 0) + void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, Scalar* unpackedB = 0) { typedef typename ei_packet_traits::type PacketType; enum { PacketSize = ei_packet_traits::size }; if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; Conj cj; - int packet_cols = (cols/nr) * nr; - const int peeled_mc = (rows/mr)*mr; - const int peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0); - const int peeled_kc = (depth/4)*4; + Index packet_cols = (cols/nr) * nr; + const Index peeled_mc = (rows/mr)*mr; + const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0); + const Index peeled_kc = (depth/4)*4; if(unpackedB==0) unpackedB = const_cast(blockB - strideB * nr * PacketSize); // loops on each micro vertical panel of rhs (depth x nr) - for(int j2=0; j2 we select a mr x nr micro block of res which is entirely // stored into mr/packet_size x nr registers. - for(int i=0; i=PacketSize) { - int i = peeled_mc; + Index i = peeled_mc; const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize]; ei_prefetch(&blA[0]); @@ -341,7 +341,7 @@ struct ei_gebp_kernel // performs "inner" product const Scalar* blB = unpackedB; - for(int k=0; k do the same but with nr==1 - for(int j2=packet_cols; j2=PacketSize) { - int i = peeled_mc; + Index i = peeled_mc; const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize]; ei_prefetch(&blA[0]); PacketType C0 = ei_ploadu(&res[(j2+0)*resStride + i]); const Scalar* blB = unpackedB; - for(int k=0; k +template struct ei_gemm_pack_lhs { - void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, int lhsStride, int depth, int rows, - int stride=0, int offset=0) + void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, + Index stride=0, Index offset=0) { enum { PacketSize = ei_packet_traits::size }; ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); ei_conj_if::IsComplex && Conjugate> cj; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - int count = 0; - int peeled_mc = (rows/mr)*mr; - for(int i=0; i lhs(_lhs,lhsStride); + Index count = 0; + Index peeled_mc = (rows/mr)*mr; + for(Index i=0; i=PacketSize) { if(PanelMode) count += PacketSize*offset; - for(int k=0; k -struct ei_gemm_pack_rhs +template +struct ei_gemm_pack_rhs { typedef typename ei_packet_traits::type Packet; enum { PacketSize = ei_packet_traits::size }; - void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols, - int stride=0, int offset=0) + void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols, + Index stride=0, Index offset=0) { ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); bool hasAlpha = alpha != Scalar(1); - int packet_cols = (cols/nr) * nr; - int count = 0; - for(int j2=0; j2 const Scalar* b2 = &rhs[(j2+2)*rhsStride]; const Scalar* b3 = &rhs[(j2+3)*rhsStride]; if (hasAlpha) - for(int k=0; k count += nr; } else - for(int k=0; k } // copy the remaining columns one at a time (nr==1) - for(int j2=packet_cols; j2 }; // this version is optimized for row major matrices -template -struct ei_gemm_pack_rhs 
+template +struct ei_gemm_pack_rhs { enum { PacketSize = ei_packet_traits::size }; - void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols, - int stride=0, int offset=0) + void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols, + Index stride=0, Index offset=0) { ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); bool hasAlpha = alpha != Scalar(1); - int packet_cols = (cols/nr) * nr; - int count = 0; - for(int j2=0; j2 } else { - for(int k=0; k if(PanelMode) count += nr * (stride-offset-depth); } // copy the remaining columns one at a time (nr==1) - for(int j2=packet_cols; j2 simple transposition of the product */ template< - typename Scalar, + typename Scalar, typename Index, int LhsStorageOrder, bool ConjugateLhs, int RhsStorageOrder, bool ConjugateRhs> -struct ei_general_matrix_matrix_product +struct ei_general_matrix_matrix_product { static EIGEN_STRONG_INLINE void run( - int rows, int cols, int depth, - const Scalar* lhs, int lhsStride, - const Scalar* rhs, int rhsStride, - Scalar* res, int resStride, + Index rows, Index cols, Index depth, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha, - GemmParallelInfo* info = 0) + GemmParallelInfo* info = 0) { // transpose the product such that the result is column major - ei_general_matrix_matrix_product Blocking algorithm following Goto's paper */ template< - typename Scalar, + typename Scalar, typename Index, int LhsStorageOrder, bool ConjugateLhs, int RhsStorageOrder, bool ConjugateRhs> -struct ei_general_matrix_matrix_product +struct ei_general_matrix_matrix_product { -static void run(int rows, int cols, int depth, - const Scalar* _lhs, int lhsStride, - const Scalar* _rhs, int rhsStride, - Scalar* res, int resStride, +static void run(Index rows, Index cols, Index depth, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha, - GemmParallelInfo* info = 0) + GemmParallelInfo* info = 0) { - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + ei_const_blas_data_mapper lhs(_lhs,lhsStride); + ei_const_blas_data_mapper rhs(_rhs,rhsStride); if (ConjugateRhs) alpha = ei_conj(alpha); @@ -77,19 +77,19 @@ static void run(int rows, int cols, int depth, typedef typename ei_packet_traits::type PacketType; typedef ei_product_blocking_traits Blocking; - int kc = std::min(Blocking::Max_kc,depth); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc,depth); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_lhs pack_lhs; - ei_gebp_kernel > gebp; + ei_gemm_pack_rhs pack_rhs; + ei_gemm_pack_lhs pack_lhs; + ei_gebp_kernel > gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! - int tid = omp_get_thread_num(); - int threads = omp_get_num_threads(); + Index tid = omp_get_thread_num(); + Index threads = omp_get_num_threads(); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8; @@ -98,9 +98,9 @@ static void run(int rows, int cols, int depth, // For each horizontal panel of the rhs, and corresponding panel of the lhs... 
// (==GEMM_VAR1) - for(int k=0; k rows of B', and cols of the A' + const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing A'. @@ -121,9 +121,9 @@ static void run(int rows, int cols, int depth, info[tid].sync = k; // Computes C_i += A' * B' per B'_j - for(int shift=0; shift Pack rhs's panel into a sequential chunk of memory (L2 caching) @@ -181,9 +181,9 @@ static void run(int rows, int cols, int depth, // For each mc x kc block of the lhs's vertical panel... // (==GEPP_VAR1) - for(int i2=0; i2 > : ei_traits, Lhs, Rhs> > {}; -template +template struct ei_gemm_functor { typedef typename Rhs::Scalar BlockBScalar; @@ -224,7 +224,7 @@ struct ei_gemm_functor : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha) {} - void operator() (int row, int rows, int col=0, int cols=-1, GemmParallelInfo* info=0) const + void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo* info=0) const { if(cols==-1) cols = m_rhs.cols(); @@ -237,9 +237,9 @@ struct ei_gemm_functor } - int sharedBlockBSize() const + Index sharedBlockBSize() const { - return std::min(ei_product_blocking_traits::Max_kc,m_rhs.rows()) * m_rhs.cols(); + return std::min(ei_product_blocking_traits::Max_kc,m_rhs.rows()) * m_rhs.cols(); } protected: @@ -273,9 +273,9 @@ class GeneralProduct * RhsBlasTraits::extractScalarFactor(m_rhs); typedef ei_gemm_functor< - Scalar, + Scalar, Index, ei_general_matrix_matrix_product< - Scalar, + Scalar, Index, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h index 3296f32ff..e2c5c76c0 100644 --- a/Eigen/src/Core/products/GeneralMatrixVector.h +++ b/Eigen/src/Core/products/GeneralMatrixVector.h @@ -32,11 +32,11 @@ * same alignment pattern. * TODO: since rhs gets evaluated only once, no need to evaluate it */ -template +template static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector( - int size, - const Scalar* lhs, int lhsStride, + Index size, + const Scalar* lhs, Index lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha) @@ -59,30 +59,30 @@ void ei_cache_friendly_product_colmajor_times_vector( typedef typename NumTraits::Real RealScalar; typedef typename ei_packet_traits::type Packet; - const int PacketSize = sizeof(Packet)/sizeof(Scalar); + const Index PacketSize = sizeof(Packet)/sizeof(Scalar); enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned }; - const int columnsAtOnce = 4; - const int peels = 2; - const int PacketAlignedMask = PacketSize-1; - const int PeelAlignedMask = PacketSize*peels-1; + const Index columnsAtOnce = 4; + const Index peels = 2; + const Index PacketAlignedMask = PacketSize-1; + const Index PeelAlignedMask = PacketSize*peels-1; // How many coeffs of the result do we have to skip to be aligned. // Here we assume data are at least aligned on the base scalar type. - int alignedStart = ei_first_aligned(res,size); - int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; - const int peeledSize = peels>1 ? 
alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; + Index alignedStart = ei_first_aligned(res,size); + Index alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; + const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; - const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; - int alignmentPattern = alignmentStep==0 ? AllAligned + const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; + Index alignmentPattern = alignmentStep==0 ? AllAligned : alignmentStep==(PacketSize/2) ? EvenAligned : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const int lhsAlignmentOffset = ei_first_aligned(lhs,size); + const Index lhsAlignmentOffset = ei_first_aligned(lhs,size); // find how many columns do we have to skip to be aligned with the result (if possible) - int skipColumns = 0; + Index skipColumns = 0; // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(res)%sizeof(RealScalar)) ) { @@ -114,11 +114,11 @@ void ei_cache_friendly_product_colmajor_times_vector( || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0); } - int offset1 = (FirstAligned && alignmentStep==1?3:1); - int offset3 = (FirstAligned && alignmentStep==1?1:3); + Index offset1 = (FirstAligned && alignmentStep==1?3:1); + Index offset3 = (FirstAligned && alignmentStep==1?1:3); - int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns; - for (int i=skipColumns; i(A01,A11); A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12); @@ -184,11 +184,11 @@ void ei_cache_friendly_product_colmajor_times_vector( ei_pstore(&res[j+PacketSize],A10); } } - for (int j = peeledSize; j +template static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( - const Scalar* lhs, int lhsStride, - const Scalar* rhs, int rhsSize, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsSize, ResType& res, Scalar alpha) { @@ -270,32 +270,32 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( typedef typename NumTraits::Real RealScalar; typedef typename ei_packet_traits::type Packet; - const int PacketSize = sizeof(Packet)/sizeof(Scalar); + const Index PacketSize = sizeof(Packet)/sizeof(Scalar); enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 }; - const int rowsAtOnce = 4; - const int peels = 2; - const int PacketAlignedMask = PacketSize-1; - const int PeelAlignedMask = PacketSize*peels-1; - const int size = rhsSize; + const Index rowsAtOnce = 4; + const Index peels = 2; + const Index PacketAlignedMask = PacketSize-1; + const Index PeelAlignedMask = PacketSize*peels-1; + const Index size = rhsSize; // How many coeffs of the result do we have to skip to be aligned. // Here we assume data are at least aligned on the base scalar type // if that's not the case then vectorization is discarded, see below. - int alignedStart = ei_first_aligned(rhs, size); - int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; - const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; + Index alignedStart = ei_first_aligned(rhs, size); + Index alignedSize = PacketSize>1 ? 
alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; + const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; - const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; - int alignmentPattern = alignmentStep==0 ? AllAligned + const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; + Index alignmentPattern = alignmentStep==0 ? AllAligned : alignmentStep==(PacketSize/2) ? EvenAligned : FirstAligned; // we cannot assume the first element is aligned because of sub-matrices - const int lhsAlignmentOffset = ei_first_aligned(lhs,size); + const Index lhsAlignmentOffset = ei_first_aligned(lhs,size); // find how many rows do we have to skip to be aligned with rhs (if possible) - int skipRows = 0; + Index skipRows = 0; // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(rhs)%sizeof(RealScalar)) ) { @@ -317,7 +317,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( } else { - skipRows = std::min(skipRows,res.size()); + skipRows = std::min(skipRows,Index(res.size())); // note that the skiped columns are processed later. } ei_internal_assert( alignmentPattern==NoneAligned @@ -327,11 +327,11 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0); } - int offset1 = (FirstAligned && alignmentStep==1?3:1); - int offset3 = (FirstAligned && alignmentStep==1?1:3); + Index offset1 = (FirstAligned && alignmentStep==1?3:1); + Index offset3 = (FirstAligned && alignmentStep==1?1:3); - int rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows; - for (int i=skipRows; i(A01,A11); @@ -401,11 +401,11 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( ptmp3 = cj.pmadd(A13, b, ptmp3); } } - for (int j = peeledSize; jalignedStart) { // process aligned rhs coeffs if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0) - for (int j = alignedStart;j struct GemmParallelInfo +template struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0), blockB(0) {} int volatile sync; int volatile users; - int rhs_start; - int rhs_length; + Index rhs_start; + Index rhs_length; BlockBScalar* blockB; }; -template -void ei_parallelize_gemm(const Functor& func, int rows, int cols) +template +void ei_parallelize_gemm(const Functor& func, Index rows, Index cols) { #ifndef EIGEN_HAS_OPENMP func(0,rows, 0,cols); @@ -57,16 +57,16 @@ void ei_parallelize_gemm(const Functor& func, int rows, int cols) // 2- compute the maximal number of threads from the size of the product: // FIXME this has to be fine tuned - int max_threads = std::max(1,rows / 32); + Index max_threads = std::max(1,rows / 32); // 3 - compute the number of threads we are going to use - int threads = std::min(omp_get_max_threads(), max_threads); + Index threads = std::min(omp_get_max_threads(), max_threads); if(threads==1) return func(0,rows, 0,cols); - int blockCols = (cols / threads) & ~0x3; - int blockRows = (rows / threads) & ~0x7; + Index blockCols = (cols / threads) & ~Index(0x3); + Index blockRows = (rows / threads) & ~Index(0x7); typedef typename Functor::BlockBScalar BlockBScalar; BlockBScalar* sharedBlockB = new BlockBScalar[func.sharedBlockBSize()]; @@ -74,13 +74,13 @@ void ei_parallelize_gemm(const Functor& func, int 
rows, int cols) GemmParallelInfo* info = new GemmParallelInfo[threads]; #pragma omp parallel for schedule(static,1) num_threads(threads) - for(int i=0; i +template struct ei_symm_pack_lhs { enum { PacketSize = ei_packet_traits::size }; template inline - void pack(Scalar* blockA, const ei_const_blas_data_mapper& lhs, int cols, int i, int& count) + void pack(Scalar* blockA, const ei_const_blas_data_mapper& lhs, Index cols, Index i, Index& count) { // normal copy - for(int k=0; k lhs(_lhs,lhsStride); - int count = 0; - int peeled_mc = (rows/mr)*mr; - for(int i=0; i lhs(_lhs,lhsStride); + Index count = 0; + Index peeled_mc = (rows/mr)*mr; + for(Index i=0; i(blockA, lhs, cols, i, count); } @@ -72,34 +72,34 @@ struct ei_symm_pack_lhs } // do the same with mr==1 - for(int i=peeled_mc; i +template struct ei_symm_pack_rhs { enum { PacketSize = ei_packet_traits::size }; - void operator()(Scalar* blockB, const Scalar* _rhs, int rhsStride, Scalar alpha, int rows, int cols, int k2) + void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Scalar alpha, Index rows, Index cols, Index k2) { - int end_k = k2 + rows; - int count = 0; - ei_const_blas_data_mapper rhs(_rhs,rhsStride); - int packet_cols = (cols/nr)*nr; + Index end_k = k2 + rows; + Index count = 0; + ei_const_blas_data_mapper rhs(_rhs,rhsStride); + Index packet_cols = (cols/nr)*nr; // first part: normal case - for(int j2=0; j2 the same with nr==1) - for(int j2=packet_cols; j2 struct ei_product_selfadjoint_matrix; -template -struct ei_product_selfadjoint_matrix +struct ei_product_selfadjoint_matrix { static EIGEN_STRONG_INLINE void run( - int rows, int cols, - const Scalar* lhs, int lhsStride, - const Scalar* rhs, int rhsStride, - Scalar* res, int resStride, + Index rows, Index cols, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - ei_product_selfadjoint_matrix::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs), EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? 
ColMajor : RowMajor, @@ -235,45 +235,45 @@ struct ei_product_selfadjoint_matrix -struct ei_product_selfadjoint_matrix +struct ei_product_selfadjoint_matrix { static EIGEN_DONT_INLINE void run( - int rows, int cols, - const Scalar* _lhs, int lhsStride, - const Scalar* _rhs, int rhsStride, - Scalar* res, int resStride, + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - int size = rows; + Index size = rows; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + ei_const_blas_data_mapper lhs(_lhs,lhsStride); + ei_const_blas_data_mapper rhs(_rhs,rhsStride); if (ConjugateRhs) alpha = ei_conj(alpha); typedef ei_product_blocking_traits Blocking; - int kc = std::min(Blocking::Max_kc,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB); Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; - ei_gebp_kernel > gebp_kernel; - ei_symm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_lhs pack_lhs_transposed; + ei_gebp_kernel > gebp_kernel; + ei_symm_pack_lhs pack_lhs; + ei_gemm_pack_rhs pack_rhs; + ei_gemm_pack_lhs pack_lhs_transposed; - for(int k2=0; k2 transposed packed copy // 2 - the diagonal block => special packed copy // 3 - the panel below the diagonal block => generic packed copy - for(int i2=0; i2() + const Index actual_mc = std::min(i2+mc,size)-i2; + ei_gemm_pack_lhs() (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols); @@ -317,50 +317,50 @@ struct ei_product_selfadjoint_matrix -struct ei_product_selfadjoint_matrix +struct ei_product_selfadjoint_matrix { static EIGEN_DONT_INLINE void run( - int rows, int cols, - const Scalar* _lhs, int lhsStride, - const Scalar* _rhs, int rhsStride, - Scalar* res, int resStride, + Index rows, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - int size = cols; + Index size = cols; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); + ei_const_blas_data_mapper lhs(_lhs,lhsStride); if (ConjugateRhs) alpha = ei_conj(alpha); typedef ei_product_blocking_traits Blocking; - int kc = std::min(Blocking::Max_kc,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB); Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; - ei_gebp_kernel > gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_symm_pack_rhs pack_rhs; + ei_gebp_kernel > gebp_kernel; + ei_gemm_pack_lhs pack_lhs; + ei_symm_pack_rhs pack_rhs; - for(int k2=0; k2 GEPP - for(int i2=0; i2 Scalar 
actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - ei_product_selfadjoint_matrix::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, NumTraits::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)), diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h index 0f829fd73..9af310d7a 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -30,15 +30,15 @@ * the number of load/stores of the result by a factor 2 and to reduce * the instruction dependency. */ -template +template static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( - int size, - const Scalar* lhs, int lhsStride, - const Scalar* _rhs, int rhsIncr, + Index size, + const Scalar* lhs, Index lhsStride, + const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { typedef typename ei_packet_traits::type Packet; - const int PacketSize = sizeof(Packet)/sizeof(Scalar); + const Index PacketSize = sizeof(Packet)/sizeof(Scalar); enum { IsRowMajor = StorageOrder==RowMajor ? 1 : 0, @@ -58,16 +58,16 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( { Scalar* r = ei_aligned_stack_new(Scalar, size); const Scalar* it = _rhs; - for (int i=0; i ei_assert(dst.innerStride()==1 && "not implemented yet"); - ei_product_selfadjoint_vector::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> + ei_product_selfadjoint_vector::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> ( lhs.rows(), // size &lhs.coeff(0,0), lhs.outerStride(), // lhs info diff --git a/Eigen/src/Core/products/SelfadjointProduct.h b/Eigen/src/Core/products/SelfadjointProduct.h index 01cd33d57..bf835b516 100644 --- a/Eigen/src/Core/products/SelfadjointProduct.h +++ b/Eigen/src/Core/products/SelfadjointProduct.h @@ -26,52 +26,52 @@ #define EIGEN_SELFADJOINT_PRODUCT_H /********************************************************************** -* This file implement a self adjoint product: C += A A^T updating only -* an half of the selfadjoint matrix C. +* This file implements a self adjoint product: C += A A^T updating only +* half of the selfadjoint matrix C. * It corresponds to the level 3 SYRK Blas routine. 
**********************************************************************/ // forward declarations (defined at the end of this file) -template +template struct ei_sybb_kernel; /* Optimized selfadjoint product (_SYRK) */ -template struct ei_selfadjoint_product; // as usual if the result is row major => we transpose the product -template -struct ei_selfadjoint_product +template +struct ei_selfadjoint_product { - static EIGEN_STRONG_INLINE void run(int size, int depth, const Scalar* mat, int matStride, Scalar* res, int resStride, Scalar alpha) + static EIGEN_STRONG_INLINE void run(Index size, Index depth, const Scalar* mat, Index matStride, Scalar* res, Index resStride, Scalar alpha) { - ei_selfadjoint_product + ei_selfadjoint_product ::run(size, depth, mat, matStride, res, resStride, alpha); } }; -template -struct ei_selfadjoint_product +struct ei_selfadjoint_product { static EIGEN_DONT_INLINE void run( - int size, int depth, - const Scalar* _mat, int matStride, - Scalar* res, int resStride, + Index size, Index depth, + const Scalar* _mat, Index matStride, + Scalar* res, Index resStride, Scalar alpha) { - ei_const_blas_data_mapper mat(_mat,matStride); + ei_const_blas_data_mapper mat(_mat,matStride); if(AAT) alpha = ei_conj(alpha); typedef ei_product_blocking_traits Blocking; - int kc = std::min(Blocking::Max_kc,depth); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc,depth); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size; @@ -81,21 +81,21 @@ struct ei_selfadjoint_product // note that the actual rhs is the transpose/adjoint of mat typedef ei_conj_helper::IsComplex && !AAT, NumTraits::IsComplex && AAT> Conj; - ei_gebp_kernel gebp_kernel; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_lhs pack_lhs; - ei_sybb_kernel sybb; + ei_gebp_kernel gebp_kernel; + ei_gemm_pack_rhs pack_rhs; + ei_gemm_pack_lhs pack_lhs; + ei_sybb_kernel sybb; - for(int k2=0; k2 if (UpLo==Upper) { - int j2 = i2+actual_mc; - gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(0,size-j2), + Index j2 = i2+actual_mc; + gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0),size-j2), -1, -1, 0, 0, allocatedBlockB); } } @@ -138,7 +138,7 @@ SelfAdjointView& SelfAdjointView enum { IsRowMajor = (ei_traits::Flags&RowMajorBit) ? 1 : 0 }; - ei_selfadjoint_product::Flags&RowMajorBit ? RowMajor : ColMajor, !UBlasTraits::NeedToConjugate, UpLo> @@ -158,23 +158,23 @@ SelfAdjointView& SelfAdjointView // while the selfadjoint block overlapping the diagonal is evaluated into a // small temporary buffer which is then accumulated into the result using a // triangular traversal. 
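The comment above describes how each selfadjoint micro block is first evaluated into a small dense temporary and then folded into only the stored half of the result. A minimal sketch of that triangular accumulation step, assuming a column-major destination and using hypothetical names and plain loops rather than Eigen's packed gebp kernel:

#include <cstddef>

typedef std::ptrdiff_t Index;   // mirrors the new default dense index type

// Hypothetical helper (not Eigen's kernel): accumulate a dense blockSize x blockSize
// temporary product 'buffer' (column-major, leading dimension blockSize) into the
// destination, touching only the lower half of the selfadjoint result.
void accumulateLowerTriangle(double* res, Index resStride,
                             const double* buffer, Index blockSize)
{
  for (Index j = 0; j < blockSize; ++j)       // column inside the micro block
    for (Index i = j; i < blockSize; ++i)     // rows on and below the diagonal
      res[i + j * resStride] += buffer[i + j * blockSize];
}

The upper-triangular case is symmetric: the inner loop would run over rows 0..j instead.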
-template +template struct ei_sybb_kernel { enum { PacketSize = ei_packet_traits::size, BlockSize = EIGEN_ENUM_MAX(mr,nr) }; - void operator()(Scalar* res, int resStride, const Scalar* blockA, const Scalar* blockB, int size, int depth, Scalar* workspace) + void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index size, Index depth, Scalar* workspace) { - ei_gebp_kernel gebp_kernel; + ei_gebp_kernel gebp_kernel; Matrix buffer; // let's process the block per panel of actual_mc x BlockSize, // again, each is split into three parts, etc. - for (int j=0; j(BlockSize,size - j); + Index actualBlockSize = std::min(BlockSize,size - j); const Scalar* actual_b = blockB+j*depth; if(UpLo==Upper) @@ -182,16 +182,16 @@ struct ei_sybb_kernel // selfadjoint micro block { - int i = j; + Index i = j; buffer.setZero(); // 1 - apply the kernel on the temporary buffer gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, -1, -1, 0, 0, workspace); // 2 - triangular accumulation - for(int j1=0; j1 +template struct ei_selfadjoint_rank2_update_selector; -template -struct ei_selfadjoint_rank2_update_selector +template +struct ei_selfadjoint_rank2_update_selector { - static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha) + static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha) { - const int size = u.size(); - for (int i=0; i >(mat+stride*i+i, size-i) += (alpha * ei_conj(u.coeff(i))) * v.tail(size-i) @@ -47,13 +47,13 @@ struct ei_selfadjoint_rank2_update_selector } }; -template -struct ei_selfadjoint_rank2_update_selector +template +struct ei_selfadjoint_rank2_update_selector { - static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha) + static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha) { - const int size = u.size(); - for (int i=0; i >(mat+stride*i, i+1) += (alpha * ei_conj(u.coeff(i))) * v.head(i+1) + (alpha * ei_conj(v.coeff(i))) * u.head(i+1); @@ -84,7 +84,7 @@ SelfAdjointView& SelfAdjointView * VBlasTraits::extractScalarFactor(v.derived()); enum { IsRowMajor = (ei_traits::Flags&RowMajorBit) ? 1 : 0 }; - ei_selfadjoint_rank2_update_selector::ret>::type, typename ei_cleantype::ret>::type, (IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)> diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h index 53e7876c1..25d9ffe2d 100644 --- a/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -52,29 +52,29 @@ /* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of * the general matrix matrix product. 
*/ -template struct ei_product_triangular_matrix_matrix; -template -struct ei_product_triangular_matrix_matrix { static EIGEN_STRONG_INLINE void run( - int size, int otherSize, - const Scalar* lhs, int lhsStride, - const Scalar* rhs, int rhsStride, - Scalar* res, int resStride, + Index size, Index otherSize, + const Scalar* lhs, Index lhsStride, + const Scalar* rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - ei_product_triangular_matrix_matrix -struct ei_product_triangular_matrix_matrix { static EIGEN_DONT_INLINE void run( - int size, int cols, - const Scalar* _lhs, int lhsStride, - const Scalar* _rhs, int rhsStride, - Scalar* res, int resStride, + Index size, Index cols, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - int rows = size; + Index rows = size; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + ei_const_blas_data_mapper lhs(_lhs,lhsStride); + ei_const_blas_data_mapper rhs(_rhs,rhsStride); if (ConjugateRhs) alpha = ei_conj(alpha); @@ -116,8 +116,8 @@ struct ei_product_triangular_matrix_matrix(Blocking::Max_kc/4,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc/4,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; @@ -129,16 +129,16 @@ struct ei_product_triangular_matrix_matrix > gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; + ei_gebp_kernel > gebp_kernel; + ei_gemm_pack_lhs pack_lhs; + ei_gemm_pack_rhs pack_rhs; - for(int k2=IsLower ? size : 0; + for(Index k2=IsLower ? size : 0; IsLower ? k2>0 : k2(actual_kc-k1, SmallPanelWidth); - int lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1; - int startBlock = actual_k2+k1; - int blockBOffset = k1; + Index actualPanelWidth = std::min(actual_kc-k1, SmallPanelWidth); + Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1; + Index startBlock = actual_k2+k1; + Index blockBOffset = k1; // => GEBP with the micro triangular block // The trick is to pack this micro block while filling the opposite triangular part with zeros. // To this end we do an extra triangular copy to a small temporary buffer - for (int k=0;k0) { - int startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2; + Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2; pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget); @@ -185,12 +185,12 @@ struct ei_product_triangular_matrix_matrix GEPP { - int start = IsLower ? k2 : 0; - int end = IsLower ? 
size : actual_k2; - for(int i2=start; i2() + const Index actual_mc = std::min(i2+mc,end)-i2; + ei_gemm_pack_lhs() (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols); @@ -205,25 +205,25 @@ struct ei_product_triangular_matrix_matrix -struct ei_product_triangular_matrix_matrix { static EIGEN_DONT_INLINE void run( - int size, int rows, - const Scalar* _lhs, int lhsStride, - const Scalar* _rhs, int rhsStride, - Scalar* res, int resStride, + Index size, Index rows, + const Scalar* _lhs, Index lhsStride, + const Scalar* _rhs, Index rhsStride, + Scalar* res, Index resStride, Scalar alpha) { - int cols = size; + Index cols = size; - ei_const_blas_data_mapper lhs(_lhs,lhsStride); - ei_const_blas_data_mapper rhs(_rhs,rhsStride); + ei_const_blas_data_mapper lhs(_lhs,lhsStride); + ei_const_blas_data_mapper rhs(_rhs,rhsStride); if (ConjugateRhs) alpha = ei_conj(alpha); @@ -234,8 +234,8 @@ struct ei_product_triangular_matrix_matrix(Blocking::Max_kc/4,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc/4,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,rows); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; @@ -246,30 +246,30 @@ struct ei_product_triangular_matrix_matrix > gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_rhs pack_rhs_panel; + ei_gebp_kernel > gebp_kernel; + ei_gemm_pack_lhs pack_lhs; + ei_gemm_pack_rhs pack_rhs; + ei_gemm_pack_rhs pack_rhs_panel; - for(int k2=IsLower ? 0 : size; + for(Index k2=IsLower ? 0 : size; IsLower ? k20; IsLower ? k2+=kc : k2-=kc) { - const int actual_kc = std::min(IsLower ? size-k2 : k2, kc); - int actual_k2 = IsLower ? k2 : k2-actual_kc; - int rs = IsLower ? actual_k2 : size - k2; + const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc); + Index actual_k2 = IsLower ? k2 : k2-actual_kc; + Index rs = IsLower ? actual_k2 : size - k2; Scalar* geb = blockB+actual_kc*actual_kc; pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, alpha, actual_kc, rs); // pack the triangular part of the rhs padding the unrolled blocks with zeros { - for (int j2=0; j2(actual_kc-j2, SmallPanelWidth); - int actual_j2 = actual_k2 + j2; - int panelOffset = IsLower ? j2+actualPanelWidth : 0; - int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; + Index actualPanelWidth = std::min(actual_kc-j2, SmallPanelWidth); + Index actual_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; // general part pack_rhs_panel(blockB+j2*actual_kc, &rhs(actual_k2+panelOffset, actual_j2), rhsStride, alpha, @@ -277,11 +277,11 @@ struct ei_product_triangular_matrix_matrix(actual_kc-j2, SmallPanelWidth); - int panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth; - int blockOffset = IsLower ? j2 : 0; + Index actualPanelWidth = std::min(actual_kc-j2, SmallPanelWidth); + Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth; + Index blockOffset = IsLower ? 
j2 : 0; gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride, blockA, blockB+j2*actual_kc, @@ -349,7 +349,7 @@ struct TriangularProduct Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); - ei_product_triangular_matrix_matrix::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, (ei_traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h index ee4c45c35..1a2b183aa 100644 --- a/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/Eigen/src/Core/products/TriangularMatrixVector.h @@ -33,34 +33,35 @@ template { typedef typename Rhs::Scalar Scalar; + typedef typename Rhs::Index Index; enum { IsLower = ((Mode&Lower)==Lower), HasUnitDiag = (Mode & UnitDiag)==UnitDiag }; static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits::Scalar alpha) { - static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; typename ei_conj_expr_if::ret cjLhs(lhs); typename ei_conj_expr_if::ret cjRhs(rhs); - int size = lhs.cols(); - for (int pi=0; pi0) res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r); if (HasUnitDiag) res.coeffRef(i) += alpha * cjRhs.coeff(i); } - int r = IsLower ? size - pi - actualPanelWidth : pi; + Index r = IsLower ? size - pi - actualPanelWidth : pi; if (r>0) { - int s = IsLower ? pi+actualPanelWidth : 0; + Index s = IsLower ? pi+actualPanelWidth : 0; ei_cache_friendly_product_colmajor_times_vector( r, &(lhs.const_cast_derived().coeffRef(s,pi)), lhs.outerStride(), @@ -76,33 +77,34 @@ template { typedef typename Rhs::Scalar Scalar; + typedef typename Rhs::Index Index; enum { IsLower = ((Mode&Lower)==Lower), HasUnitDiag = (Mode & UnitDiag)==UnitDiag }; static void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits::Scalar alpha) { - static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; + static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; typename ei_conj_expr_if::ret cjLhs(lhs); typename ei_conj_expr_if::ret cjRhs(rhs); - int size = lhs.cols(); - for (int pi=0; pi0) res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum(); if (HasUnitDiag) res.coeffRef(i) += alpha * cjRhs.coeff(i); } - int r = IsLower ? pi : size - pi - actualPanelWidth; + Index r = IsLower ? pi : size - pi - actualPanelWidth; if (r>0) { - int s = IsLower ? 0 : pi + actualPanelWidth; + Index s = IsLower ? 
0 : pi + actualPanelWidth; Block target(res,pi,0,actualPanelWidth,1); ei_cache_friendly_product_rowmajor_times_vector( &(lhs.const_cast_derived().coeffRef(pi,s)), lhs.outerStride(), diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h index 1774081a2..1d8022517 100644 --- a/Eigen/src/Core/products/TriangularSolverMatrix.h +++ b/Eigen/src/Core/products/TriangularSolverMatrix.h @@ -26,16 +26,16 @@ #define EIGEN_TRIANGULAR_SOLVER_MATRIX_H // if the rhs is row major, let's transpose the product -template -struct ei_triangular_solve_matrix +template +struct ei_triangular_solve_matrix { static EIGEN_DONT_INLINE void run( - int size, int cols, - const Scalar* tri, int triStride, - Scalar* _other, int otherStride) + Index size, Index cols, + const Scalar* tri, Index triStride, + Scalar* _other, Index otherStride) { ei_triangular_solve_matrix< - Scalar, Side==OnTheLeft?OnTheRight:OnTheLeft, + Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft, (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper), NumTraits::IsComplex && Conjugate, TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor> @@ -45,17 +45,17 @@ struct ei_triangular_solve_matrix -struct ei_triangular_solve_matrix +template +struct ei_triangular_solve_matrix { static EIGEN_DONT_INLINE void run( - int size, int otherSize, - const Scalar* _tri, int triStride, - Scalar* _other, int otherStride) + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride) { - int cols = otherSize; - ei_const_blas_data_mapper tri(_tri,triStride); - ei_blas_data_mapper other(_other,otherStride); + Index cols = otherSize; + ei_const_blas_data_mapper tri(_tri,triStride); + ei_blas_data_mapper other(_other,otherStride); typedef ei_product_blocking_traits Blocking; enum { @@ -63,8 +63,8 @@ struct ei_triangular_solve_matrix(Blocking::Max_kc/4,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc/4,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; @@ -72,15 +72,15 @@ struct ei_triangular_solve_matrix conj; - ei_gebp_kernel > gebp_kernel; - ei_gemm_pack_lhs pack_lhs; - ei_gemm_pack_rhs pack_rhs; + ei_gebp_kernel > gebp_kernel; + ei_gemm_pack_lhs pack_lhs; + ei_gemm_pack_rhs pack_rhs; - for(int k2=IsLower ? 0 : size; + for(Index k2=IsLower ? 0 : size; IsLower ? k20; IsLower ? k2+=kc : k2-=kc) { - const int actual_kc = std::min(IsLower ? size-k2 : k2, kc); + const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc); // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel, // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into @@ -97,45 +97,45 @@ struct ei_triangular_solve_matrix(actual_kc-k1, SmallPanelWidth); + Index actualPanelWidth = std::min(actual_kc-k1, SmallPanelWidth); // tr solve - for (int k=0; k0) { - int startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc; + Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc; pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget); @@ -155,11 +155,11 @@ struct ei_triangular_solve_matrix GEPP { - int start = IsLower ? k2+kc : 0; - int end = IsLower ? 
size : k2-kc; - for(int i2=start; i20) { pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc); @@ -177,17 +177,17 @@ struct ei_triangular_solve_matrix -struct ei_triangular_solve_matrix +template +struct ei_triangular_solve_matrix { static EIGEN_DONT_INLINE void run( - int size, int otherSize, - const Scalar* _tri, int triStride, - Scalar* _other, int otherStride) + Index size, Index otherSize, + const Scalar* _tri, Index triStride, + Scalar* _other, Index otherStride) { - int rows = otherSize; - ei_const_blas_data_mapper rhs(_tri,triStride); - ei_blas_data_mapper lhs(_other,otherStride); + Index rows = otherSize; + ei_const_blas_data_mapper rhs(_tri,triStride); + ei_blas_data_mapper lhs(_other,otherStride); typedef ei_product_blocking_traits Blocking; enum { @@ -196,8 +196,8 @@ struct ei_triangular_solve_matrix(Blocking::Max_kc/4,size); // cache block size along the K direction - int mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction + Index kc = std::min(Blocking::Max_kc/4,size); // cache block size along the K direction + Index mc = std::min(Blocking::Max_mc,size); // cache block size along the M direction Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size; @@ -205,20 +205,20 @@ struct ei_triangular_solve_matrix conj; - ei_gebp_kernel > gebp_kernel; - ei_gemm_pack_rhs pack_rhs; - ei_gemm_pack_rhs pack_rhs_panel; - ei_gemm_pack_lhs pack_lhs_panel; + ei_gebp_kernel > gebp_kernel; + ei_gemm_pack_rhs pack_rhs; + ei_gemm_pack_rhs pack_rhs_panel; + ei_gemm_pack_lhs pack_lhs_panel; - for(int k2=IsLower ? size : 0; + for(Index k2=IsLower ? size : 0; IsLower ? k2>0 : k20) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, -1, actual_kc, rs); @@ -226,12 +226,12 @@ struct ei_triangular_solve_matrix(actual_kc-j2, SmallPanelWidth); - int actual_j2 = actual_k2 + j2; - int panelOffset = IsLower ? j2+actualPanelWidth : 0; - int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; + Index actualPanelWidth = std::min(actual_kc-j2, SmallPanelWidth); + Index actual_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; if (panelLength>0) pack_rhs_panel(blockB+j2*actual_kc, @@ -241,24 +241,24 @@ struct ei_triangular_solve_matrix vertical panels of rhs) - for (int j2 = IsLower - ? (actual_kc - ((actual_kc%SmallPanelWidth) ? (actual_kc%SmallPanelWidth) - : SmallPanelWidth)) + for (Index j2 = IsLower + ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth) + : Index(SmallPanelWidth))) : 0; IsLower ? j2>=0 : j2(actual_kc-j2, SmallPanelWidth); - int absolute_j2 = actual_k2 + j2; - int panelOffset = IsLower ? j2+actualPanelWidth : 0; - int panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2; + Index actualPanelWidth = std::min(actual_kc-j2, SmallPanelWidth); + Index absolute_j2 = actual_k2 + j2; + Index panelOffset = IsLower ? j2+actualPanelWidth : 0; + Index panelLength = IsLower ? 
actual_kc - j2 - actualPanelWidth : j2; // GEBP if(panelLength>0) @@ -272,20 +272,20 @@ struct ei_triangular_solve_matrix +template struct ei_gebp_kernel; -template +template struct ei_gemm_pack_rhs; -template +template struct ei_gemm_pack_lhs; template< - typename Scalar, + typename Scalar, typename Index, int LhsStorageOrder, bool ConjugateLhs, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder> struct ei_general_matrix_matrix_product; -template +template static void ei_cache_friendly_product_colmajor_times_vector( - int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha); + Index size, const Scalar* lhs, Index lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha); -template +template static void ei_cache_friendly_product_rowmajor_times_vector( - const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res, Scalar alpha); + const Scalar* lhs, Index lhsStride, const Scalar* rhs, Index rhsSize, ResType& res, Scalar alpha); // Provides scalar/packet-wise product and product with accumulation // with optional conjugation of the arguments. @@ -98,29 +98,29 @@ template<> struct ei_conj_helper // Lightweight helper class to access matrix coefficients. // Yes, this is somehow redundant with Map<>, but this version is much much lighter, // and so I hope better compilation performance (time and code quality). -template +template class ei_blas_data_mapper { public: - ei_blas_data_mapper(Scalar* data, int stride) : m_data(data), m_stride(stride) {} - EIGEN_STRONG_INLINE Scalar& operator()(int i, int j) + ei_blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {} + EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j) { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; } protected: Scalar* EIGEN_RESTRICT m_data; - int m_stride; + Index m_stride; }; // lightweight helper class to access matrix coefficients (const version) -template +template class ei_const_blas_data_mapper { public: - ei_const_blas_data_mapper(const Scalar* data, int stride) : m_data(data), m_stride(stride) {} - EIGEN_STRONG_INLINE const Scalar& operator()(int i, int j) const + ei_const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {} + EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; } protected: const Scalar* EIGEN_RESTRICT m_data; - int m_stride; + Index m_stride; }; // Defines various constant controlling level 3 blocking diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 787182444..312a14414 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -94,6 +94,14 @@ #define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor #endif +#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t +#endif + +#ifndef EIGEN_DEFAULT_SPARSE_INDEX_TYPE +#define EIGEN_DEFAULT_SPARSE_INDEX_TYPE int +#endif + /** Allows to disable some optimizations which might affect the accuracy of the result. * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them. * They currently include: @@ -266,6 +274,8 @@ typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. 
*/ \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ typedef typename Eigen::ei_nested::type Nested; \ + typedef typename Eigen::ei_traits::StorageKind StorageKind; \ + typedef typename Eigen::ei_index::type Index; \ enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ Flags = Eigen::ei_traits::Flags, \ @@ -281,6 +291,8 @@ typedef typename Base::PacketScalar PacketScalar; \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ typedef typename Eigen::ei_nested::type Nested; \ + typedef typename Eigen::ei_traits::StorageKind StorageKind; \ + typedef typename Eigen::ei_index::type Index; \ enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ MaxRowsAtCompileTime = Eigen::ei_traits::MaxRowsAtCompileTime, \ diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h index f5cdd915a..6b202dbc8 100644 --- a/Eigen/src/Core/util/Memory.h +++ b/Eigen/src/Core/util/Memory.h @@ -379,10 +379,10 @@ template inline T* ei_conditional_aligned_realloc_new(T* * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for * example with Scalar=double on certain 32-bit platforms, see bug #79. * - * There is also the variant ei_first_aligned(const MatrixBase&, Integer) defined in Coeffs.h. + * There is also the variant ei_first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. */ -template -inline static Integer ei_first_aligned(const Scalar* array, Integer size) +template +inline static Index ei_first_aligned(const Scalar* array, Index size) { typedef typename ei_packet_traits::type Packet; enum { PacketSize = ei_packet_traits::size, @@ -403,7 +403,7 @@ inline static Integer ei_first_aligned(const Scalar* array, Integer size) } else { - return std::min( (PacketSize - (Integer((size_t(array)/sizeof(Scalar))) & PacketAlignedMask)) + return std::min( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask)) & PacketAlignedMask, size); } } diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h index 667418bb3..4dee8142d 100644 --- a/Eigen/src/Core/util/XprHelper.h +++ b/Eigen/src/Core/util/XprHelper.h @@ -42,27 +42,35 @@ class ei_no_assignment_operator ei_no_assignment_operator& operator=(const ei_no_assignment_operator&); }; -/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around an int variable that +template struct ei_index {}; + +template<> +struct ei_index +{ typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE type; }; + +typedef typename ei_index::type DenseIndex; + +/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that * can be accessed using value() and setValue(). * Otherwise, this class is an empty structure and value() just returns the template parameter Value. 
*/ -template class ei_int_if_dynamic +template class ei_variable_if_dynamic { public: - EIGEN_EMPTY_STRUCT_CTOR(ei_int_if_dynamic) - explicit ei_int_if_dynamic(int v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == Value); } - static int value() { return Value; } - void setValue(int) {} + EIGEN_EMPTY_STRUCT_CTOR(ei_variable_if_dynamic) + explicit ei_variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == T(Value)); } + static T value() { return T(Value); } + void setValue(T) {} }; -template<> class ei_int_if_dynamic +template class ei_variable_if_dynamic { - int m_value; - ei_int_if_dynamic() { ei_assert(false); } + T m_value; + ei_variable_if_dynamic() { ei_assert(false); } public: - explicit ei_int_if_dynamic(int value) : m_value(value) {} - int value() const { return m_value; } - void setValue(int value) { m_value = value; } + explicit ei_variable_if_dynamic(T value) : m_value(value) {} + T value() const { return m_value; } + void setValue(T value) { m_value = value; } }; template struct ei_functor_traits diff --git a/Eigen/src/Eigen2Support/Block.h b/Eigen/src/Eigen2Support/Block.h index eb17a27ab..c5e1a9cca 100644 --- a/Eigen/src/Eigen2Support/Block.h +++ b/Eigen/src/Eigen2Support/Block.h @@ -40,11 +40,11 @@ * when it is applied to a fixed-size matrix, it inherits a fixed maximal size, * which means that evaluating it does not cause a dynamic memory allocation. * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template inline Block DenseBase - ::corner(CornerType type, int cRows, int cCols) + ::corner(CornerType type, Index cRows, Index cCols) { switch(type) { @@ -61,10 +61,10 @@ inline Block DenseBase } } -/** This is the const version of corner(CornerType, int, int).*/ +/** This is the const version of corner(CornerType, Index, Index).*/ template inline const Block -DenseBase::corner(CornerType type, int cRows, int cCols) const +DenseBase::corner(CornerType type, Index cRows, Index cCols) const { switch(type) { @@ -91,7 +91,7 @@ DenseBase::corner(CornerType type, int cRows, int cCols) const * Example: \include MatrixBase_template_int_int_corner_enum.cpp * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out * - * \sa class Block, block(int,int,int,int) + * \sa class Block, block(Index,Index,Index,Index) */ template template diff --git a/Eigen/src/Eigen2Support/Minor.h b/Eigen/src/Eigen2Support/Minor.h index e7e164a16..3bf913b44 100644 --- a/Eigen/src/Eigen2Support/Minor.h +++ b/Eigen/src/Eigen2Support/Minor.h @@ -44,6 +44,7 @@ struct ei_traits > { typedef typename ei_nested::type MatrixTypeNested; typedef typename ei_unref::type _MatrixTypeNested; + typedef typename MatrixType::StorageKind StorageKind; enum { RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ? 
int(MatrixType::RowsAtCompileTime) - 1 : Dynamic, @@ -68,7 +69,7 @@ template class Minor EIGEN_DENSE_PUBLIC_INTERFACE(Minor) inline Minor(const MatrixType& matrix, - int row, int col) + Index row, Index col) : m_matrix(matrix), m_row(row), m_col(col) { ei_assert(row >= 0 && row < matrix.rows() @@ -77,22 +78,22 @@ template class Minor EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor) - inline int rows() const { return m_matrix.rows() - 1; } - inline int cols() const { return m_matrix.cols() - 1; } + inline Index rows() const { return m_matrix.rows() - 1; } + inline Index cols() const { return m_matrix.cols() - 1; } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col)); } - inline const Scalar coeff(int row, int col) const + inline const Scalar coeff(Index row, Index col) const { return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col)); } protected: const typename MatrixType::Nested m_matrix; - const int m_row, m_col; + const Index m_row, m_col; }; /** \nonstableyet @@ -107,7 +108,7 @@ template class Minor */ template inline Minor -MatrixBase::minor(int row, int col) +MatrixBase::minor(Index row, Index col) { return Minor(derived(), row, col); } @@ -116,7 +117,7 @@ MatrixBase::minor(int row, int col) * This is the const version of minor(). */ template inline const Minor -MatrixBase::minor(int row, int col) const +MatrixBase::minor(Index row, Index col) const { return Minor(derived(), row, col); } diff --git a/Eigen/src/Eigen2Support/VectorBlock.h b/Eigen/src/Eigen2Support/VectorBlock.h index 4ddc42554..c6ac4340a 100644 --- a/Eigen/src/Eigen2Support/VectorBlock.h +++ b/Eigen/src/Eigen2Support/VectorBlock.h @@ -26,37 +26,37 @@ #ifndef EIGEN_VECTORBLOCK2_H #define EIGEN_VECTORBLOCK2_H -/** \deprecated use DenseMase::head(int) */ +/** \deprecated use DenseMase::head(Index) */ template inline VectorBlock -MatrixBase::start(int size) +MatrixBase::start(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), 0, size); } -/** \deprecated use DenseMase::head(int) */ +/** \deprecated use DenseMase::head(Index) */ template inline const VectorBlock -MatrixBase::start(int size) const +MatrixBase::start(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), 0, size); } -/** \deprecated use DenseMase::tail(int) */ +/** \deprecated use DenseMase::tail(Index) */ template inline VectorBlock -MatrixBase::end(int size) +MatrixBase::end(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), this->size() - size, size); } -/** \deprecated use DenseMase::tail(int) */ +/** \deprecated use DenseMase::tail(Index) */ template inline const VectorBlock -MatrixBase::end(int size) const +MatrixBase::end(Index size) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return VectorBlock(derived(), this->size() - size, size); diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h index 5f956e49d..f56815c15 100644 --- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h +++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h @@ -68,6 +68,7 @@ template class ComplexEigenSolver /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; /** \brief Complex scalar type for \p _MatrixType. 
* @@ -110,7 +111,7 @@ template class ComplexEigenSolver * according to the specified problem \a size. * \sa ComplexEigenSolver() */ - ComplexEigenSolver(int size) + ComplexEigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_schur(size), @@ -216,7 +217,7 @@ void ComplexEigenSolver::compute(const MatrixType& matrix) { // this code is inspired from Jampack assert(matrix.cols() == matrix.rows()); - const int n = matrix.cols(); + const Index n = matrix.cols(); const RealScalar matrixnorm = matrix.norm(); // Step 1: Do a complex Schur decomposition, A = U T U^* @@ -227,11 +228,11 @@ void ComplexEigenSolver::compute(const MatrixType& matrix) // Step 2: Compute X such that T = X D X^(-1), where D is the diagonal of T. // The matrix X is unit triangular. m_matX = EigenvectorType::Zero(n, n); - for(int k=n-1 ; k>=0 ; k--) + for(Index k=n-1 ; k>=0 ; k--) { m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0); // Compute X(i,k) using the (i,k) entry of the equation X T = D X - for(int i=k-1 ; i>=0 ; i--) + for(Index i=k-1 ; i>=0 ; i--) { m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k); if(k-i-1>0) @@ -250,16 +251,16 @@ void ComplexEigenSolver::compute(const MatrixType& matrix) // Step 3: Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1) m_eivec.noalias() = m_schur.matrixU() * m_matX; // .. and normalize the eigenvectors - for(int k=0 ; k class ComplexSchur /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; /** \brief Complex scalar type for \p _MatrixType. * @@ -100,7 +100,7 @@ template class ComplexSchur * * \sa compute() for an example. */ - ComplexSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) + ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : m_matT(size,size), m_matU(size,size), m_hess(size), @@ -197,8 +197,8 @@ template class ComplexSchur bool m_matUisUptodate; private: - bool subdiagonalEntryIsNeglegible(int i); - ComplexScalar computeShift(int iu, int iter); + bool subdiagonalEntryIsNeglegible(Index i); + ComplexScalar computeShift(Index iu, Index iter); void reduceToTriangularForm(bool skipU); friend struct ei_complex_schur_reduce_to_hessenberg::IsComplex>; }; @@ -244,7 +244,7 @@ std::complex ei_sqrt(const std::complex &z) * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and * return true, else return false. */ template -inline bool ComplexSchur::subdiagonalEntryIsNeglegible(int i) +inline bool ComplexSchur::subdiagonalEntryIsNeglegible(Index i) { RealScalar d = ei_norm1(m_matT.coeff(i,i)) + ei_norm1(m_matT.coeff(i+1,i+1)); RealScalar sd = ei_norm1(m_matT.coeff(i+1,i)); @@ -259,7 +259,7 @@ inline bool ComplexSchur::subdiagonalEntryIsNeglegible(int i) /** Compute the shift in the current QR iteration. */ template -typename ComplexSchur::ComplexScalar ComplexSchur::computeShift(int iu, int iter) +typename ComplexSchur::ComplexScalar ComplexSchur::computeShift(Index iu, Index iter) { if (iter == 10 || iter == 20) { @@ -356,9 +356,9 @@ void ComplexSchur::reduceToTriangularForm(bool skipU) // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows il,...,iu is the part we are working on (the active submatrix). // Rows iu+1,...,end are already brought in triangular form. 
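// Illustrative sketch (hypothetical helper, assumed names; not taken from this
// commit): the reason loop counters such as iu/il/iter below switch from int to
// Index is that generic code can then take its index type from the matrix itself
// (DenseIndex, i.e. a signed pointer-sized type, by default). For example:
//
//   template<typename MatrixType>
//   typename MatrixType::Index countZeroSubdiagonals(const MatrixType& T)
//   {
//     typedef typename MatrixType::Index  Index;   // matrix-provided index type
//     typedef typename MatrixType::Scalar Scalar;
//     Index count = 0;
//     for(Index i = 0; i + 1 < T.rows(); ++i)      // rows() also returns Index
//       if(T.coeff(i+1, i) == Scalar(0))
//         ++count;
//     return count;
//   }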
- int iu = m_matT.cols() - 1; - int il; - int iter = 0; // number of iterations we are working on the (iu,iu) element + Index iu = m_matT.cols() - 1; + Index il; + Index iter = 0; // number of iterations we are working on the (iu,iu) element while(true) { @@ -395,7 +395,7 @@ void ComplexSchur::reduceToTriangularForm(bool skipU) m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot); if(!skipU) m_matU.applyOnTheRight(il, il+1, rot); - for(int i=il+1 ; i class EigenSolver /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; /** \brief Complex scalar type for \p _MatrixType. * @@ -128,7 +129,7 @@ template class EigenSolver * according to the specified problem \a size. * \sa EigenSolver() */ - EigenSolver(int size) + EigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_isInitialized(false), @@ -285,9 +286,9 @@ template MatrixType EigenSolver::pseudoEigenvalueMatrix() const { ei_assert(m_isInitialized && "EigenSolver is not initialized."); - int n = m_eivec.cols(); + Index n = m_eivec.cols(); MatrixType matD = MatrixType::Zero(n,n); - for (int i=0; i typename EigenSolver::EigenvectorsType EigenSolver::eigenvectors() const { ei_assert(m_isInitialized && "EigenSolver is not initialized."); - int n = m_eivec.cols(); + Index n = m_eivec.cols(); EigenvectorsType matV(n,n); - for (int j=0; j::EigenvectorsType EigenSolver::eige else { // we have a pair of complex eigen values - for (int i=0; i& EigenSolver::compute(const MatrixType& matr // Compute eigenvalues from matT m_eivalues.resize(matrix.cols()); - int i = 0; + Index i = 0; while (i < matrix.cols()) { if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) @@ -390,14 +391,14 @@ std::complex cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi) template void EigenSolver::computeEigenvectors() { - const int size = m_eivec.cols(); + const Index size = m_eivec.cols(); const Scalar eps = NumTraits::epsilon(); // inefficient! 
this is already computed in RealSchur Scalar norm = 0.0; - for (int j = 0; j < size; ++j) + for (Index j = 0; j < size; ++j) { - norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum(); + norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum(); } // Backsubstitute to find vectors of upper triangular form @@ -406,7 +407,7 @@ void EigenSolver::computeEigenvectors() return; } - for (int n = size-1; n >= 0; n--) + for (Index n = size-1; n >= 0; n--) { Scalar p = m_eivalues.coeff(n).real(); Scalar q = m_eivalues.coeff(n).imag(); @@ -415,10 +416,10 @@ void EigenSolver::computeEigenvectors() if (q == 0) { Scalar lastr=0, lastw=0; - int l = n; + Index l = n; m_matT.coeffRef(n,n) = 1.0; - for (int i = n-1; i >= 0; i--) + for (Index i = n-1; i >= 0; i--) { Scalar w = m_matT.coeff(i,i) - p; Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); @@ -461,7 +462,7 @@ void EigenSolver::computeEigenvectors() else if (q < 0) // Complex vector { Scalar lastra=0, lastsa=0, lastw=0; - int l = n-1; + Index l = n-1; // Last vector component imaginary so matrix is triangular if (ei_abs(m_matT.coeff(n,n-1)) > ei_abs(m_matT.coeff(n-1,n))) @@ -477,7 +478,7 @@ void EigenSolver::computeEigenvectors() } m_matT.coeffRef(n,n-1) = 0.0; m_matT.coeffRef(n,n) = 1.0; - for (int i = n-2; i >= 0; i--) + for (Index i = n-2; i >= 0; i--) { Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1)); Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); @@ -535,7 +536,7 @@ void EigenSolver::computeEigenvectors() } // Back transformation to get eigenvectors of original matrix - for (int j = size-1; j >= 0; j--) + for (Index j = size-1; j >= 0; j--) { m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1); m_eivec.col(j) = m_tmp; diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h index 7a80aed99..1111ffb12 100644 --- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h +++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h @@ -81,6 +81,7 @@ template class HessenbergDecomposition /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; /** \brief Type for vector of Householder coefficients. * @@ -104,7 +105,7 @@ template class HessenbergDecomposition * * \sa compute() for an example. */ - HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size) + HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size) : m_matrix(size,size), m_temp(size) { @@ -276,12 +277,12 @@ template void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp) { assert(matA.rows()==matA.cols()); - int n = matA.rows(); + Index n = matA.rows(); temp.resize(n); - for (int i = 0; i::_compute(MatrixType& matA, CoeffVector template struct HessenbergDecompositionMatrixHReturnType : public ReturnByValue > { + typedef typename MatrixType::Index Index; public: /** \brief Constructor. 
* @@ -337,13 +339,13 @@ template struct HessenbergDecompositionMatrixHReturnType inline void evalTo(ResultType& result) const { result = m_hess.packedMatrix(); - int n = result.rows(); + Index n = result.rows(); if (n>2) result.bottomLeftCorner(n-2, n-2).template triangularView().setZero(); } - int rows() const { return m_hess.packedMatrix().rows(); } - int cols() const { return m_hess.packedMatrix().cols(); } + Index rows() const { return m_hess.packedMatrix().rows(); } + Index cols() const { return m_hess.packedMatrix().cols(); } protected: const HessenbergDecomposition& m_hess; diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h index f9d49c6b7..92ff448ed 100644 --- a/Eigen/src/Eigenvalues/RealSchur.h +++ b/Eigen/src/Eigenvalues/RealSchur.h @@ -77,6 +77,8 @@ template class RealSchur }; typedef typename MatrixType::Scalar Scalar; typedef std::complex::Real> ComplexScalar; + typedef typename MatrixType::Index Index; + typedef Matrix EigenvalueType; typedef Matrix ColumnVectorType; @@ -91,7 +93,7 @@ template class RealSchur * * \sa compute() for an example. */ - RealSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) + RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : m_matT(size, size), m_matU(size, size), m_workspaceVector(size), @@ -177,11 +179,11 @@ template class RealSchur typedef Matrix Vector3s; Scalar computeNormOfT(); - int findSmallSubdiagEntry(int iu, Scalar norm); - void splitOffTwoRows(int iu, Scalar exshift); - void computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo); - void initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector); - void performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace); + Index findSmallSubdiagEntry(Index iu, Scalar norm); + void splitOffTwoRows(Index iu, Scalar exshift); + void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo); + void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector); + void performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace); }; @@ -204,14 +206,14 @@ void RealSchur::compute(const MatrixType& matrix) // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows il,...,iu is the part we are working on (the active window). // Rows iu+1,...,end are already brought in triangular form. - int iu = m_matU.cols() - 1; - int iter = 0; // iteration count + Index iu = m_matU.cols() - 1; + Index iter = 0; // iteration count Scalar exshift = 0.0; // sum of exceptional shifts Scalar norm = computeNormOfT(); while (iu >= 0) { - int il = findSmallSubdiagEntry(iu, norm); + Index il = findSmallSubdiagEntry(iu, norm); // Check for convergence if (il == iu) // One root found @@ -233,7 +235,7 @@ void RealSchur::compute(const MatrixType& matrix) Vector3s firstHouseholderVector, shiftInfo; computeShift(iu, iter, exshift, shiftInfo); iter = iter + 1; // (Could check iteration count here.) 
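// Illustrative note on a cast pattern recurring in this change (example values
// assumed): std::max deduces a single type for both arguments, so once a counter
// such as j is an Index the int literal 0 has to be converted explicitly:
//
//   Index j = someRowIndex;
//   Index start = std::max(j-1, Index(0));   // ok: both arguments are Index
//   // std::max(j-1, 0) would not compile: no match for max(Index, int)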
- int im; + Index im; initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); performFrancisQRStep(il, im, iu, firstHouseholderVector, workspace); } @@ -246,21 +248,21 @@ void RealSchur::compute(const MatrixType& matrix) template inline typename MatrixType::Scalar RealSchur::computeNormOfT() { - const int size = m_matU.cols(); + const Index size = m_matU.cols(); // FIXME to be efficient the following would requires a triangular reduxion code // Scalar norm = m_matT.upper().cwiseAbs().sum() // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); Scalar norm = 0.0; - for (int j = 0; j < size; ++j) - norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum(); + for (Index j = 0; j < size; ++j) + norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum(); return norm; } /** \internal Look for single small sub-diagonal element and returns its index */ template -inline int RealSchur::findSmallSubdiagEntry(int iu, Scalar norm) +inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(Index iu, Scalar norm) { - int res = iu; + Index res = iu; while (res > 0) { Scalar s = ei_abs(m_matT.coeff(res-1,res-1)) + ei_abs(m_matT.coeff(res,res)); @@ -275,9 +277,9 @@ inline int RealSchur::findSmallSubdiagEntry(int iu, Scalar norm) /** \internal Update T given that rows iu-1 and iu decouple from the rest. */ template -inline void RealSchur::splitOffTwoRows(int iu, Scalar exshift) +inline void RealSchur::splitOffTwoRows(Index iu, Scalar exshift) { - const int size = m_matU.cols(); + const Index size = m_matU.cols(); // The eigenvalues of the 2x2 matrix [a b; c d] are // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc @@ -307,7 +309,7 @@ inline void RealSchur::splitOffTwoRows(int iu, Scalar exshift) /** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */ template -inline void RealSchur::computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo) +inline void RealSchur::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo) { shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu); shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1); @@ -317,7 +319,7 @@ inline void RealSchur::computeShift(int iu, int iter, Scalar& exshif if (iter == 10) { exshift += shiftInfo.coeff(0); - for (int i = 0; i <= iu; ++i) + for (Index i = 0; i <= iu; ++i) m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); Scalar s = ei_abs(m_matT.coeff(iu,iu-1)) + ei_abs(m_matT.coeff(iu-1,iu-2)); shiftInfo.coeffRef(0) = Scalar(0.75) * s; @@ -338,7 +340,7 @@ inline void RealSchur::computeShift(int iu, int iter, Scalar& exshif s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s; exshift += s; - for (int i = 0; i <= iu; ++i) + for (Index i = 0; i <= iu; ++i) m_matT.coeffRef(i,i) -= s; shiftInfo.setConstant(Scalar(0.964)); } @@ -347,7 +349,7 @@ inline void RealSchur::computeShift(int iu, int iter, Scalar& exshif /** \internal Compute index im at which Francis QR step starts and the first Householder vector. 
*/ template -inline void RealSchur::initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector) +inline void RealSchur::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector) { Vector3s& v = firstHouseholderVector; // alias to save typing @@ -373,14 +375,14 @@ inline void RealSchur::initFrancisQRStep(int il, int iu, const Vecto /** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */ template -inline void RealSchur::performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace) +inline void RealSchur::performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace) { assert(im >= il); assert(im <= iu-2); - const int size = m_matU.cols(); + const Index size = m_matU.cols(); - for (int k = im; k <= iu-2; ++k) + for (Index k = im; k <= iu-2; ++k) { bool firstIteration = (k == im); @@ -422,7 +424,7 @@ inline void RealSchur::performFrancisQRStep(int il, int im, int iu, } // clean up pollution due to round-off errors - for (int i = im+2; i <= iu; ++i) + for (Index i = im+2; i <= iu; ++i) { m_matT.coeffRef(i,i-2) = Scalar(0); if (i > im+2) diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index 76343640d..2c53655d1 100644 --- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -82,6 +82,7 @@ template class SelfAdjointEigenSolver /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; /** \brief Real scalar type for \p _MatrixType. * @@ -105,7 +106,7 @@ template class SelfAdjointEigenSolver * perform decompositions via compute(const MatrixType&, bool) or * compute(const MatrixType&, const MatrixType&, bool). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use - * SelfAdjointEigenSolver(int) for dynamic-size matrices. + * SelfAdjointEigenSolver(Index) for dynamic-size matrices. 
* * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out @@ -132,7 +133,7 @@ template class SelfAdjointEigenSolver * * \sa compute(const MatrixType&, bool) for an example */ - SelfAdjointEigenSolver(int size) + SelfAdjointEigenSolver(Index size) : m_eivec(size, size), m_eivalues(size), m_tridiag(size), @@ -379,8 +380,8 @@ template class SelfAdjointEigenSolver * Implemented from Golub's "Matrix Computations", algorithm 8.3.2: * "implicit symmetric QR step with Wilkinson shift" */ -template -static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n); +template +static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); template SelfAdjointEigenSolver& SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) @@ -389,7 +390,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver::compute( m_eigenvectorsOk = computeEigenvectors; #endif assert(matrix.cols() == matrix.rows()); - int n = matrix.cols(); + Index n = matrix.cols(); m_eivalues.resize(n,1); m_eivec.resize(n,n); @@ -407,11 +408,11 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver::compute( if (computeEigenvectors) m_eivec = m_tridiag.matrixQ(); - int end = n-1; - int start = 0; + Index end = n-1; + Index start = 0; while (end>0) { - for (int i = start; i& SelfAdjointEigenSolver::compute( // Sort eigenvalues and corresponding vectors. // TODO make the sort optional ? // TODO use a better sort algorithm !! - for (int i = 0; i < n-1; ++i) + for (Index i = 0; i < n-1; ++i) { - int k; + Index k; m_eivalues.segment(i,n-i).minCoeff(&k); if (k > 0) { @@ -473,7 +474,7 @@ compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors { // transform back the eigen vectors: evecs = inv(U) * evecs cholB.matrixU().solveInPlace(m_eivec); - for (int i=0; i -static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n) +template +static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) { RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); RealScalar e2 = ei_abs2(subdiag[end-1]); @@ -491,7 +492,7 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int st RealScalar x = diag[start] - mu; RealScalar z = subdiag[start]; - for (int k = start; k < end; ++k) + for (Index k = start; k < end; ++k) { PlanarRotation rot; rot.makeGivens(x, z); diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h index 6ea852a6b..02917f2e6 100644 --- a/Eigen/src/Eigenvalues/Tridiagonalization.h +++ b/Eigen/src/Eigenvalues/Tridiagonalization.h @@ -67,6 +67,7 @@ template class Tridiagonalization typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; enum { Size = MatrixType::RowsAtCompileTime, @@ -107,7 +108,7 @@ template class Tridiagonalization * * \sa compute() for an example. */ - Tridiagonalization(int size = Size==Dynamic ? 2 : Size) + Tridiagonalization(Index size = Size==Dynamic ? 2 : Size) : m_matrix(size,size), m_hCoeffs(size > 1 ? 
size-1 : 1) {} @@ -324,7 +325,7 @@ template const typename Tridiagonalization::SubDiagonalReturnType Tridiagonalization::subDiagonal() const { - int n = m_matrix.rows(); + Index n = m_matrix.rows(); return Block(m_matrix, 1, 0, n-1,n-1).diagonal(); } @@ -334,7 +335,7 @@ Tridiagonalization::matrixT() const { // FIXME should this function (and other similar ones) rather take a matrix as argument // and fill it ? (to avoid temporaries) - int n = m_matrix.rows(); + Index n = m_matrix.rows(); MatrixType matT = m_matrix; matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast().conjugate(); if (n>2) @@ -363,10 +364,10 @@ template void Tridiagonalization::_compute(MatrixType& matA, CoeffVectorType& hCoeffs) { assert(matA.rows()==matA.cols()); - int n = matA.rows(); - for (int i = 0; i::_compute(MatrixType& matA, CoeffVectorType& template void Tridiagonalization::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { - int n = mat.rows(); + Index n = mat.rows(); ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1); if (n==3 && (!NumTraits::IsComplex) ) { diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h index f3bee6f7d..9d9d96f62 100644 --- a/Eigen/src/Geometry/AlignedBox.h +++ b/Eigen/src/Geometry/AlignedBox.h @@ -45,6 +45,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef NumTraits ScalarTraits; + typedef DenseIndex Index; typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::NonInteger NonInteger; typedef Matrix VectorType; @@ -72,7 +73,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); } /** Constructs a null box with \a _dim the dimension of the ambient space. */ - inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim) + inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim) { setEmpty(); } /** Constructs a box with extremities \a _min and \a _max. */ @@ -91,7 +92,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) ~AlignedBox() {} /** \returns the dimension in which the box holds */ - inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; } + inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? 
m_min.size()-1 : Index(AmbientDimAtCompileTime); } /** \deprecated use isEmpty */ inline bool isNull() const { return isEmpty(); } @@ -157,8 +158,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) VectorType res; - int mult = 1; - for(int d=0; d::squaredExteriorDistance(const Matri const typename ei_nested::type p(a_p.derived()); Scalar dist2 = 0.; Scalar aux; - for (int k=0; k p[k] ) { @@ -332,7 +333,7 @@ inline Scalar AlignedBox::squaredExteriorDistance(const Align { Scalar dist2 = 0.; Scalar aux; - for (int k=0; k b.m_max[k] ) { diff --git a/Eigen/src/Geometry/EulerAngles.h b/Eigen/src/Geometry/EulerAngles.h index 13d23761a..d910cbc92 100644 --- a/Eigen/src/Geometry/EulerAngles.h +++ b/Eigen/src/Geometry/EulerAngles.h @@ -43,7 +43,7 @@ */ template inline Matrix::Scalar,3,1> -MatrixBase::eulerAngles(int a0, int a1, int a2) const +MatrixBase::eulerAngles(Index a0, Index a1, Index a2) const { /* Implemented from Graphics Gems IV */ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) @@ -52,10 +52,10 @@ MatrixBase::eulerAngles(int a0, int a1, int a2) const typedef Matrix Vector2; const Scalar epsilon = NumTraits::dummy_precision(); - const int odd = ((a0+1)%3 == a1) ? 0 : 1; - const int i = a0; - const int j = (a0 + 1 + odd)%3; - const int k = (a0 + 2 - odd)%3; + const Index odd = ((a0+1)%3 == a1) ? 0 : 1; + const Index i = a0; + const Index j = (a0 + 1 + odd)%3; + const Index k = (a0 + 2 - odd)%3; if (a0==a2) { diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h index caea1db41..3077f0921 100644 --- a/Eigen/src/Geometry/Homogeneous.h +++ b/Eigen/src/Geometry/Homogeneous.h @@ -77,10 +77,10 @@ template class Homogeneous : m_matrix(matrix) {} - inline int rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } - inline int cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); } + inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } + inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 
1 : 0); } - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { if( (int(Direction)==Vertical && row==m_matrix.rows()) || (int(Direction)==Horizontal && col==m_matrix.cols())) @@ -223,12 +223,13 @@ struct ei_homogeneous_left_product_impl,Lhs> : public ReturnByValue,Lhs> > { typedef typename ei_cleantype::type LhsNested; + typedef typename MatrixType::Index Index; ei_homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) : m_lhs(lhs), m_rhs(rhs) {} - inline int rows() const { return m_lhs.rows(); } - inline int cols() const { return m_rhs.cols(); } + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { @@ -261,12 +262,13 @@ struct ei_homogeneous_right_product_impl,Rhs> : public ReturnByValue,Rhs> > { typedef typename ei_cleantype::type RhsNested; + typedef typename MatrixType::Index Index; ei_homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} - inline int rows() const { return m_lhs.rows(); } - inline int cols() const { return m_rhs.cols(); } + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } template void evalTo(Dest& dst) const { diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h index 1d0b299ba..8450c9d26 100644 --- a/Eigen/src/Geometry/Hyperplane.h +++ b/Eigen/src/Geometry/Hyperplane.h @@ -51,10 +51,11 @@ public: enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef DenseIndex Index; typedef Matrix VectorType; - typedef Matrix Coefficients; + : Index(AmbientDimAtCompileTime)+1,1> Coefficients; typedef Block NormalReturnType; /** Default constructor without initialization */ @@ -62,7 +63,7 @@ public: /** Constructs a dynamic-size hyperplane with \a _dim the dimension * of the ambient space */ - inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {} + inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {} /** Construct a plane from its normal \a n and a point \a e onto the plane. * \warning the vector normal is assumed to be normalized. @@ -122,7 +123,7 @@ public: ~Hyperplane() {} /** \returns the dimension in which the plane holds */ - inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : AmbientDimAtCompileTime; } + inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? 
m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); } /** normalizes \c *this */ void normalize(void) diff --git a/Eigen/src/Geometry/OrthoMethods.h b/Eigen/src/Geometry/OrthoMethods.h index 265507eb9..ed790cc05 100644 --- a/Eigen/src/Geometry/OrthoMethods.h +++ b/Eigen/src/Geometry/OrthoMethods.h @@ -137,12 +137,13 @@ struct ei_unitOrthogonal_selector typedef typename ei_plain_matrix_type::type VectorType; typedef typename ei_traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename Derived::Index Index; typedef Matrix Vector2; inline static VectorType run(const Derived& src) { VectorType perp = VectorType::Zero(src.size()); - int maxi = 0; - int sndi = 0; + Index maxi = 0; + Index sndi = 0; src.cwiseAbs().maxCoeff(&maxi); if (maxi==0) sndi = 1; diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h index 1846a440a..45c23385d 100644 --- a/Eigen/src/Geometry/ParametrizedLine.h +++ b/Eigen/src/Geometry/ParametrizedLine.h @@ -47,6 +47,7 @@ public: enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef DenseIndex Index; typedef Matrix VectorType; /** Default constructor without initialization */ @@ -54,7 +55,7 @@ public: /** Constructs a dynamic-size line with \a _dim the dimension * of the ambient space */ - inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {} + inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {} /** Initializes a parametrized line of direction \a direction and origin \a origin. * \warning the vector direction is assumed to be normalized. @@ -71,7 +72,7 @@ public: ~ParametrizedLine() {} /** \returns the dimension in which the line holds */ - inline int dim() const { return m_direction.size(); } + inline Index dim() const { return m_direction.size(); } const VectorType& origin() const { return m_origin; } VectorType& origin() { return m_origin; } diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h index 4e054d98a..7d52ebf71 100644 --- a/Eigen/src/Geometry/Quaternion.h +++ b/Eigen/src/Geometry/Quaternion.h @@ -617,6 +617,7 @@ template struct ei_quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; + typedef DenseIndex Index; template inline static void run(QuaternionBase& q, const Other& mat) { // This algorithm comes from "Quaternion Calculus and Fast Animation", @@ -633,13 +634,13 @@ struct ei_quaternionbase_assign_impl } else { - int i = 0; + DenseIndex i = 0; if (mat.coeff(1,1) > mat.coeff(0,0)) i = 1; if (mat.coeff(2,2) > mat.coeff(i,i)) i = 2; - int j = (i+1)%3; - int k = (j+1)%3; + DenseIndex j = (i+1)%3; + DenseIndex k = (j+1)%3; t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); q.coeffs().coeffRef(i) = Scalar(0.5) * t; diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 6a7bb9ac9..207497fc9 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -174,6 +174,7 @@ public: }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; + typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ typedef Matrix MatrixType; /** type of the matrix used to represent the linear part of the transformation */ @@ -270,11 +271,11 @@ public: #endif /** shortcut for m_matrix(row,col); - * \sa MatrixBase::operaror(int,int) const */ - inline Scalar operator() (int row, int col) const { return m_matrix(row,col); } + * \sa 
MatrixBase::operaror(Index,Index) const */ + inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); } /** shortcut for m_matrix(row,col); - * \sa MatrixBase::operaror(int,int) */ - inline Scalar& operator() (int row, int col) { return m_matrix(row,col); } + * \sa MatrixBase::operaror(Index,Index) */ + inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); } /** \returns a read-only expression of the transformation matrix */ inline const MatrixType& matrix() const { return m_matrix; } @@ -1141,7 +1142,7 @@ struct ei_transform_right_product_impl static ResultType run(const TransformType& tr, const Other& other) { TransformType res; - const int Rows = Mode==Projective ? HDim : Dim; + enum { Rows = Mode==Projective ? HDim : Dim }; res.matrix().template block(0,0).noalias() = (tr.linearExt() * other); res.translationExt() += tr.translationExt(); if(Mode!=Affine) diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h index 262d27aa3..5b9fd7725 100644 --- a/Eigen/src/Geometry/Umeyama.h +++ b/Eigen/src/Geometry/Umeyama.h @@ -109,6 +109,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo typedef typename ei_umeyama_transform_matrix_type::type TransformationMatrixType; typedef typename ei_traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename Derived::Index Index; EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT((ei_is_same_type::Scalar>::ret), @@ -120,8 +121,8 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo typedef Matrix MatrixType; typedef typename ei_plain_matrix_type_row_major::type RowMajorMatrixType; - const int m = src.rows(); // dimension - const int n = src.cols(); // number of measurements + const Index m = src.rows(); // dimension + const Index n = src.cols(); // number of measurements // required for demeaning ... const RealScalar one_over_n = 1 / static_cast(n); @@ -151,7 +152,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo // Eq. (40) and (43) const VectorType& d = svd.singularValues(); - int rank = 0; for (int i=0; i 0 ) { Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h index ab7702b14..90c5bf8a2 100644 --- a/Eigen/src/Householder/HouseholderSequence.h +++ b/Eigen/src/Householder/HouseholderSequence.h @@ -53,6 +53,7 @@ template struct ei_traits > { typedef typename VectorsType::Scalar Scalar; + typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? 
ei_traits::RowsAtCompileTime : ei_traits::ColsAtCompileTime, @@ -69,9 +70,10 @@ struct ei_hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k) + typedef typename VectorsType::Index Index; + static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { - const int start = k+1+h.m_shift; + Index start = k+1+h.m_shift; return Block(h.m_vectors, start, k, h.rows()-start, 1); } }; @@ -81,9 +83,10 @@ struct ei_hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k) + typedef typename VectorsType::Index Index; + static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { - const int start = k+1+h.m_shift; + Index start = k+1+h.m_shift; return Block(h.m_vectors, k, start, 1, h.rows()-start).transpose(); } }; @@ -106,6 +109,7 @@ template class HouseholderS MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime }; typedef typename ei_traits::Scalar Scalar; + typedef typename VectorsType::Index Index; typedef typename ei_hseq_side_dependent_impl::EssentialVectorType EssentialVectorType; @@ -126,15 +130,15 @@ template class HouseholderS { } - HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) + HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, Index actualVectors, Index shift) : m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(actualVectors), m_shift(shift) { } - int rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } - int cols() const { return rows(); } + Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } + Index cols() const { return rows(); } - const EssentialVectorType essentialVector(int k) const + const EssentialVectorType essentialVector(Index k) const { ei_assert(k >= 0 && k < m_actualVectors); return ei_hseq_side_dependent_impl::essentialVector(*this, k); @@ -154,13 +158,13 @@ template class HouseholderS /** \internal */ template void evalTo(DestType& dst) const { - int vecs = m_actualVectors; + Index vecs = m_actualVectors; dst.setIdentity(rows(), rows()); Matrix temp(rows()); - for(int k = vecs-1; k >= 0; --k) + for(Index k = vecs-1; k >= 0; --k) { - int cornerSize = rows() - k - m_shift; + Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); @@ -174,9 +178,9 @@ template class HouseholderS template inline void applyThisOnTheRight(Dest& dst) const { Matrix temp(dst.rows()); - for(int k = 0; k < m_actualVectors; ++k) + for(Index k = 0; k < m_actualVectors; ++k) { - int actual_k = m_trans ? m_actualVectors-k-1 : k; + Index actual_k = m_trans ? m_actualVectors-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); } @@ -186,9 +190,9 @@ template class HouseholderS template inline void applyThisOnTheLeft(Dest& dst) const { Matrix temp(dst.cols()); - for(int k = 0; k < m_actualVectors; ++k) + for(Index k = 0; k < m_actualVectors; ++k) { - int actual_k = m_trans ? k : m_actualVectors-k-1; + Index actual_k = m_trans ? 
k : m_actualVectors-k-1; dst.bottomRows(rows()-m_shift-actual_k) .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); } @@ -218,8 +222,8 @@ template class HouseholderS typename VectorsType::Nested m_vectors; typename CoeffsType::Nested m_coeffs; bool m_trans; - int m_actualVectors; - int m_shift; + Index m_actualVectors; + Index m_shift; }; template @@ -229,7 +233,9 @@ HouseholderSequence householderSequence(const VectorsTyp } template -HouseholderSequence householderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) +HouseholderSequence householderSequence + (const VectorsType& v, const CoeffsType& h, + bool trans, typename VectorsType::Index actualVectors, typename VectorsType::Index shift) { return HouseholderSequence(v, h, trans, actualVectors, shift); } @@ -241,7 +247,9 @@ HouseholderSequence rightHouseholderSequence(const Vecto } template -HouseholderSequence rightHouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) +HouseholderSequence rightHouseholderSequence + (const VectorsType& v, const CoeffsType& h, bool trans, + typename VectorsType::Index actualVectors, typename VectorsType::Index shift) { return HouseholderSequence(v, h, trans, actualVectors, shift); } diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h index 024a130f2..f34e1836b 100644 --- a/Eigen/src/Jacobi/Jacobi.h +++ b/Eigen/src/Jacobi/Jacobi.h @@ -74,7 +74,7 @@ template class PlanarRotation PlanarRotation adjoint() const { return PlanarRotation(ei_conj(m_c), -m_s); } template - bool makeJacobi(const MatrixBase&, int p, int q); + bool makeJacobi(const MatrixBase&, typename Derived::Index p, typename Derived::Index q); bool makeJacobi(RealScalar x, Scalar y, RealScalar z); void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0); @@ -89,7 +89,7 @@ template class PlanarRotation /** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ * - * \sa MatrixBase::makeJacobi(const MatrixBase&, int, int), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() + * \sa MatrixBase::makeJacobi(const MatrixBase&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) @@ -133,7 +133,7 @@ bool PlanarRotation::makeJacobi(RealScalar x, Scalar y, RealScalar z) */ template template -inline bool PlanarRotation::makeJacobi(const MatrixBase& m, int p, int q) +inline bool PlanarRotation::makeJacobi(const MatrixBase& m, typename Derived::Index p, typename Derived::Index q) { return makeJacobi(ei_real(m.coeff(p,p)), m.coeff(p,q), ei_real(m.coeff(q,q))); } @@ -277,7 +277,7 @@ void ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotati */ template template -inline void MatrixBase::applyOnTheLeft(int p, int q, const PlanarRotation& j) +inline void MatrixBase::applyOnTheLeft(Index p, Index q, const PlanarRotation& j) { RowXpr x(this->row(p)); RowXpr y(this->row(q)); @@ -292,7 +292,7 @@ inline void MatrixBase::applyOnTheLeft(int p, int q, const PlanarRotati */ template template -inline void MatrixBase::applyOnTheRight(int p, int q, const PlanarRotation& j) +inline void MatrixBase::applyOnTheRight(Index p, Index q, const PlanarRotation& j) { ColXpr x(this->col(p)); 
ColXpr y(this->col(q)); @@ -303,11 +303,12 @@ inline void MatrixBase::applyOnTheRight(int p, int q, const PlanarRotat template void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation& j) { + typedef typename VectorX::Index Index; typedef typename VectorX::Scalar Scalar; ei_assert(_x.size() == _y.size()); - int size = _x.size(); - int incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0); - int incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0); + Index size = _x.size(); + Index incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0); + Index incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0); Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0); Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0); @@ -318,14 +319,14 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& typedef typename ei_packet_traits::type Packet; enum { PacketSize = ei_packet_traits::size, Peeling = 2 }; - int alignedStart = ei_first_aligned(y, size); - int alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; + Index alignedStart = ei_first_aligned(y, size); + Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; const Packet pc = ei_pset1(Scalar(j.c())); const Packet ps = ei_pset1(Scalar(j.s())); ei_conj_helper::IsComplex,false> cj; - for(int i=0; i class FullPivLU }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_plain_row_type::type IntRowVectorType; - typedef typename ei_plain_col_type::type IntColVectorType; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + typedef typename ei_plain_row_type::type IntRowVectorType; + typedef typename ei_plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; typedef PermutationMatrix PermutationPType; @@ -87,7 +89,7 @@ template class FullPivLU * according to the specified problem \a size. * \sa FullPivLU() */ - FullPivLU(int rows, int cols); + FullPivLU(Index rows, Index cols); /** Constructor. * @@ -124,7 +126,7 @@ template class FullPivLU * * \sa rank() */ - inline int nonzeroPivots() const + inline Index nonzeroPivots() const { ei_assert(m_isInitialized && "LU is not initialized."); return m_nonzero_pivots; @@ -301,12 +303,12 @@ template class FullPivLU * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ - inline int rank() const + inline Index rank() const { ei_assert(m_isInitialized && "LU is not initialized."); RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); - int result = 0; - for(int i = 0; i < m_nonzero_pivots; ++i) + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) result += (ei_abs(m_lu.coeff(i,i)) > premultiplied_threshold); return result; } @@ -317,7 +319,7 @@ template class FullPivLU * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). 
*/ - inline int dimensionOfKernel() const + inline Index dimensionOfKernel() const { ei_assert(m_isInitialized && "LU is not initialized."); return cols() - rank(); @@ -378,8 +380,8 @@ template class FullPivLU MatrixType reconstructedMatrix() const; - inline int rows() const { return m_lu.rows(); } - inline int cols() const { return m_lu.cols(); } + inline Index rows() const { return m_lu.rows(); } + inline Index cols() const { return m_lu.cols(); } protected: MatrixType m_lu; @@ -387,7 +389,7 @@ template class FullPivLU PermutationQType m_q; IntColVectorType m_rowsTranspositions; IntRowVectorType m_colsTranspositions; - int m_det_pq, m_nonzero_pivots; + Index m_det_pq, m_nonzero_pivots; RealScalar m_maxpivot, m_prescribedThreshold; bool m_isInitialized, m_usePrescribedThreshold; }; @@ -399,7 +401,7 @@ FullPivLU::FullPivLU() } template -FullPivLU::FullPivLU(int rows, int cols) +FullPivLU::FullPivLU(Index rows, Index cols) : m_lu(rows, cols), m_p(rows), m_q(cols), @@ -429,26 +431,26 @@ FullPivLU& FullPivLU::compute(const MatrixType& matrix) m_isInitialized = true; m_lu = matrix; - const int size = matrix.diagonalSize(); - const int rows = matrix.rows(); - const int cols = matrix.cols(); + const Index size = matrix.diagonalSize(); + const Index rows = matrix.rows(); + const Index cols = matrix.cols(); // will store the transpositions, before we accumulate them at the end. // can't accumulate on-the-fly because that will be done in reverse order for the rows. m_rowsTranspositions.resize(matrix.rows()); m_colsTranspositions.resize(matrix.cols()); - int number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i + Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); RealScalar cutoff(0); - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { // First, we need to find the pivot. // biggest coefficient in the remaining bottom-right corner (starting at row k, col k) - int row_of_biggest_in_corner, col_of_biggest_in_corner; + Index row_of_biggest_in_corner, col_of_biggest_in_corner; RealScalar biggest_in_corner; biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k) .cwiseAbs() @@ -468,7 +470,7 @@ FullPivLU& FullPivLU::compute(const MatrixType& matrix) // before exiting, make sure to initialize the still uninitialized transpositions // in a sane state without destroying what we already have. m_nonzero_pivots = k; - for(int i = k; i < size; ++i) + for(Index i = k; i < size; ++i) { m_rowsTranspositions.coeffRef(i) = i; m_colsTranspositions.coeffRef(i) = i; @@ -505,11 +507,11 @@ FullPivLU& FullPivLU::compute(const MatrixType& matrix) // permutations P and Q m_p.setIdentity(rows); - for(int k = size-1; k >= 0; --k) + for(Index k = size-1; k >= 0; --k) m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); m_q.setIdentity(cols); - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); m_det_pq = (number_of_transpositions%2) ? 
-1 : 1; @@ -531,7 +533,7 @@ template MatrixType FullPivLU::reconstructedMatrix() const { ei_assert(m_isInitialized && "LU is not initialized."); - const int smalldim = std::min(m_lu.rows(), m_lu.cols()); + const Index smalldim = std::min(m_lu.rows(), m_lu.cols()); // LU MatrixType res(m_lu.rows(),m_lu.cols()); // FIXME the .toDenseMatrix() should not be needed... @@ -564,7 +566,7 @@ struct ei_kernel_retval > template void evalTo(Dest& dst) const { - const int cols = dec().matrixLU().cols(), dimker = cols - rank(); + const Index cols = dec().matrixLU().cols(), dimker = cols - rank(); if(dimker == 0) { // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's @@ -590,10 +592,10 @@ struct ei_kernel_retval > * independent vectors in Ker U. */ - Matrix pivots(rank()); + Matrix pivots(rank()); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); - int p = 0; - for(int i = 0; i < dec().nonzeroPivots(); ++i) + Index p = 0; + for(Index i = 0; i < dec().nonzeroPivots(); ++i) if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; ei_internal_assert(p == rank()); @@ -605,14 +607,14 @@ struct ei_kernel_retval > Matrix m(dec().matrixLU().block(0, 0, rank(), cols)); - for(int i = 0; i < rank(); ++i) + for(Index i = 0; i < rank(); ++i) { if(i) m.row(i).head(i).setZero(); m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i); } m.block(0, 0, rank(), rank()); m.block(0, 0, rank(), rank()).template triangularView().setZero(); - for(int i = 0; i < rank(); ++i) + for(Index i = 0; i < rank(); ++i) m.col(i).swap(m.col(pivots.coeff(i))); // ok, we have our trapezoid matrix, we can apply the triangular solver. @@ -624,13 +626,13 @@ struct ei_kernel_retval > ); // now we must undo the column permutation that we had applied! - for(int i = rank()-1; i >= 0; --i) + for(Index i = rank()-1; i >= 0; --i) m.col(i).swap(m.col(pivots.coeff(i))); // see the negative sign in the next line, that's what we were talking about above. 
- for(int i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker); - for(int i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); - for(int k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1); + for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker); + for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); + for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1); } }; @@ -658,15 +660,15 @@ struct ei_image_retval > return; } - Matrix pivots(rank()); + Matrix pivots(rank()); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); - int p = 0; - for(int i = 0; i < dec().nonzeroPivots(); ++i) + Index p = 0; + for(Index i = 0; i < dec().nonzeroPivots(); ++i) if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; ei_internal_assert(p == rank()); - for(int i = 0; i < rank(); ++i) + for(Index i = 0; i < rank(); ++i) dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i))); } }; @@ -689,10 +691,10 @@ struct ei_solve_retval, Rhs> * Step 4: result = Q * c; */ - const int rows = dec().rows(), cols = dec().cols(), + const Index rows = dec().rows(), cols = dec().cols(), nonzero_pivots = dec().nonzeroPivots(); ei_assert(rhs().rows() == rows); - const int smalldim = std::min(rows, cols); + const Index smalldim = std::min(rows, cols); if(nonzero_pivots == 0) { @@ -724,9 +726,9 @@ struct ei_solve_retval, Rhs> .solveInPlace(c.topRows(nonzero_pivots)); // Step 4 - for(int i = 0; i < nonzero_pivots; ++i) + for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i); - for(int i = nonzero_pivots; i < dec().matrixLU().cols(); ++i) + for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); } }; diff --git a/Eigen/src/LU/Inverse.h b/Eigen/src/LU/Inverse.h index 1e9d69a22..ed1724dda 100644 --- a/Eigen/src/LU/Inverse.h +++ b/Eigen/src/LU/Inverse.h @@ -281,7 +281,8 @@ struct ei_traits > template struct ei_inverse_impl : public ReturnByValue > { - typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename MatrixType::Index Index; + typedef typename ei_eval::type MatrixTypeNested; typedef typename ei_cleantype::type MatrixTypeNestedCleaned; const MatrixTypeNested m_matrix; @@ -290,8 +291,8 @@ struct ei_inverse_impl : public ReturnByValue > : m_matrix(matrix) {} - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } template inline void evalTo(Dest& dst) const { diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index 695b7d75c..39c348e5e 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -71,7 +71,9 @@ template class PartialPivLU }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename ei_plain_col_type::type PermutationVectorType; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + typedef typename ei_plain_col_type::type PermutationVectorType; typedef PermutationMatrix PermutationType; @@ -89,7 +91,7 @@ template class PartialPivLU * 
according to the specified problem \a size. * \sa PartialPivLU() */ - PartialPivLU(int size); + PartialPivLU(Index size); /** Constructor. * @@ -178,14 +180,14 @@ template class PartialPivLU MatrixType reconstructedMatrix() const; - inline int rows() const { return m_lu.rows(); } - inline int cols() const { return m_lu.cols(); } + inline Index rows() const { return m_lu.rows(); } + inline Index cols() const { return m_lu.cols(); } protected: MatrixType m_lu; PermutationType m_p; PermutationVectorType m_rowsTranspositions; - int m_det_p; + Index m_det_p; bool m_isInitialized; }; @@ -200,7 +202,7 @@ PartialPivLU::PartialPivLU() } template -PartialPivLU::PartialPivLU(int size) +PartialPivLU::PartialPivLU(Index size) : m_lu(size, size), m_p(size), m_rowsTranspositions(size), @@ -233,6 +235,7 @@ struct ei_partial_lu_impl typedef Block MatrixType; typedef Block BlockType; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; /** \internal performs the LU decomposition in-place of the matrix \a lu * using an unblocked algorithm. @@ -246,14 +249,14 @@ struct ei_partial_lu_impl * undefined coefficients (to avoid generating inf/nan values). Returns true * otherwise. */ - static bool unblocked_lu(MatrixType& lu, int* row_transpositions, int& nb_transpositions) + static bool unblocked_lu(MatrixType& lu, Index* row_transpositions, Index& nb_transpositions) { - const int rows = lu.rows(); - const int size = std::min(lu.rows(),lu.cols()); + const Index rows = lu.rows(); + const Index size = std::min(lu.rows(),lu.cols()); nb_transpositions = 0; - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { - int row_of_biggest_in_col; + Index row_of_biggest_in_col; RealScalar biggest_in_corner = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col); row_of_biggest_in_col += k; @@ -265,7 +268,7 @@ struct ei_partial_lu_impl // the blocked_lu code can't guarantee the same. // before exiting, make sure to initialize the still uninitialized row_transpositions // in a sane state without destroying what we already have. 
- for(int i = k; i < size; i++) + for(Index i = k; i < size; i++) row_transpositions[i] = i; return false; } @@ -280,8 +283,8 @@ struct ei_partial_lu_impl if(k > > */ - static bool blocked_lu(int rows, int cols, Scalar* lu_data, int luStride, int* row_transpositions, int& nb_transpositions, int maxBlockSize=256) + static bool blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, Index* row_transpositions, Index& nb_transpositions, Index maxBlockSize=256) { MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); MatrixType lu(lu1,0,0,rows,cols); - const int size = std::min(rows,cols); + const Index size = std::min(rows,cols); // if the matrix is too small, no blocking: if(size<=16) @@ -321,19 +324,19 @@ struct ei_partial_lu_impl // automatically adjust the number of subdivisions to the size // of the matrix so that there is enough sub blocks: - int blockSize; + Index blockSize; { blockSize = size/8; blockSize = (blockSize/16)*16; - blockSize = std::min(std::max(blockSize,8), maxBlockSize); + blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize); } nb_transpositions = 0; - for(int k = 0; k < size; k+=blockSize) + for(Index k = 0; k < size; k+=blockSize) { - int bs = std::min(size-k,blockSize); // actual size of the block - int trows = rows - k - bs; // trailing rows - int tsize = size - k - bs; // trailing size + Index bs = std::min(size-k,blockSize); // actual size of the block + Index trows = rows - k - bs; // trailing rows + Index tsize = size - k - bs; // trailing size // partition the matrix: // A00 | A01 | A02 @@ -346,7 +349,7 @@ struct ei_partial_lu_impl BlockType A21(lu,k+bs,k,trows,bs); BlockType A22(lu,k+bs,k+bs,trows,tsize); - int nb_transpositions_in_panel; + Index nb_transpositions_in_panel; // recursively calls the blocked LU algorithm with a very small // blocking size: if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride, @@ -355,23 +358,23 @@ struct ei_partial_lu_impl // end quickly with undefined coefficients, just avoid generating inf/nan values. // before exiting, make sure to initialize the still uninitialized row_transpositions // in a sane state without destroying what we already have. - for(int i=k; i -void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, int& nb_transpositions) +void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, typename MatrixType::Index& nb_transpositions) { ei_assert(lu.cols() == row_transpositions.size()); ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); @@ -403,16 +406,16 @@ PartialPivLU& PartialPivLU::compute(const MatrixType& ma m_lu = matrix; ei_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); - const int size = matrix.rows(); + const Index size = matrix.rows(); m_rowsTranspositions.resize(size); - int nb_transpositions; + Index nb_transpositions; ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); m_det_p = (nb_transpositions%2) ? 
-1 : 1; m_p.setIdentity(size); - for(int k = size-1; k >= 0; --k) + for(Index k = size-1; k >= 0; --k) m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); m_isInitialized = true; diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index fbc80adf2..b4bcfd529 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -56,10 +56,11 @@ template class ColPivHouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; typedef Matrix MatrixQType; typedef typename ei_plain_diag_type::type HCoeffsType; typedef PermutationMatrix PermutationType; - typedef typename ei_plain_row_type::type IntRowVectorType; + typedef typename ei_plain_row_type::type IntRowVectorType; typedef typename ei_plain_row_type::type RowVectorType; typedef typename ei_plain_row_type::type RealRowVectorType; typedef typename HouseholderSequence::ConjugateReturnType HouseholderSequenceType; @@ -85,7 +86,7 @@ template class ColPivHouseholderQR * according to the specified problem \a size. * \sa ColPivHouseholderQR() */ - ColPivHouseholderQR(int rows, int cols) + ColPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs(std::min(rows,cols)), m_colsPermutation(cols), @@ -186,12 +187,12 @@ template class ColPivHouseholderQR * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ - inline int rank() const + inline Index rank() const { ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); - int result = 0; - for(int i = 0; i < m_nonzero_pivots; ++i) + Index result = 0; + for(Index i = 0; i < m_nonzero_pivots; ++i) result += (ei_abs(m_qr.coeff(i,i)) > premultiplied_threshold); return result; } @@ -202,7 +203,7 @@ template class ColPivHouseholderQR * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). 
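As an aside, a small usage example of the rank-revealing interface whose return types change in this hunk, assuming the post-change Eigen 3-style dense API; the matrix values are made up:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 1, 2, 3,
           2, 4, 6,   // second row is a multiple of the first, so A is rank deficient
           1, 0, 1;
      Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
      // rank() and dimensionOfKernel() now return MatrixXd::Index rather than int.
      Eigen::MatrixXd::Index r = qr.rank();
      std::cout << "rank = " << r
                << ", kernel dim = " << qr.dimensionOfKernel() << "\n";
      return 0;
    }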
*/ - inline int dimensionOfKernel() const + inline Index dimensionOfKernel() const { ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return cols() - rank(); @@ -260,8 +261,8 @@ template class ColPivHouseholderQR (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); } - inline int rows() const { return m_qr.rows(); } - inline int cols() const { return m_qr.cols(); } + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return m_qr.cols(); } const HCoeffsType& hCoeffs() const { return m_hCoeffs; } /** Allows to prescribe a threshold to be used by certain methods, such as rank(), @@ -320,7 +321,7 @@ template class ColPivHouseholderQR * * \sa rank() */ - inline int nonzeroPivots() const + inline Index nonzeroPivots() const { ei_assert(m_isInitialized && "LU is not initialized."); return m_nonzero_pivots; @@ -340,8 +341,8 @@ template class ColPivHouseholderQR RealRowVectorType m_colSqNorms; bool m_isInitialized, m_usePrescribedThreshold; RealScalar m_prescribedThreshold, m_maxpivot; - int m_nonzero_pivots; - int m_det_pq; + Index m_nonzero_pivots; + Index m_det_pq; }; #ifndef EIGEN_HIDE_HEAVY_CODE @@ -365,9 +366,9 @@ typename MatrixType::RealScalar ColPivHouseholderQR::logAbsDetermina template ColPivHouseholderQR& ColPivHouseholderQR::compute(const MatrixType& matrix) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int size = matrix.diagonalSize(); + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index size = matrix.diagonalSize(); m_qr = matrix; m_hCoeffs.resize(size); @@ -375,10 +376,10 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const m_temp.resize(cols); m_colsTranspositions.resize(matrix.cols()); - int number_of_transpositions = 0; + Index number_of_transpositions = 0; m_colSqNorms.resize(cols); - for(int k = 0; k < cols; ++k) + for(Index k = 0; k < cols; ++k) m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm(); RealScalar threshold_helper = m_colSqNorms.maxCoeff() * ei_abs2(NumTraits::epsilon()) / rows; @@ -386,10 +387,10 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { // first, we look up in our table m_colSqNorms which column has the biggest squared norm - int biggest_col_index; + Index biggest_col_index; RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index); biggest_col_index += k; @@ -444,7 +445,7 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const } m_colsPermutation.setIdentity(cols); - for(int k = 0; k < m_nonzero_pivots; ++k) + for(Index k = 0; k < m_nonzero_pivots; ++k) m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); m_det_pq = (number_of_transpositions%2) ? 
-1 : 1; @@ -461,12 +462,10 @@ struct ei_solve_retval, Rhs> template void evalTo(Dest& dst) const { -#ifndef EIGEN_NO_DEBUG - const int rows = dec().rows(); - ei_assert(rhs().rows() == rows); -#endif + ei_assert(rhs().rows() == dec().rows()); + const int cols = dec().cols(), - nonzero_pivots = dec().nonzeroPivots(); + nonzero_pivots = dec().nonzeroPivots(); if(nonzero_pivots == 0) { @@ -498,8 +497,8 @@ struct ei_solve_retval, Rhs> .template triangularView() * c.topRows(nonzero_pivots); - for(int i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); - for(int i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); + for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); + for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); } }; diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index 0195e1330..3b4d02d67 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -56,11 +56,12 @@ template class FullPivHouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; typedef Matrix MatrixQType; typedef typename ei_plain_diag_type::type HCoeffsType; - typedef Matrix IntRowVectorType; + typedef Matrix IntRowVectorType; typedef PermutationMatrix PermutationType; - typedef typename ei_plain_col_type::type IntColVectorType; + typedef typename ei_plain_col_type::type IntColVectorType; typedef typename ei_plain_row_type::type RowVectorType; typedef typename ei_plain_col_type::type ColVectorType; @@ -84,7 +85,7 @@ template class FullPivHouseholderQR * according to the specified problem \a size. * \sa FullPivHouseholderQR() */ - FullPivHouseholderQR(int rows, int cols) + FullPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs(std::min(rows,cols)), m_rows_transpositions(rows), @@ -188,7 +189,7 @@ template class FullPivHouseholderQR * \note This is computed at the time of the construction of the QR decomposition. This * method does not perform any further computation. */ - inline int rank() const + inline Index rank() const { ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_rank; @@ -199,7 +200,7 @@ template class FullPivHouseholderQR * \note Since the rank is computed at the time of the construction of the QR decomposition, this * method almost does not perform any further computation. 
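For context, compute() above records the pivoting as a list of transpositions and only afterwards rebuilds the permutation. A sketch of that idea with Index-typed counters; the helper is hypothetical, not library code, and the forward traversal mirrors the loop in ColPivHouseholderQR::compute shown above:

    #include <Eigen/Dense>

    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic>
    permutation_from_transpositions(const Eigen::VectorXi& transpositions)
    {
      typedef Eigen::VectorXi::Index Index;
      const Index size = transpositions.size();
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> p(size);
      p.setIdentity();
      for(Index k = 0; k < size; ++k)               // order matters; follows compute()
        p.applyTranspositionOnTheRight(k, transpositions.coeff(k));
      return p;
    }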
*/ - inline int dimensionOfKernel() const + inline Index dimensionOfKernel() const { ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_qr.cols() - m_rank; @@ -253,8 +254,8 @@ template class FullPivHouseholderQR (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); } - inline int rows() const { return m_qr.rows(); } - inline int cols() const { return m_qr.cols(); } + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return m_qr.cols(); } const HCoeffsType& hCoeffs() const { return m_hCoeffs; } protected: @@ -266,8 +267,8 @@ template class FullPivHouseholderQR RowVectorType m_temp; bool m_isInitialized; RealScalar m_precision; - int m_rank; - int m_det_pq; + Index m_rank; + Index m_det_pq; }; #ifndef EIGEN_HIDE_HEAVY_CODE @@ -291,9 +292,9 @@ typename MatrixType::RealScalar FullPivHouseholderQR::logAbsDetermin template FullPivHouseholderQR& FullPivHouseholderQR::compute(const MatrixType& matrix) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int size = std::min(rows,cols); + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index size = std::min(rows,cols); m_rank = size; m_qr = matrix; @@ -305,13 +306,13 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons m_rows_transpositions.resize(matrix.rows()); m_cols_transpositions.resize(matrix.cols()); - int number_of_transpositions = 0; + Index number_of_transpositions = 0; RealScalar biggest(0); - for (int k = 0; k < size; ++k) + for (Index k = 0; k < size; ++k) { - int row_of_biggest_in_corner, col_of_biggest_in_corner; + Index row_of_biggest_in_corner, col_of_biggest_in_corner; RealScalar biggest_in_corner; biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k) @@ -325,7 +326,7 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) { m_rank = k; - for(int i = k; i < size; i++) + for(Index i = k; i < size; i++) { m_rows_transpositions.coeffRef(i) = i; m_cols_transpositions.coeffRef(i) = i; @@ -354,7 +355,7 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons } m_cols_permutation.setIdentity(cols); - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k)); m_det_pq = (number_of_transpositions%2) ? -1 : 1; @@ -371,7 +372,7 @@ struct ei_solve_retval, Rhs> template void evalTo(Dest& dst) const { - const int rows = dec().rows(), cols = dec().cols(); + const Index rows = dec().rows(), cols = dec().cols(); ei_assert(rhs().rows() == rows); // FIXME introduce nonzeroPivots() and use it here. 
and more generally, @@ -385,9 +386,9 @@ struct ei_solve_retval, Rhs> typename Rhs::PlainObject c(rhs()); Matrix temp(rhs().cols()); - for (int k = 0; k < dec().rank(); ++k) + for (Index k = 0; k < dec().rank(); ++k) { - int remainingSize = rows-k; + Index remainingSize = rows-k; c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k))); c.bottomRightCorner(remainingSize, rhs().cols()) .applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1), @@ -409,8 +410,8 @@ struct ei_solve_retval, Rhs> .template triangularView() .solveInPlace(c.topRows(dec().rank())); - for(int i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); - for(int i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); + for(Index i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); + for(Index i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); } }; @@ -422,12 +423,12 @@ typename FullPivHouseholderQR::MatrixQType FullPivHouseholderQR temp(rows); - for (int k = size-1; k >= 0; k--) + for (Index k = size-1; k >= 0; k--) { res.block(k, k, rows-k, rows-k) .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h index 6a2883939..a8caaccea 100644 --- a/Eigen/src/QR/HouseholderQR.h +++ b/Eigen/src/QR/HouseholderQR.h @@ -60,6 +60,7 @@ template class HouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; typedef Matrix::Flags&RowMajorBit ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType; typedef typename ei_plain_diag_type::type HCoeffsType; typedef typename ei_plain_row_type::type RowVectorType; @@ -79,7 +80,7 @@ template class HouseholderQR * according to the specified problem \a size. 
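The constructors that merely preallocate storage, like the one documented here, now take Index sizes as well. A small example of that calling pattern, assuming the Eigen 3-style dense API:

    #include <Eigen/Dense>

    int main()
    {
      typedef Eigen::MatrixXd::Index Index;
      const Index rows = 100, cols = 40;

      // The size-only constructor preallocates the internal storage so that a
      // later compute() does not have to reallocate.
      Eigen::HouseholderQR<Eigen::MatrixXd> qr(rows, cols);
      qr.compute(Eigen::MatrixXd::Random(rows, cols));
      return 0;
    }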
* \sa HouseholderQR() */ - HouseholderQR(int rows, int cols) + HouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs(std::min(rows,cols)), m_temp(cols), @@ -165,8 +166,8 @@ template class HouseholderQR */ typename MatrixType::RealScalar logAbsDeterminant() const; - inline int rows() const { return m_qr.rows(); } - inline int cols() const { return m_qr.cols(); } + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return m_qr.cols(); } const HCoeffsType& hCoeffs() const { return m_hCoeffs; } protected: @@ -197,19 +198,19 @@ typename MatrixType::RealScalar HouseholderQR::logAbsDeterminant() c template HouseholderQR& HouseholderQR::compute(const MatrixType& matrix) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int size = std::min(rows,cols); + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index size = std::min(rows,cols); m_qr = matrix; m_hCoeffs.resize(size); m_temp.resize(cols); - for(int k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { - int remainingRows = rows - k; - int remainingCols = cols - k - 1; + Index remainingRows = rows - k; + Index remainingCols = cols - k - 1; RealScalar beta; m_qr.col(k).tail(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); @@ -231,8 +232,8 @@ struct ei_solve_retval, Rhs> template void evalTo(Dest& dst) const { - const int rows = dec().rows(), cols = dec().cols(); - const int rank = std::min(rows, cols); + const Index rows = dec().rows(), cols = dec().cols(); + const Index rank = std::min(rows, cols); ei_assert(rhs().rows() == rows); typename Rhs::PlainObject c(rhs()); diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h index 9323c0180..292530657 100644 --- a/Eigen/src/SVD/JacobiSVD.h +++ b/Eigen/src/SVD/JacobiSVD.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009-2010 Benoit Jacob // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -63,6 +63,7 @@ template class JacobiSVD private: typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; enum { ComputeU = (Options & SkipU) == 0, ComputeV = (Options & SkipV) == 0, @@ -107,7 +108,7 @@ template class JacobiSVD * according to the specified problem \a size. 
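A brief usage sketch of where Index surfaces on the SVD side. It is written with the later ComputeThinU/ComputeThinV calling convention rather than the Options template parameter used in this file, so those flags are an assumption of the example, not part of the patch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      typedef Eigen::MatrixXd::Index Index;
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);

      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      // sizes and loop bounds on the SVD side are Index after this change
      const Index diagSize = svd.singularValues().size();
      for(Index i = 0; i < diagSize; ++i)
        std::cout << svd.singularValues()(i) << "\n";
      return 0;
    }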
* \sa JacobiSVD() */ - JacobiSVD(int rows, int cols) : m_matrixU(rows, rows), + JacobiSVD(Index rows, Index cols) : m_matrixU(rows, rows), m_matrixV(cols, cols), m_singularValues(std::min(rows, cols)), m_workMatrix(rows, cols), @@ -119,7 +120,7 @@ template class JacobiSVD m_workMatrix(), m_isInitialized(false) { - const int minSize = std::min(matrix.rows(), matrix.cols()); + const Index minSize = std::min(matrix.rows(), matrix.cols()); m_singularValues.resize(minSize); m_workMatrix.resize(minSize, minSize); compute(matrix); @@ -164,7 +165,8 @@ template struct ei_svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; - static void run(typename SVD::WorkMatrixType&, JacobiSVD&, int, int) {} + typedef typename SVD::Index Index; + static void run(typename SVD::WorkMatrixType&, JacobiSVD&, Index, Index) {} }; template @@ -173,8 +175,9 @@ struct ei_svd_precondition_2x2_block_to_be_real typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename SVD::Index Index; enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV }; - static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD& svd, int p, int q) + static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD& svd, Index p, Index q) { Scalar z; PlanarRotation rot; @@ -210,8 +213,8 @@ struct ei_svd_precondition_2x2_block_to_be_real } }; -template -void ei_real_2x2_jacobi_svd(const MatrixType& matrix, int p, int q, +template +void ei_real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, PlanarRotation *j_left, PlanarRotation *j_right) { @@ -250,12 +253,13 @@ struct ei_svd_precondition_if_more_rows_than_cols typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV }; static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int diagSize = cols; + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index diagSize = cols; if(rows > cols) { FullPivHouseholderQR qr(matrix); @@ -282,6 +286,7 @@ struct ei_svd_precondition_if_more_cols_than_rows typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV, @@ -294,9 +299,9 @@ struct ei_svd_precondition_if_more_cols_than_rows static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int diagSize = rows; + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index diagSize = rows; if(cols > rows) { typedef Matrix template JacobiSVD& JacobiSVD::compute(const MatrixType& matrix) { - int rows = matrix.rows(); - int cols = matrix.cols(); - int diagSize = std::min(rows, cols); + Index rows = matrix.rows(); + Index cols = matrix.cols(); + Index diagSize = std::min(rows, cols); m_singularValues.resize(diagSize); const RealScalar precision = 2 * NumTraits::epsilon(); @@ -333,9 +338,9 @@ JacobiSVD& JacobiSVD::compute(const Ma while(!finished) { finished = true; - for(int p = 1; p < diagSize; ++p) + for(Index p = 1; p < diagSize; ++p) { - for(int q = 0; q < p; ++q) + for(Index q = 0; q < p; ++q) { if(std::max(ei_abs(m_workMatrix.coeff(p,q)),ei_abs(m_workMatrix.coeff(q,p))) > 
std::max(ei_abs(m_workMatrix.coeff(p,p)),ei_abs(m_workMatrix.coeff(q,q)))*precision) @@ -356,16 +361,16 @@ JacobiSVD& JacobiSVD::compute(const Ma } } - for(int i = 0; i < diagSize; ++i) + for(Index i = 0; i < diagSize; ++i) { RealScalar a = ei_abs(m_workMatrix.coeff(i,i)); m_singularValues.coeffRef(i) = a; if(ComputeU && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; } - for(int i = 0; i < diagSize; i++) + for(Index i = 0; i < diagSize; i++) { - int pos; + Index pos; m_singularValues.tail(diagSize-i).maxCoeff(&pos); if(pos) { diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h index a9e22dfd4..736056230 100644 --- a/Eigen/src/SVD/SVD.h +++ b/Eigen/src/SVD/SVD.h @@ -46,6 +46,7 @@ template class SVD typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, @@ -79,7 +80,7 @@ template class SVD * according to the specified problem \a size. * \sa JacobiSVD() */ - SVD(int rows, int cols) : m_matU(rows, rows), + SVD(Index rows, Index cols) : m_matU(rows, rows), m_matV(cols,cols), m_sigma(std::min(rows, cols)), m_workMatrix(rows, cols), @@ -143,13 +144,13 @@ template class SVD template void computeScalingRotation(ScalingType *positive, RotationType *unitary) const; - inline int rows() const + inline Index rows() const { ei_assert(m_isInitialized && "SVD is not initialized."); return m_rows; } - inline int cols() const + inline Index cols() const { ei_assert(m_isInitialized && "SVD is not initialized."); return m_cols; @@ -182,7 +183,7 @@ template class SVD MatrixType m_workMatrix; RowVector m_rv1; bool m_isInitialized; - int m_rows, m_cols; + Index m_rows, m_cols; }; /** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix @@ -194,8 +195,8 @@ template class SVD template SVD& SVD::compute(const MatrixType& matrix) { - const int m = m_rows = matrix.rows(); - const int n = m_cols = matrix.cols(); + const Index m = m_rows = matrix.rows(); + const Index n = m_cols = matrix.cols(); m_matU.resize(m, m); m_matU.setZero(); @@ -203,14 +204,14 @@ SVD& SVD::compute(const MatrixType& matrix) m_matV.resize(n,n); m_workMatrix = matrix; - int max_iters = 30; + Index max_iters = 30; MatrixVType& V = m_matV; MatrixType& A = m_workMatrix; SingularValuesType& W = m_sigma; bool flag; - int i=0,its=0,j=0,k=0,l=0,nm=0; + Index i=0,its=0,j=0,k=0,l=0,nm=0; Scalar anorm, c, f, g, h, s, scale, x, y, z; bool convergence = true; Scalar eps = NumTraits::dummy_precision(); @@ -426,9 +427,9 @@ SVD& SVD::compute(const MatrixType& matrix) // sort the singular values: { - for (int i=0; i, Rhs> { ei_assert(rhs().rows() == dec().rows()); - for (int j=0; j aux = dec().matrixU().adjoint() * rhs().col(j); - for (int i = 0; i < dec().rows(); ++i) + for (Index i = 0; i < dec().rows(); ++i) { Scalar si = dec().singularValues().coeff(i); if(si == RealScalar(0)) @@ -471,7 +472,7 @@ struct ei_solve_retval, Rhs> else aux.coeffRef(i) /= si; } - const int minsize = std::min(dec().rows(),dec().cols()); + const Index minsize = std::min(dec().rows(),dec().cols()); dst.col(j).head(minsize) = aux.head(minsize); if(dec().cols()>dec().rows()) dst.col(j).tail(cols()-minsize).setZero(); dst.col(j) = dec().matrixV() * dst.col(j); diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h index 53e04076a..1e1355b52 100644 --- a/Eigen/src/SVD/UpperBidiagonalization.h +++ b/Eigen/src/SVD/UpperBidiagonalization.h @@ -37,6 
+37,7 @@ template class UpperBidiagonalization }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; typedef Matrix RowVectorType; typedef Matrix ColVectorType; typedef BandMatrix BidiagonalType; @@ -95,8 +96,8 @@ template class UpperBidiagonalization template UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix) { - int rows = matrix.rows(); - int cols = matrix.cols(); + Index rows = matrix.rows(); + Index cols = matrix.cols(); ei_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); @@ -104,10 +105,10 @@ UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::comput ColVectorType temp(rows); - for (int k = 0; /* breaks at k==cols-1 below */ ; ++k) + for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k) { - int remainingRows = rows - k; - int remainingCols = cols - k - 1; + Index remainingRows = rows - k; + Index remainingCols = cols - k - 1; // construct left householder transform in-place in m_householder m_householder.col(k).tail(remainingRows) diff --git a/Eigen/src/Sparse/AmbiVector.h b/Eigen/src/Sparse/AmbiVector.h index 1ac28272b..7b18f8cc2 100644 --- a/Eigen/src/Sparse/AmbiVector.h +++ b/Eigen/src/Sparse/AmbiVector.h @@ -35,7 +35,8 @@ template class AmbiVector public: typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; - AmbiVector(int size) + typedef SparseIndex Index; + AmbiVector(Index size) : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) { resize(size); @@ -44,40 +45,40 @@ template class AmbiVector void init(double estimatedDensity); void init(int mode); - int nonZeros() const; + Index nonZeros() const; /** Specifies a sub-vector to work on */ - void setBounds(int start, int end) { m_start = start; m_end = end; } + void setBounds(Index start, Index end) { m_start = start; m_end = end; } void setZero(); void restart(); - Scalar& coeffRef(int i); - Scalar& coeff(int i); + Scalar& coeffRef(Index i); + Scalar& coeff(Index i); class Iterator; ~AmbiVector() { delete[] m_buffer; } - void resize(int size) + void resize(Index size) { if (m_allocatedSize < size) reallocate(size); m_size = size; } - int size() const { return m_size; } + Index size() const { return m_size; } protected: - void reallocate(int size) + void reallocate(Index size) { // if the size of the matrix is not too large, let's allocate a bit more than needed such // that we can handle dense vector even in sparse mode. 
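For context on the arithmetic just below: the buffer is allocated in Scalar units but reused as ListEl records in sparse mode, and all of those counts are now Index. A standalone sketch of the unit conversion; the helper is hypothetical, not the class member:

    // Converts a desired number of linked-list elements into the number of
    // Scalar slots that must be allocated, rounding up, as reallocateSparse()
    // below does.
    template<typename Scalar, typename ListEl, typename Index>
    Index scalars_needed_for(Index nbListElements)
    {
      const Index bytes = nbListElements * static_cast<Index>(sizeof(ListEl));
      return bytes / static_cast<Index>(sizeof(Scalar))
           + (bytes % static_cast<Index>(sizeof(Scalar)) ? Index(1) : Index(0));
    }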
delete[] m_buffer; if (size<1000) { - int allocSize = (size * sizeof(ListEl))/sizeof(Scalar); + Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar); m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[allocSize]; } @@ -93,9 +94,9 @@ template class AmbiVector void reallocateSparse() { - int copyElements = m_allocatedElements; - m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size); - int allocSize = m_allocatedElements * sizeof(ListEl); + Index copyElements = m_allocatedElements; + m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size); + Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); Scalar* newBuffer = new Scalar[allocSize]; memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); @@ -107,30 +108,30 @@ template class AmbiVector // element type of the linked list struct ListEl { - int next; - int index; + Index next; + Index index; Scalar value; }; // used to store data in both mode Scalar* m_buffer; Scalar m_zero; - int m_size; - int m_start; - int m_end; - int m_allocatedSize; - int m_allocatedElements; - int m_mode; + Index m_size; + Index m_start; + Index m_end; + Index m_allocatedSize; + Index m_allocatedElements; + Index m_mode; // linked list mode - int m_llStart; - int m_llCurrent; - int m_llSize; + Index m_llStart; + Index m_llCurrent; + Index m_llSize; }; /** \returns the number of non zeros in the current sub vector */ template -int AmbiVector::nonZeros() const +SparseIndex AmbiVector::nonZeros() const { if (m_mode==IsSparse) return m_llSize; @@ -175,7 +176,7 @@ void AmbiVector::setZero() { if (m_mode==IsDense) { - for (int i=m_start; i::setZero() } template -Scalar& AmbiVector::coeffRef(int i) +Scalar& AmbiVector::coeffRef(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -221,7 +222,7 @@ Scalar& AmbiVector::coeffRef(int i) } else { - int nextel = llElements[m_llCurrent].next; + Index nextel = llElements[m_llCurrent].next; ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { @@ -256,7 +257,7 @@ Scalar& AmbiVector::coeffRef(int i) } template -Scalar& AmbiVector::coeff(int i) +Scalar& AmbiVector::coeff(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -270,7 +271,7 @@ Scalar& AmbiVector::coeff(int i) } else { - int elid = m_llStart; + Index elid = m_llStart; while (elid >= 0 && llElements[elid].index::Iterator } } - int index() const { return m_cachedIndex; } + Index index() const { return m_cachedIndex; } Scalar value() const { return m_cachedValue; } operator bool() const { return m_cachedIndex>=0; } @@ -365,9 +366,9 @@ class AmbiVector<_Scalar>::Iterator protected: const AmbiVector& m_vector; // the target vector - int m_currentEl; // the current element in sparse/linked-list mode + Index m_currentEl; // the current element in sparse/linked-list mode RealScalar m_epsilon; // epsilon used to prune zero coefficients - int m_cachedIndex; // current coordinate + Index m_cachedIndex; // current coordinate Scalar m_cachedValue; // current value bool m_isDense; // mode of the vector }; diff --git a/Eigen/src/Sparse/CholmodSupport.h b/Eigen/src/Sparse/CholmodSupport.h index cf407240f..82a09f35c 100644 --- a/Eigen/src/Sparse/CholmodSupport.h +++ b/Eigen/src/Sparse/CholmodSupport.h @@ -114,8 +114,8 @@ MappedSparseMatrix::MappedSparseMatrix(cholmod_sparse& cm) { m_innerSize = cm.nrow; m_outerSize = 
cm.ncol; - m_outerIndex = reinterpret_cast(cm.p); - m_innerIndices = reinterpret_cast(cm.i); + m_outerIndex = reinterpret_cast(cm.p); + m_innerIndices = reinterpret_cast(cm.i); m_values = reinterpret_cast(cm.x); m_nnz = m_outerIndex[cm.ncol]; } @@ -220,7 +220,7 @@ template template bool SparseLLT::solveInPlace(MatrixBase &b) const { - const int size = m_cholmodFactor->n; + const Index size = m_cholmodFactor->n; ei_assert(size==b.rows()); // this uses Eigen's triangular sparse solver diff --git a/Eigen/src/Sparse/CompressedStorage.h b/Eigen/src/Sparse/CompressedStorage.h index 4fc1797d1..37d337639 100644 --- a/Eigen/src/Sparse/CompressedStorage.h +++ b/Eigen/src/Sparse/CompressedStorage.h @@ -32,6 +32,7 @@ template class CompressedStorage { typedef typename NumTraits::Real RealScalar; + typedef SparseIndex Index; public: CompressedStorage() : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) @@ -53,7 +54,7 @@ class CompressedStorage { resize(other.size()); memcpy(m_values, other.m_values, m_size * sizeof(Scalar)); - memcpy(m_indices, other.m_indices, m_size * sizeof(int)); + memcpy(m_indices, other.m_indices, m_size * sizeof(Index)); return *this; } @@ -91,9 +92,9 @@ class CompressedStorage m_size = size; } - void append(const Scalar& v, int i) + void append(const Scalar& v, Index i) { - int id = static_cast(m_size); + Index id = static_cast(m_size); resize(m_size+1, 1); m_values[id] = v; m_indices[id] = i; @@ -106,10 +107,10 @@ class CompressedStorage inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } - inline int& index(size_t i) { return m_indices[i]; } - inline const int& index(size_t i) const { return m_indices[i]; } + inline Index& index(size_t i) { return m_indices[i]; } + inline const Index& index(size_t i) const { return m_indices[i]; } - static CompressedStorage Map(int* indices, Scalar* values, size_t size) + static CompressedStorage Map(Index* indices, Scalar* values, size_t size) { CompressedStorage res; res.m_indices = indices; @@ -119,13 +120,13 @@ class CompressedStorage } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ - inline int searchLowerIndex(int key) const + inline Index searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ - inline int searchLowerIndex(size_t start, size_t end, int key) const + inline Index searchLowerIndex(size_t start, size_t end, Index key) const { while(end>start) { @@ -135,12 +136,12 @@ class CompressedStorage else end = mid; } - return static_cast(start); + return static_cast(start); } /** \returns the stored value at index \a key * If the value does not exist, then the value \a defaultValue is returned without any insertion. 
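The lookup described here is a plain lower-bound binary search over the sorted index array. A standalone sketch of it with a generic Index type, illustrative only and not the member function itself:

    // Returns the largest k such that indices[j] < key for all j in [start, k),
    // i.e. the insertion point for key within the sorted range [start, end).
    template<typename Index>
    Index search_lower_index(const Index* indices, Index start, Index end, Index key)
    {
      while(end > start)
      {
        const Index mid = (end + start) >> 1;
        if(indices[mid] < key) start = mid + 1;
        else                   end = mid;
      }
      return start;
    }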
*/ - inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const + inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const { if (m_size==0) return defaultValue; @@ -153,7 +154,7 @@ class CompressedStorage } /** Like at(), but the search is performed in the range [start,end) */ - inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const + inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const { if (start>=end) return Scalar(0); @@ -168,7 +169,7 @@ class CompressedStorage /** \returns a reference to the value at index \a key * If the value does not exist, then the value \a defaultValue is inserted * such that the keys are sorted. */ - inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0)) + inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0)) { size_t id = searchLowerIndex(0,m_size,key); if (id>=m_size || m_indices[id]!=key) @@ -206,11 +207,11 @@ class CompressedStorage inline void reallocate(size_t size) { Scalar* newValues = new Scalar[size]; - int* newIndices = new int[size]; + Index* newIndices = new Index[size]; size_t copySize = std::min(size, m_size); // copy memcpy(newValues, m_values, copySize * sizeof(Scalar)); - memcpy(newIndices, m_indices, copySize * sizeof(int)); + memcpy(newIndices, m_indices, copySize * sizeof(Index)); // delete old stuff delete[] m_values; delete[] m_indices; @@ -221,7 +222,7 @@ class CompressedStorage protected: Scalar* m_values; - int* m_indices; + Index* m_indices; size_t m_size; size_t m_allocatedSize; diff --git a/Eigen/src/Sparse/CoreIterators.h b/Eigen/src/Sparse/CoreIterators.h index 69780456d..7ae847ddc 100644 --- a/Eigen/src/Sparse/CoreIterators.h +++ b/Eigen/src/Sparse/CoreIterators.h @@ -38,9 +38,11 @@ template class DenseBase::InnerIterator { typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; + enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit }; public: - EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, int outer) + EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer) : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.rows()) {} @@ -52,17 +54,17 @@ template class DenseBase::InnerIterator EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; } - EIGEN_STRONG_INLINE int index() const { return m_inner; } - inline int row() const { return IsRowMajor ? m_outer : index(); } - inline int col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE Index index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const Derived& m_expression; - int m_inner; - const int m_outer; - const int m_end; + Index m_inner; + const Index m_outer; + const Index m_end; }; #endif // EIGEN_COREITERATORS_H diff --git a/Eigen/src/Sparse/DynamicSparseMatrix.h b/Eigen/src/Sparse/DynamicSparseMatrix.h index fd7c7fbd7..fea707f15 100644 --- a/Eigen/src/Sparse/DynamicSparseMatrix.h +++ b/Eigen/src/Sparse/DynamicSparseMatrix.h @@ -75,16 +75,16 @@ class DynamicSparseMatrix typedef DynamicSparseMatrix TransposedSparseMatrix; - int m_innerSize; + Index m_innerSize; std::vector > m_data; public: - inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; } - inline int cols() const { return IsRowMajor ? 
m_innerSize : outerSize(); } - inline int innerSize() const { return m_innerSize; } - inline int outerSize() const { return static_cast(m_data.size()); } - inline int innerNonZeros(int j) const { return m_data[j].size(); } + inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } + inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); } + inline Index innerSize() const { return m_innerSize; } + inline Index outerSize() const { return static_cast(m_data.size()); } + inline Index innerNonZeros(Index j) const { return m_data[j].size(); } std::vector >& _data() { return m_data; } const std::vector >& _data() const { return m_data; } @@ -92,21 +92,21 @@ class DynamicSparseMatrix /** \returns the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. */ - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; return m_data[outer].at(inner); } /** \returns a reference to the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. If the coefficient does not - * exist yet, then a sorted insertion into a sequential buffer is performed. + * exist yet, then a sorted insertion Indexo a sequential buffer is performed. */ - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; return m_data[outer].atWithInsertion(inner); } @@ -114,44 +114,44 @@ class DynamicSparseMatrix void setZero() { - for (int j=0; j(m_data[j].size()); + Index res = 0; + for (Index j=0; j(m_data[j].size()); return res; } /** \deprecated * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */ - EIGEN_DEPRECATED void startFill(int reserveSize = 1000) + EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) { setZero(); reserve(reserveSize); } - void reserve(int reserveSize = 1000) + void reserve(Index reserveSize = 1000) { if (outerSize()>0) { - int reserveSizePerVector = std::max(reserveSize/outerSize(),4); - for (int j=0; j(m_data[outer].size()) - 1; + Index startId = 0; + Index id = static_cast(m_data[outer].size()) - 1; m_data[outer].resize(id+2,1); while ( (id >= startId) && (m_data[outer].index(id) > inner) ) @@ -212,27 +212,27 @@ class DynamicSparseMatrix void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) { - for (int j=0; jinnerSize) { // remove all coefficients with innerCoord>=innerSize @@ -252,7 +252,7 @@ class DynamicSparseMatrix ei_assert(innerSize()==0 && outerSize()==0); } - inline DynamicSparseMatrix(int rows, int cols) + inline DynamicSparseMatrix(Index rows, Index cols) : m_innerSize(0) { resize(rows, cols); @@ -308,15 +308,15 @@ class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator Base; public: - InnerIterator(const DynamicSparseMatrix& mat, int outer) + InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) {} - inline int row() const { return IsRowMajor ? m_outer : Base::index(); } - inline int col() const { return IsRowMajor ? 
Base::index() : m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } + inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } protected: - const int m_outer; + const Index m_outer; }; #endif // EIGEN_DYNAMIC_SPARSEMATRIX_H diff --git a/Eigen/src/Sparse/MappedSparseMatrix.h b/Eigen/src/Sparse/MappedSparseMatrix.h index 43ac6b308..07233ac67 100644 --- a/Eigen/src/Sparse/MappedSparseMatrix.h +++ b/Eigen/src/Sparse/MappedSparseMatrix.h @@ -48,40 +48,40 @@ class MappedSparseMatrix protected: enum { IsRowMajor = Base::IsRowMajor }; - int m_outerSize; - int m_innerSize; - int m_nnz; - int* m_outerIndex; - int* m_innerIndices; + Index m_outerSize; + Index m_innerSize; + Index m_nnz; + Index* m_outerIndex; + Index* m_innerIndices; Scalar* m_values; public: - inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } - inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } - inline int innerSize() const { return m_innerSize; } - inline int outerSize() const { return m_outerSize; } - inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline Index innerSize() const { return m_innerSize; } + inline Index outerSize() const { return m_outerSize; } + inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } //---------------------------------------- // direct access interface inline const Scalar* _valuePtr() const { return m_values; } inline Scalar* _valuePtr() { return m_values; } - inline const int* _innerIndexPtr() const { return m_innerIndices; } - inline int* _innerIndexPtr() { return m_innerIndices; } + inline const Index* _innerIndexPtr() const { return m_innerIndices; } + inline Index* _innerIndexPtr() { return m_innerIndices; } - inline const int* _outerIndexPtr() const { return m_outerIndex; } - inline int* _outerIndexPtr() { return m_outerIndex; } + inline const Index* _outerIndexPtr() const { return m_outerIndex; } + inline Index* _outerIndexPtr() { return m_outerIndex; } //---------------------------------------- - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; - int start = m_outerIndex[outer]; - int end = m_outerIndex[outer+1]; + Index start = m_outerIndex[outer]; + Index end = m_outerIndex[outer+1]; if (start==end) return Scalar(0); else if (end>0 && inner==m_innerIndices[end-1]) @@ -89,22 +89,22 @@ class MappedSparseMatrix // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) - const int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); - const int id = r-&m_innerIndices[0]; + const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); + const Index id = r-&m_innerIndices[0]; return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); - const int id = r-&m_innerIndices[0]; + Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); + const Index id = r-&m_innerIndices[0]; ei_assert((*r==inner) && (id class MappedSparseMatrix::InnerIterator { public: - InnerIterator(const MappedSparseMatrix& mat, int outer) + InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat._outerIndexPtr()[outer]), @@ -148,7 +148,7 @@ class MappedSparseMatrix::InnerIterator {} template - InnerIterator(const Flagged& mat, int outer) + InnerIterator(const Flagged& mat, Index outer) : m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]), m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1]) {} @@ -158,18 +158,18 @@ class MappedSparseMatrix::InnerIterator inline Scalar value() const { return m_matrix._valuePtr()[m_id]; } inline Scalar& valueRef() { return const_cast(m_matrix._valuePtr()[m_id]); } - inline int index() const { return m_matrix._innerIndexPtr()[m_id]; } - inline int row() const { return IsRowMajor ? m_outer : index(); } - inline int col() const { return IsRowMajor ? index() : m_outer; } + inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const MappedSparseMatrix& m_matrix; - const int m_outer; - int m_id; - const int m_start; - const int m_end; + const Index m_outer; + Index m_id; + const Index m_start; + const Index m_end; }; #endif // EIGEN_MAPPED_SPARSEMATRIX_H diff --git a/Eigen/src/Sparse/RandomSetter.h b/Eigen/src/Sparse/RandomSetter.h index 76f24cf0e..abe98815f 100644 --- a/Eigen/src/Sparse/RandomSetter.h +++ b/Eigen/src/Sparse/RandomSetter.h @@ -166,7 +166,9 @@ template class RandomSetter { - typedef typename ei_traits::Scalar Scalar; + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::Index Index; + struct ScalarWrapper { ScalarWrapper() : value(0) {} @@ -194,14 +196,14 @@ class RandomSetter inline RandomSetter(SparseMatrixType& target) : mp_target(&target) { - const int outerSize = SwapStorage ? target.innerSize() : target.outerSize(); - const int innerSize = SwapStorage ? target.outerSize() : target.innerSize(); + const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize(); + const Index innerSize = SwapStorage ? 
target.outerSize() : target.innerSize(); m_outerPackets = outerSize >> OuterPacketBits; if (outerSize&OuterPacketMask) m_outerPackets += 1; m_hashmaps = new HashMapType[m_outerPackets]; // compute number of bits needed to store inner indices - int aux = innerSize - 1; + Index aux = innerSize - 1; m_keyBitsOffset = 0; while (aux) { @@ -209,11 +211,11 @@ class RandomSetter aux = aux >> 1; } KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); - for (int k=0; k::setInvalidKey(m_hashmaps[k],ik); // insert current coeffs - for (int j=0; jouterSize(); ++j) + for (Index j=0; jouterSize(); ++j) for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it) (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value(); } @@ -226,18 +228,18 @@ class RandomSetter { mp_target->setZero(); mp_target->reserve(nonZeros()); - int prevOuter = -1; - for (int k=0; kfirst >> m_keyBitsOffset) + outerOffset; - const int inner = it->first & keyBitsMask; + const Index outer = (it->first >> m_keyBitsOffset) + outerOffset; + const Index inner = it->first & keyBitsMask; if (prevOuter!=outer) { - for (int j=prevOuter+1;j<=outer;++j) + for (Index j=prevOuter+1;j<=outer;++j) mp_target->startVec(j); prevOuter = outer; } @@ -251,20 +253,20 @@ class RandomSetter VectorXi positions(mp_target->outerSize()); positions.setZero(); // pass 1 - for (int k=0; kfirst & keyBitsMask; + const Index outer = it->first & keyBitsMask; ++positions[outer]; } } // prefix sum - int count = 0; - for (int j=0; jouterSize(); ++j) + Index count = 0; + for (Index j=0; jouterSize(); ++j) { - int tmp = positions[j]; + Index tmp = positions[j]; mp_target->_outerIndexPtr()[j] = count; positions[j] = count; count += tmp; @@ -272,20 +274,20 @@ class RandomSetter mp_target->_outerIndexPtr()[mp_target->outerSize()] = count; mp_target->resizeNonZeros(count); // pass 2 - for (int k=0; kfirst >> m_keyBitsOffset) + outerOffset; - const int outer = it->first & keyBitsMask; + const Index inner = (it->first >> m_keyBitsOffset) + outerOffset; + const Index outer = it->first & keyBitsMask; // sorted insertion // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients, // moreover those 2^OuterPacketBits coeffs are likely to be sparse, an so only a // small fraction of them have to be sorted, whence the following simple procedure: - int posStart = mp_target->_outerIndexPtr()[outer]; - int i = (positions[outer]++) - 1; + Index posStart = mp_target->_outerIndexPtr()[outer]; + Index i = (positions[outer]++) - 1; while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) ) { mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i]; @@ -301,14 +303,14 @@ class RandomSetter } /** \returns a reference to the coefficient at given coordinates \a row, \a col */ - Scalar& operator() (int row, int col) + Scalar& operator() (Index row, Index col) { ei_assert(((!IsUpper) || (row<=col)) && "Invalid access to an upper triangular matrix"); ei_assert(((!IsLower) || (col<=row)) && "Invalid access to an upper triangular matrix"); - const int outer = SetterRowMajor ? row : col; - const int inner = SetterRowMajor ? col : row; - const int outerMajor = outer >> OuterPacketBits; // index of the packet/map - const int outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet + const Index outer = SetterRowMajor ? row : col; + const Index inner = SetterRowMajor ? 
col : row; + const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map + const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet const KeyType key = (KeyType(outerMinor)<(m_hashmaps[k].size()); + Index nz = 0; + for (Index k=0; k(m_hashmaps[k].size()); return nz; } @@ -331,7 +333,7 @@ class RandomSetter HashMapType* m_hashmaps; SparseMatrixType* mp_target; - int m_outerPackets; + Index m_outerPackets; unsigned char m_keyBitsOffset; }; diff --git a/Eigen/src/Sparse/SparseBlock.h b/Eigen/src/Sparse/SparseBlock.h index bdbc46025..bf8b5adc7 100644 --- a/Eigen/src/Sparse/SparseBlock.h +++ b/Eigen/src/Sparse/SparseBlock.h @@ -54,22 +54,22 @@ class SparseInnerVectorSet : ei_no_assignment_operator, class InnerIterator: public MatrixType::InnerIterator { public: - inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} - inline int row() const { return IsRowMajor ? m_outer : this->index(); } - inline int col() const { return IsRowMajor ? this->index() : m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } protected: - int m_outer; + Index m_outer; }; - inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } - inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { ei_assert(Size!=Dynamic); @@ -88,15 +88,14 @@ class SparseInnerVectorSet : ei_no_assignment_operator, // return *this; // } - EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: const typename MatrixType::Nested m_matrix; - int m_outerStart; - const ei_int_if_dynamic m_outerSize; - + Index m_outerStart; + const ei_variable_if_dynamic m_outerSize; }; /*************************************************************************** @@ -116,22 +115,22 @@ class SparseInnerVectorSet, Size> class InnerIterator: public MatrixType::InnerIterator { public: - inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} - inline int row() const { return IsRowMajor ? m_outer : this->index(); } - inline int col() const { return IsRowMajor ? this->index() : m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? 
this->index() : m_outer; } protected: - int m_outer; + Index m_outer; }; - inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } - inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { ei_assert(Size!=Dynamic); @@ -150,7 +149,7 @@ class SparseInnerVectorSet, Size> else { // evaluate/copy vector per vector - for (int j=0; j aux(other.innerVector(j)); m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); @@ -164,10 +163,10 @@ class SparseInnerVectorSet, Size> return operator=(other); } - int nonZeros() const + Index nonZeros() const { - int count = 0; - for (int j=0; j, Size> // return *this; // } - EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: const typename MatrixType::Nested m_matrix; - int m_outerStart; - const ei_int_if_dynamic m_outerSize; + Index m_outerStart; + const ei_variable_if_dynamic m_outerSize; }; @@ -214,22 +213,22 @@ class SparseInnerVectorSet, Size> class InnerIterator: public MatrixType::InnerIterator { public: - inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) {} - inline int row() const { return IsRowMajor ? m_outer : this->index(); } - inline int col() const { return IsRowMajor ? this->index() : m_outer; } + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? 
this->index() : m_outer; } protected: - int m_outer; + Index m_outer; }; - inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) { ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); } - inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) { ei_assert(Size==1); @@ -248,7 +247,7 @@ class SparseInnerVectorSet, Size> else { // evaluate/copy vector per vector - for (int j=0; j aux(other.innerVector(j)); m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); @@ -267,17 +266,17 @@ class SparseInnerVectorSet, Size> inline Scalar* _valuePtr() { return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; } - inline const int* _innerIndexPtr() const + inline const Index* _innerIndexPtr() const { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; } - inline int* _innerIndexPtr() + inline Index* _innerIndexPtr() { return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; } - inline const int* _outerIndexPtr() const + inline const Index* _outerIndexPtr() const { return m_matrix._outerIndexPtr() + m_outerStart; } - inline int* _outerIndexPtr() + inline Index* _outerIndexPtr() { return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; } - int nonZeros() const + Index nonZeros() const { return size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()]) - size_t(m_matrix._outerIndexPtr()[m_outerStart]); } @@ -295,14 +294,14 @@ class SparseInnerVectorSet, Size> // return *this; // } - EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: const typename MatrixType::Nested m_matrix; - int m_outerStart; - const ei_int_if_dynamic m_outerSize; + Index m_outerStart; + const ei_variable_if_dynamic m_outerSize; }; @@ -310,7 +309,7 @@ class SparseInnerVectorSet, Size> /** \returns the i-th row of the matrix \c *this. For row-major matrix only. */ template -SparseInnerVectorSet SparseMatrixBase::row(int i) +SparseInnerVectorSet SparseMatrixBase::row(Index i) { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVector(i); @@ -319,7 +318,7 @@ SparseInnerVectorSet SparseMatrixBase::row(int i) /** \returns the i-th row of the matrix \c *this. For row-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::row(int i) const +const SparseInnerVectorSet SparseMatrixBase::row(Index i) const { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVector(i); @@ -327,7 +326,7 @@ const SparseInnerVectorSet SparseMatrixBase::row(int i) cons /** \returns the i-th column of the matrix \c *this. For column-major matrix only. 
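A short usage example of the inner-vector access being retyped here, assuming the Eigen 3-style sparse API; the coefficients are made up:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;   // column-major by default
      typedef SpMat::Index Index;

      SpMat m(4, 4);
      m.insert(0, 1) = 3.0;
      m.insert(2, 1) = 7.0;
      m.makeCompressed();

      // index(), row() and col() of the inner iterators return Index after this change.
      for(SpMat::InnerIterator it(m, Index(1)); it; ++it)
        std::cout << "(" << it.row() << ", " << it.col() << ") = " << it.value() << "\n";
      return 0;
    }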
*/ template -SparseInnerVectorSet SparseMatrixBase::col(int i) +SparseInnerVectorSet SparseMatrixBase::col(Index i) { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVector(i); @@ -336,7 +335,7 @@ SparseInnerVectorSet SparseMatrixBase::col(int i) /** \returns the i-th column of the matrix \c *this. For column-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::col(int i) const +const SparseInnerVectorSet SparseMatrixBase::col(Index i) const { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVector(i); @@ -346,21 +345,21 @@ const SparseInnerVectorSet SparseMatrixBase::col(int i) cons * is col-major (resp. row-major). */ template -SparseInnerVectorSet SparseMatrixBase::innerVector(int outer) +SparseInnerVectorSet SparseMatrixBase::innerVector(Index outer) { return SparseInnerVectorSet(derived(), outer); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). Read-only. */ template -const SparseInnerVectorSet SparseMatrixBase::innerVector(int outer) const +const SparseInnerVectorSet SparseMatrixBase::innerVector(Index outer) const { return SparseInnerVectorSet(derived(), outer); } //---------- /** \returns the i-th row of the matrix \c *this. For row-major matrix only. */ template -SparseInnerVectorSet SparseMatrixBase::subrows(int start, int size) +SparseInnerVectorSet SparseMatrixBase::subrows(Index start, Index size) { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVectors(start, size); @@ -369,7 +368,7 @@ SparseInnerVectorSet SparseMatrixBase::subrows(int sta /** \returns the i-th row of the matrix \c *this. For row-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::subrows(int start, int size) const +const SparseInnerVectorSet SparseMatrixBase::subrows(Index start, Index size) const { EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); return innerVectors(start, size); @@ -377,7 +376,7 @@ const SparseInnerVectorSet SparseMatrixBase::subrows(i /** \returns the i-th column of the matrix \c *this. For column-major matrix only. */ template -SparseInnerVectorSet SparseMatrixBase::subcols(int start, int size) +SparseInnerVectorSet SparseMatrixBase::subcols(Index start, Index size) { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVectors(start, size); @@ -386,7 +385,7 @@ SparseInnerVectorSet SparseMatrixBase::subcols(int sta /** \returns the i-th column of the matrix \c *this. For column-major matrix only. * (read-only version) */ template -const SparseInnerVectorSet SparseMatrixBase::subcols(int start, int size) const +const SparseInnerVectorSet SparseMatrixBase::subcols(Index start, Index size) const { EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); return innerVectors(start, size); @@ -396,14 +395,14 @@ const SparseInnerVectorSet SparseMatrixBase::subcols(i * is col-major (resp. row-major). */ template -SparseInnerVectorSet SparseMatrixBase::innerVectors(int outerStart, int outerSize) +SparseInnerVectorSet SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) { return SparseInnerVectorSet(derived(), outerStart, outerSize); } /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this * is col-major (resp. row-major). Read-only. 
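And a matching example for innerVectors(), whose bounds become Index in this hunk; it assumes the returned block keeps the nonZeros() member declared above:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      typedef SpMat::Index Index;

      SpMat m(6, 6);
      for(Index j = 0; j < 6; ++j)
        m.insert(j, j) = 1.0;
      m.makeCompressed();

      // innerVectors(start, size): a set of consecutive columns here (column-major
      // storage), with Index-typed bounds.
      const Index start = 1, size = 3;
      std::cout << m.innerVectors(start, size).nonZeros() << "\n";   // expected: 3
      return 0;
    }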
*/ template -const SparseInnerVectorSet SparseMatrixBase::innerVectors(int outerStart, int outerSize) const +const SparseInnerVectorSet SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const { return SparseInnerVectorSet(derived(), outerStart, outerSize); } #endif // EIGEN_SPARSE_BLOCK_H diff --git a/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/Eigen/src/Sparse/SparseCwiseBinaryOp.h index 91fbcb172..90878feda 100644 --- a/Eigen/src/Sparse/SparseCwiseBinaryOp.h +++ b/Eigen/src/Sparse/SparseCwiseBinaryOp.h @@ -68,10 +68,11 @@ class CwiseBinaryOpImpl::InnerIterator : public ei_sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> { public: + typedef typename Lhs::Index Index; typedef ei_sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; - EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, int outer) + EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) : Base(binOp.derived(),outer) {} }; @@ -95,9 +96,11 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector::_RhsNested _RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename _RhsNested::InnerIterator RhsIterator; + typedef typename Lhs::Index Index; + public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { this->operator++(); @@ -134,9 +137,9 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector=0; } @@ -145,7 +148,7 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, typedef typename _LhsNested::InnerIterator LhsIterator; typedef typename ei_traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; + typedef typename Lhs::Index Index; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) { while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) @@ -189,9 +193,9 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } - EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } @@ -211,10 +215,11 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, typedef typename ei_traits::_LhsNested _LhsNested; typedef typename ei_traits::RhsNested RhsNested; typedef typename _LhsNested::InnerIterator LhsIterator; + typedef typename Lhs::Index Index; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, 
Index outer) : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} @@ -228,9 +233,9 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, { return m_functor(m_lhsIter.value(), m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } - EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } @@ -238,7 +243,7 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, const RhsNested m_rhs; LhsIterator m_lhsIter; const BinaryFunc m_functor; - const int m_outer; + const Index m_outer; }; // sparse - dense (product) @@ -250,10 +255,12 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, typedef typename CwiseBinaryXpr::Scalar Scalar; typedef typename ei_traits::_RhsNested _RhsNested; typedef typename _RhsNested::InnerIterator RhsIterator; + typedef typename Lhs::Index Index; + enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: - EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer) : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer) {} @@ -266,9 +273,9 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } - EIGEN_STRONG_INLINE int index() const { return m_rhsIter.index(); } - EIGEN_STRONG_INLINE int row() const { return m_rhsIter.row(); } - EIGEN_STRONG_INLINE int col() const { return m_rhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } @@ -276,7 +283,7 @@ class ei_sparse_cwise_binary_op_inner_iterator_selector, const CwiseBinaryXpr& m_xpr; RhsIterator m_rhsIter; const BinaryFunc& m_functor; - const int m_outer; + const Index m_outer; }; diff --git a/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/Eigen/src/Sparse/SparseCwiseUnaryOp.h index f3f8c82c5..5e12da464 100644 --- a/Eigen/src/Sparse/SparseCwiseUnaryOp.h +++ b/Eigen/src/Sparse/SparseCwiseUnaryOp.h @@ -57,9 +57,10 @@ class CwiseUnaryOpImpl::InnerIterator typedef typename CwiseUnaryOpImpl::Scalar Scalar; typedef typename ei_traits::_XprTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; + typedef typename MatrixType::Index Index; public: - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, int outer) + EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer) : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor()) {} @@ -68,9 +69,9 @@ class CwiseUnaryOpImpl::InnerIterator EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); } - EIGEN_STRONG_INLINE int index() const { return 
m_iter.index(); } - EIGEN_STRONG_INLINE int row() const { return m_iter.row(); } - EIGEN_STRONG_INLINE int col() const { return m_iter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_iter; } @@ -98,9 +99,10 @@ class CwiseUnaryViewImpl::InnerIterator typedef typename CwiseUnaryViewImpl::Scalar Scalar; typedef typename ei_traits::_MatrixTypeNested _MatrixTypeNested; typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; + typedef typename MatrixType::Index Index; public: - EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, int outer) + EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer) : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor()) {} @@ -110,9 +112,9 @@ class CwiseUnaryViewImpl::InnerIterator EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); } EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); } - EIGEN_STRONG_INLINE int index() const { return m_iter.index(); } - EIGEN_STRONG_INLINE int row() const { return m_iter.row(); } - EIGEN_STRONG_INLINE int col() const { return m_iter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_iter; } @@ -125,7 +127,7 @@ template EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator*=(const Scalar& other) { - for (int j=0; j EIGEN_STRONG_INLINE Derived& SparseMatrixBase::operator/=(const Scalar& other) { - for (int j=0; j,Rhs>::InnerIterator { typedef typename CwiseUnaryOp,Rhs>::InnerIterator Base; + typedef typename Lhs::Index Index; public: inline ei_sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, int outer) + const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) {} }; @@ -130,9 +131,10 @@ class ei_sparse_diagonal_product_inner_iterator_selector ei_scalar_product_op, SparseInnerVectorSet, typename Lhs::DiagonalVectorType>::InnerIterator Base; + typedef typename Lhs::Index Index; public: inline ei_sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, int outer) + const SparseDiagonalProductType& expr, Index outer) : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0) {} }; @@ -143,9 +145,10 @@ class ei_sparse_diagonal_product_inner_iterator_selector : public CwiseUnaryOp,Lhs>::InnerIterator { typedef typename CwiseUnaryOp,Lhs>::InnerIterator Base; + typedef typename Lhs::Index Index; public: inline ei_sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, int outer) + const SparseDiagonalProductType& expr, Index outer) : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) {} }; @@ -162,9 +165,10 @@ class ei_sparse_diagonal_product_inner_iterator_selector ei_scalar_product_op, SparseInnerVectorSet, Transpose >::InnerIterator Base; + typedef typename Lhs::Index Index; public: inline ei_sparse_diagonal_product_inner_iterator_selector( - const SparseDiagonalProductType& expr, int outer) + const SparseDiagonalProductType& expr, Index outer) : 
Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0) {} }; diff --git a/Eigen/src/Sparse/SparseLDLT.h b/Eigen/src/Sparse/SparseLDLT.h index 28797a6c4..b6a51c6a6 100644 --- a/Eigen/src/Sparse/SparseLDLT.h +++ b/Eigen/src/Sparse/SparseLDLT.h @@ -78,6 +78,7 @@ class SparseLDLT { protected: typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; @@ -188,36 +189,36 @@ template void SparseLDLT::_symbolic(const MatrixType& a) { assert(a.rows()==a.cols()); - const int size = a.rows(); + const Index size = a.rows(); m_matrix.resize(size, size); m_parent.resize(size); m_nonZerosPerCol.resize(size); - int * tags = ei_aligned_stack_new(int, size); + Index * tags = ei_aligned_stack_new(Index, size); - const int* Ap = a._outerIndexPtr(); - const int* Ai = a._innerIndexPtr(); - int* Lp = m_matrix._outerIndexPtr(); - const int* P = 0; - int* Pinv = 0; + const Index* Ap = a._outerIndexPtr(); + const Index* Ai = a._innerIndexPtr(); + Index* Lp = m_matrix._outerIndexPtr(); + const Index* P = 0; + Index* Pinv = 0; if (P) { /* If P is present then compute Pinv, the inverse of P */ - for (int k = 0; k < size; ++k) + for (Index k = 0; k < size; ++k) Pinv[P[k]] = k; } - for (int k = 0; k < size; ++k) + for (Index k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ tags[k] = k; /* mark node k as visited */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ - int kk = P ? P[k] : k; /* kth original, or permuted, column */ - int p2 = Ap[kk+1]; - for (int p = Ap[kk]; p < p2; ++p) + Index kk = P ? P[k] : k; /* kth original, or permuted, column */ + Index p2 = Ap[kk+1]; + for (Index p = Ap[kk]; p < p2; ++p) { /* A (i,k) is nonzero (original or permuted A) */ - int i = Pinv ? Pinv[Ai[p]] : Ai[p]; + Index i = Pinv ? 
Pinv[Ai[p]] : Ai[p]; if (i < k) { /* follow path from i to root of etree, stop at flagged node */ @@ -234,53 +235,53 @@ void SparseLDLT::_symbolic(const MatrixType& a) } /* construct Lp index array from m_nonZerosPerCol column counts */ Lp[0] = 0; - for (int k = 0; k < size; ++k) + for (Index k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k]; m_matrix.resizeNonZeros(Lp[size]); - ei_aligned_stack_delete(int, tags, size); + ei_aligned_stack_delete(Index, tags, size); } template bool SparseLDLT::_numeric(const MatrixType& a) { assert(a.rows()==a.cols()); - const int size = a.rows(); + const Index size = a.rows(); assert(m_parent.size()==size); assert(m_nonZerosPerCol.size()==size); - const int* Ap = a._outerIndexPtr(); - const int* Ai = a._innerIndexPtr(); + const Index* Ap = a._outerIndexPtr(); + const Index* Ai = a._innerIndexPtr(); const Scalar* Ax = a._valuePtr(); - const int* Lp = m_matrix._outerIndexPtr(); - int* Li = m_matrix._innerIndexPtr(); + const Index* Lp = m_matrix._outerIndexPtr(); + Index* Li = m_matrix._innerIndexPtr(); Scalar* Lx = m_matrix._valuePtr(); m_diag.resize(size); Scalar * y = ei_aligned_stack_new(Scalar, size); - int * pattern = ei_aligned_stack_new(int, size); - int * tags = ei_aligned_stack_new(int, size); + Index * pattern = ei_aligned_stack_new(Index, size); + Index * tags = ei_aligned_stack_new(Index, size); - const int* P = 0; - const int* Pinv = 0; + const Index* P = 0; + const Index* Pinv = 0; bool ok = true; - for (int k = 0; k < size; ++k) + for (Index k = 0; k < size; ++k) { /* compute nonzero pattern of kth row of L, in topological order */ y[k] = 0.0; /* Y(0:k) is now all zero */ - int top = size; /* stack for pattern is empty */ + Index top = size; /* stack for pattern is empty */ tags[k] = k; /* mark node k as visited */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ - int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */ - int p2 = Ap[kk+1]; - for (int p = Ap[kk]; p < p2; ++p) + Index kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */ + Index p2 = Ap[kk+1]; + for (Index p = Ap[kk]; p < p2; ++p) { - int i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */ + Index i = Pinv ? 
Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */ if (i <= k) { y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */ - int len; + Index len; for (len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ @@ -295,11 +296,11 @@ bool SparseLDLT::_numeric(const MatrixType& a) y[k] = 0.0; for (; top < size; ++top) { - int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; - int p2 = Lp[i] + m_nonZerosPerCol[i]; - int p; + Index p2 = Lp[i] + m_nonZerosPerCol[i]; + Index p; for (p = Lp[i]; p < p2; ++p) y[Li[p]] -= Lx[p] * yi; Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */ @@ -316,8 +317,8 @@ bool SparseLDLT::_numeric(const MatrixType& a) } ei_aligned_stack_delete(Scalar, y, size); - ei_aligned_stack_delete(int, pattern, size); - ei_aligned_stack_delete(int, tags, size); + ei_aligned_stack_delete(Index, pattern, size); + ei_aligned_stack_delete(Index, tags, size); return ok; /* success, diagonal of D is all nonzero */ } @@ -327,7 +328,7 @@ template template bool SparseLDLT::solveInPlace(MatrixBase &b) const { - const int size = m_matrix.rows(); + const Index size = m_matrix.rows(); ei_assert(size==b.rows()); if (!m_succeeded) return false; diff --git a/Eigen/src/Sparse/SparseLLT.h b/Eigen/src/Sparse/SparseLLT.h index a1c10ba13..37c6c3f9a 100644 --- a/Eigen/src/Sparse/SparseLLT.h +++ b/Eigen/src/Sparse/SparseLLT.h @@ -40,6 +40,7 @@ class SparseLLT { protected: typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; typedef typename NumTraits::Real RealScalar; typedef SparseMatrix CholMatrixType; @@ -127,7 +128,7 @@ template void SparseLLT::compute(const MatrixType& a) { assert(a.rows()==a.cols()); - const int size = a.rows(); + const Index size = a.rows(); m_matrix.resize(size, size); // allocate a temporary vector for accumulations @@ -137,7 +138,7 @@ void SparseLLT::compute(const MatrixType& a) // TODO estimate the number of non zeros m_matrix.setZero(); m_matrix.reserve(a.nonZeros()*2); - for (int j = 0; j < size; ++j) + for (Index j = 0; j < size; ++j) { Scalar x = ei_real(a.coeff(j,j)); @@ -154,7 +155,7 @@ void SparseLLT::compute(const MatrixType& a) for (; it; ++it) tempVector.coeffRef(it.index()) = it.value(); } - for (int k=0; k template bool SparseLLT::solveInPlace(MatrixBase &b) const { - const int size = m_matrix.rows(); + const Index size = m_matrix.rows(); ei_assert(size==b.rows()); m_matrix.template triangularView().solveInPlace(b); diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/Sparse/SparseMatrix.h index 06cc7a949..fd41d7302 100644 --- a/Eigen/src/Sparse/SparseMatrix.h +++ b/Eigen/src/Sparse/SparseMatrix.h @@ -77,46 +77,46 @@ class SparseMatrix typedef SparseMatrix TransposedSparseMatrix; - int m_outerSize; - int m_innerSize; - int* m_outerIndex; + Index m_outerSize; + Index m_innerSize; + Index* m_outerIndex; CompressedStorage m_data; public: - inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } - inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline Index cols() const { return IsRowMajor ? 
m_innerSize : m_outerSize; } - inline int innerSize() const { return m_innerSize; } - inline int outerSize() const { return m_outerSize; } - inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } + inline Index innerSize() const { return m_innerSize; } + inline Index outerSize() const { return m_outerSize; } + inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } inline const Scalar* _valuePtr() const { return &m_data.value(0); } inline Scalar* _valuePtr() { return &m_data.value(0); } - inline const int* _innerIndexPtr() const { return &m_data.index(0); } - inline int* _innerIndexPtr() { return &m_data.index(0); } + inline const Index* _innerIndexPtr() const { return &m_data.index(0); } + inline Index* _innerIndexPtr() { return &m_data.index(0); } - inline const int* _outerIndexPtr() const { return m_outerIndex; } - inline int* _outerIndexPtr() { return m_outerIndex; } + inline const Index* _outerIndexPtr() const { return m_outerIndex; } + inline Index* _outerIndexPtr() { return m_outerIndex; } - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; - int start = m_outerIndex[outer]; - int end = m_outerIndex[outer+1]; + Index start = m_outerIndex[outer]; + Index end = m_outerIndex[outer+1]; ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - const int id = m_data.searchLowerIndex(start,end-1,inner); + const Index id = m_data.searchLowerIndex(start,end-1,inner); ei_assert((id(m_data.size()); } + inline Index nonZeros() const { return static_cast(m_data.size()); } /** \deprecated use setZero() and reserve() * Initializes the filling process of \c *this. * \param reserveSize approximate number of nonzeros * Note that the matrix \c *this is zero-ed. */ - EIGEN_DEPRECATED void startFill(int reserveSize = 1000) + EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) { setZero(); m_data.reserve(reserveSize); } /** Preallocates \a reserveSize non zeros */ - inline void reserve(int reserveSize) + inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } /** \deprecated use insert() */ - EIGEN_DEPRECATED Scalar& fill(int row, int col) + EIGEN_DEPRECATED Scalar& fill(Index row, Index col) { - const int outer = IsRowMajor ? row : col; - const int inner = IsRowMajor ? col : row; + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; if (m_outerIndex[outer+1]==0) { // we start a new inner vector - int i = outer; + Index i = outer; while (i>=0 && m_outerIndex[i]==0) { m_outerIndex[i] = m_data.size(); @@ -176,7 +176,7 @@ class SparseMatrix } // std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n"; assert(size_t(m_outerIndex[outer+1]) == m_data.size()); - int id = m_outerIndex[outer+1]; + Index id = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; m_data.append(0, inner); @@ -185,25 +185,25 @@ class SparseMatrix //--- low level purely coherent filling --- - inline Scalar& insertBack(int outer, int inner) + inline Scalar& insertBack(Index outer, Index inner) { ei_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "wrong sorted insertion"); ei_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)=0 && m_outerIndex[previousOuter]==0) { - m_outerIndex[previousOuter] = static_cast(m_data.size()); + m_outerIndex[previousOuter] = static_cast(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; @@ -285,9 +285,9 @@ class SparseMatrix { // oops wrong guess. // let's correct the outer offsets - for (int k=0; k<=(outer+1); ++k) + for (Index k=0; k<=(outer+1); ++k) m_outerIndex[k] = 0; - int k=outer+1; + Index k=outer+1; while(m_outerIndex[k]==0) m_outerIndex[k++] = 1; while (k<=m_outerSize && m_outerIndex[k]!=0) @@ -306,13 +306,13 @@ class SparseMatrix { // we are not inserting into the last inner vec // update outer indices: - int j = outer+2; + Index j = outer+2; while (j<=m_outerSize && m_outerIndex[j]!=0) m_outerIndex[j++]++; --j; // shift data of last vecs: - int k = m_outerIndex[j]-1; - while (k>=int(id)) + Index k = m_outerIndex[j]-1; + while (k>=Index(id)) { m_data.index(k) = m_data.index(k-1); m_data.value(k) = m_data.value(k-1); @@ -338,8 +338,8 @@ class SparseMatrix */ inline void finalize() { - int size = static_cast(m_data.size()); - int i = m_outerSize; + Index size = static_cast(m_data.size()); + Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) --i; @@ -353,13 +353,13 @@ class SparseMatrix void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) { - int k = 0; - for (int j=0; j(m_outerIndex,outerSize()).setZero(); + Eigen::Map > (m_outerIndex,outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass - for (int j=0; j class SparseMatrix::InnerIterator { public: - InnerIterator(const SparseMatrix& mat, int outer) + InnerIterator(const SparseMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1]) {} template - InnerIterator(const Flagged& mat, int outer) + InnerIterator(const Flagged& mat, Index outer) : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix.m_outerIndex[outer]), m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1]) {} @@ -555,19 +555,19 @@ class SparseMatrix::InnerIterator inline Scalar value() const { return m_matrix.m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_matrix.m_data.value(m_id)); } - inline int index() const { return m_matrix.m_data.index(m_id); } - inline int outer() const { return m_outer; } - inline int row() const { return IsRowMajor ? m_outer : index(); } - inline int col() const { return IsRowMajor ? index() : m_outer; } + inline Index index() const { return m_matrix.m_data.index(m_id); } + inline Index outer() const { return m_outer; } + inline Index row() const { return IsRowMajor ? 
m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const SparseMatrix& m_matrix; - const int m_outer; - int m_id; - const int m_start; - const int m_end; + const Index m_outer; + Index m_id; + const Index m_start; + const Index m_end; }; #endif // EIGEN_SPARSEMATRIX_H diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/Sparse/SparseMatrixBase.h index 65fa19a79..a4326821d 100644 --- a/Eigen/src/Sparse/SparseMatrixBase.h +++ b/Eigen/src/Sparse/SparseMatrixBase.h @@ -42,6 +42,9 @@ template class SparseMatrixBase : public EigenBase typedef typename ei_traits::Scalar Scalar; typedef typename ei_packet_traits::type PacketScalar; + typedef typename ei_traits::StorageKind StorageKind; + typedef typename ei_index::type Index; + typedef SparseMatrixBase StorageBaseType; enum { @@ -145,15 +148,15 @@ template class SparseMatrixBase : public EigenBase #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ - inline int rows() const { return derived().rows(); } + inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ - inline int cols() const { return derived().cols(); } + inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ - inline int size() const { return rows() * cols(); } + inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ - inline int nonZeros() const { return derived().nonZeros(); } + inline Index nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. * In other words, this function returns * \code rows()==1 || cols()==1 \endcode @@ -161,10 +164,10 @@ template class SparseMatrixBase : public EigenBase inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ - int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ - int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + Index innerSize() const { return (int(Flags)&RowMajorBit) ? 
this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } @@ -193,13 +196,13 @@ template class SparseMatrixBase : public EigenBase enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) }; - const int outerSize = other.outerSize(); + const Index outerSize = other.outerSize(); //typedef typename ei_meta_if, Derived>::ret TempType; // thanks to shallow copies, we always eval to a tempary Derived temp(other.rows(), other.cols()); temp.reserve(std::max(this->rows(),this->cols())*2); - for (int j=0; j class SparseMatrixBase : public EigenBase // std::cout << Flags << " " << OtherDerived::Flags << "\n"; const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); // std::cout << "eval transpose = " << transpose << "\n"; - const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols(); + const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols(); if ((!transpose) && other.isRValue()) { // eval without temporary derived().resize(other.rows(), other.cols()); derived().setZero(); derived().reserve(std::max(this->rows(),this->cols())*2); - for (int j=0; j class SparseMatrixBase : public EigenBase { if (Flags&RowMajorBit) { - for (int row=0; row class SparseMatrixBase : public EigenBase else { if (m.cols() == 1) { - int row = 0; + Index row = 0; for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it) { for ( ; row class SparseMatrixBase : public EigenBase const AdjointReturnType adjoint() const { return transpose(); } // sub-vector - SparseInnerVectorSet row(int i); - const SparseInnerVectorSet row(int i) const; - SparseInnerVectorSet col(int j); - const SparseInnerVectorSet col(int j) const; - SparseInnerVectorSet innerVector(int outer); - const SparseInnerVectorSet innerVector(int outer) const; + SparseInnerVectorSet row(Index i); + const SparseInnerVectorSet row(Index i) const; + SparseInnerVectorSet col(Index j); + const SparseInnerVectorSet col(Index j) const; + SparseInnerVectorSet innerVector(Index outer); + const SparseInnerVectorSet innerVector(Index outer) const; // set of sub-vectors - SparseInnerVectorSet subrows(int start, int size); - const SparseInnerVectorSet subrows(int start, int size) const; - SparseInnerVectorSet subcols(int start, int size); - const SparseInnerVectorSet subcols(int start, int size) const; - SparseInnerVectorSet innerVectors(int outerStart, int outerSize); - const SparseInnerVectorSet innerVectors(int outerStart, int outerSize) const; + SparseInnerVectorSet subrows(Index start, Index size); + const SparseInnerVectorSet subrows(Index start, Index size) const; + SparseInnerVectorSet subcols(Index start, Index size); + const SparseInnerVectorSet subcols(Index start, Index size) const; + SparseInnerVectorSet innerVectors(Index outerStart, Index outerSize); + const SparseInnerVectorSet innerVectors(Index outerStart, Index outerSize) const; // typename BlockReturnType::Type block(int startRow, int startCol, int blockRows, int blockCols); // const typename BlockReturnType::Type @@ -493,7 +496,7 @@ template class SparseMatrixBase : public EigenBase void evalTo(MatrixBase& dst) const { dst.setZero(); - for (int j=0; j static void ei_sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res) { - typedef typename ei_traits::type>::Scalar Scalar; + typedef typename ei_cleantype::type::Scalar Scalar; + typedef typename ei_cleantype::type::Index Index; // make sure to 
call innerSize/outerSize since we fake the storage order. - int rows = lhs.innerSize(); - int cols = rhs.outerSize(); + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); ei_assert(lhs.outerSize() == rhs.innerSize()); std::vector mask(rows,false); Matrix values(rows); - Matrix indices(rows); + Matrix indices(rows); // estimate the number of non zero entries float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); @@ -160,20 +161,20 @@ static void ei_sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& // int t = (rows*100)/139; res.resize(rows, cols); - res.reserve(int(ratioRes*rows*cols)); + res.reserve(Index(ratioRes*rows*cols)); // we compute each column of the result, one after the other - for (int j=0; j::type>::Scalar Scalar; + typedef typename ei_cleantype::type::Scalar Scalar; + typedef typename ei_cleantype::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. - int rows = lhs.innerSize(); - int cols = rhs.outerSize(); + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); //int size = lhs.outerSize(); ei_assert(lhs.outerSize() == rhs.innerSize()); @@ -242,8 +244,8 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); res.resize(rows, cols); - res.reserve(int(ratioRes*rows*cols)); - for (int j=0; j::type _Rhs; typedef typename _Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit }; - for(int j=0; j dest_j(dest.row(LhsIsRowMajor ? j : 0)); @@ -555,7 +557,7 @@ class DenseTimeSparseProduct typedef typename ei_cleantype::type _Rhs; typedef typename _Rhs::InnerIterator RhsInnerIterator; enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit }; - for(int j=0; j::sum() const { ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res = 0; - for (int j=0; j class SparseSelfAdjointView { public: - typedef typename ei_traits::Scalar Scalar; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix) { @@ -57,8 +58,8 @@ template class SparseSelfAdjointView ei_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } - inline int rows() const { return m_matrix.rows(); } - inline int cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const MatrixType& matrix() const { return m_matrix; } @@ -173,7 +174,7 @@ class SparseSelfAdjointTimeDenseProduct || ( (UpLo&Lower) && LhsIsRowMajor), ProcessSecondHalf = !ProcessFirstHalf }; - for (int j=0; j dest_j(dest.row(LhsIsRowMajor ? j : 0)); for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i) { - int a = LhsIsRowMajor ? j : i.index(); - int b = LhsIsRowMajor ? i.index() : j; + Index a = LhsIsRowMajor ? j : i.index(); + Index b = LhsIsRowMajor ? 
i.index() : j; typename Lhs::Scalar v = i.value(); dest.row(a) += (v) * m_rhs.row(b); dest.row(b) += ei_conj(v) * m_rhs.row(a); diff --git a/Eigen/src/Sparse/SparseTranspose.h b/Eigen/src/Sparse/SparseTranspose.h index b8c38617a..a94f5ae7c 100644 --- a/Eigen/src/Sparse/SparseTranspose.h +++ b/Eigen/src/Sparse/SparseTranspose.h @@ -35,19 +35,19 @@ template class TransposeImpl class InnerIterator; class ReverseInnerIterator; - inline int nonZeros() const { return derived().nestedExpression().nonZeros(); } + inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } // FIXME should be keep them ? - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { return const_cast_derived().nestedExpression().coeffRef(col, row); } - inline const Scalar coeff(int row, int col) const + inline const Scalar coeff(Index row, Index col) const { return derived().nestedExpression().coeff(col, row); } - inline const Scalar coeff(int index) const + inline const Scalar coeff(Index index) const { return derived().nestedExpression().coeff(index); } - inline Scalar& coeffRef(int index) + inline Scalar& coeffRef(Index index) { return const_cast_derived().nestedExpression().coeffRef(index); } }; @@ -56,11 +56,11 @@ template class TransposeImpl::InnerItera typedef typename MatrixType::InnerIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, int outer) + EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer) : Base(trans.derived().nestedExpression(), outer) {} - inline int row() const { return Base::col(); } - inline int col() const { return Base::row(); } + inline Index row() const { return Base::col(); } + inline Index col() const { return Base::row(); } }; template class TransposeImpl::ReverseInnerIterator : public MatrixType::ReverseInnerIterator @@ -68,11 +68,11 @@ template class TransposeImpl::ReverseInn typedef typename MatrixType::ReverseInnerIterator Base; public: - EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, int outer) + EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer) : Base(xpr.derived().nestedExpression(), outer) {} - inline int row() const { return Base::col(); } - inline int col() const { return Base::row(); } + inline Index row() const { return Base::col(); } + inline Index col() const { return Base::row(); } }; #endif // EIGEN_SPARSETRANSPOSE_H diff --git a/Eigen/src/Sparse/SparseTriangularView.h b/Eigen/src/Sparse/SparseTriangularView.h index e713220b9..2588c3698 100644 --- a/Eigen/src/Sparse/SparseTriangularView.h +++ b/Eigen/src/Sparse/SparseTriangularView.h @@ -38,11 +38,12 @@ template class SparseTriangularView public: class InnerIterator; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; - inline int rows() { return m_matrix.rows(); } - inline int cols() { return m_matrix.cols(); } + inline Index rows() { return m_matrix.rows(); } + inline Index cols() { return m_matrix.cols(); } - typedef typename ei_traits::Scalar Scalar; typedef typename ei_meta_if::ret, MatrixType, const MatrixType&>::ret MatrixTypeNested; @@ -68,15 +69,15 @@ class SparseTriangularView::InnerIterator : public MatrixType:: typedef typename MatrixType::InnerIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, int outer) + EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer) { if(SkipFirst) while((*this) && 
this->index()::Scalar Scalar; \ -typedef typename Eigen::NumTraits::Real RealScalar; \ -typedef typename Eigen::ei_nested::type Nested; \ -enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ - Flags = Eigen::ei_traits::Flags, \ - CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; + typedef BaseClass Base; \ + typedef typename Eigen::ei_traits::Scalar Scalar; \ + typedef typename Eigen::NumTraits::Real RealScalar; \ + typedef typename Eigen::ei_nested::type Nested; \ + typedef typename Eigen::ei_traits::StorageKind StorageKind; \ + typedef typename Eigen::ei_index::type Index; \ + enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ + Flags = Eigen::ei_traits::Flags, \ + CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + SizeAtCompileTime = Base::SizeAtCompileTime, \ + IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; #define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) @@ -76,6 +78,8 @@ enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ typedef typename Eigen::ei_traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::ei_nested::type Nested; \ + typedef typename Eigen::ei_traits::StorageKind StorageKind; \ + typedef typename Eigen::ei_index::type Index; \ enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ Flags = Eigen::ei_traits::Flags, \ @@ -88,6 +92,12 @@ enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) +template<> +struct ei_index +{ typedef EIGEN_DEFAULT_SPARSE_INDEX_TYPE type; }; + +typedef typename ei_index::type SparseIndex; + enum SparseBackend { DefaultBackend, Taucs, diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/Sparse/SparseVector.h index 6806ab288..4013b4de5 100644 --- a/Eigen/src/Sparse/SparseVector.h +++ b/Eigen/src/Sparse/SparseVector.h @@ -70,33 +70,33 @@ class SparseVector enum { IsColVector = ei_traits::IsColVector }; CompressedStorage m_data; - int m_size; + Index m_size; CompressedStorage& _data() { return m_data; } CompressedStorage& _data() const { return m_data; } public: - EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; } - EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 1 : m_size; } - EIGEN_STRONG_INLINE int innerSize() const { return m_size; } - EIGEN_STRONG_INLINE int outerSize() const { return 1; } - EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_size; } + EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 
1 : m_size; } + EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } + EIGEN_STRONG_INLINE Index outerSize() const { return 1; } + EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { ei_assert(j==0); return m_size; } EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); } - EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); } - EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); } + EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); } + EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); } - inline Scalar coeff(int row, int col) const + inline Scalar coeff(Index row, Index col) const { ei_assert((IsColVector ? col : row)==0); return coeff(IsColVector ? row : col); } - inline Scalar coeff(int i) const { return m_data.at(i); } + inline Scalar coeff(Index i) const { return m_data.at(i); } - inline Scalar& coeffRef(int row, int col) + inline Scalar& coeffRef(Index row, Index col) { ei_assert((IsColVector ? col : row)==0); return coeff(IsColVector ? row : col); @@ -108,7 +108,7 @@ class SparseVector * * This insertion might be very costly if the number of nonzeros above \a i is large. */ - inline Scalar& coeffRef(int i) + inline Scalar& coeffRef(Index i) { return m_data.atWithInsertion(i); } @@ -120,33 +120,33 @@ class SparseVector inline void setZero() { m_data.clear(); } /** \returns the number of non zero coefficients */ - inline int nonZeros() const { return static_cast(m_data.size()); } + inline Index nonZeros() const { return static_cast(m_data.size()); } - inline void startVec(int outer) + inline void startVec(Index outer) { ei_assert(outer==0); } - inline Scalar& insertBack(int outer, int inner) + inline Scalar& insertBack(Index outer, Index inner) { ei_assert(outer==0); return insertBack(inner); } - inline Scalar& insertBack(int i) + inline Scalar& insertBack(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } - inline Scalar& insert(int outer, int inner) + inline Scalar& insert(Index outer, Index inner) { ei_assert(outer==0); return insert(inner); } - Scalar& insert(int i) + Scalar& insert(Index i) { - int startId = 0; - int id = m_data.size() - 1; + Index startId = 0; + Index id = m_data.size() - 1; // TODO smart realloc m_data.resize(id+2,1); @@ -163,38 +163,38 @@ class SparseVector /** */ - inline void reserve(int reserveSize) { m_data.reserve(reserveSize); } + inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } /** \deprecated use setZero() and reserve() */ - EIGEN_DEPRECATED void startFill(int reserve) + EIGEN_DEPRECATED void startFill(Index reserve) { setZero(); m_data.reserve(reserve); } - /** \deprecated use insertBack(int,int) */ - EIGEN_DEPRECATED Scalar& fill(int r, int c) + /** \deprecated use insertBack(Index,Index) */ + EIGEN_DEPRECATED Scalar& fill(Index r, Index c) { ei_assert(r==0 || c==0); return fill(IsColVector ? r : c); } - /** \deprecated use insertBack(int) */ - EIGEN_DEPRECATED Scalar& fill(int i) + /** \deprecated use insertBack(Index) */ + EIGEN_DEPRECATED Scalar& fill(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } - /** \deprecated use insert(int,int) */ - EIGEN_DEPRECATED Scalar& fillrand(int r, int c) + /** \deprecated use insert(Index,Index) */ + EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c) { ei_assert(r==0 || c==0); return fillrand(IsColVector ? 
r : c); } - /** \deprecated use insert(int) */ - EIGEN_DEPRECATED Scalar& fillrand(int i) + /** \deprecated use insert(Index) */ + EIGEN_DEPRECATED Scalar& fillrand(Index i) { return insert(i); } @@ -208,25 +208,25 @@ class SparseVector m_data.prune(reference,epsilon); } - void resize(int rows, int cols) + void resize(Index rows, Index cols) { ei_assert(rows==1 || cols==1); resize(IsColVector ? rows : cols); } - void resize(int newSize) + void resize(Index newSize) { m_size = newSize; m_data.clear(); } - void resizeNonZeros(int size) { m_data.resize(size); } + void resizeNonZeros(Index size) { m_data.resize(size); } inline SparseVector() : m_size(0) { resize(0); } - inline SparseVector(int size) : m_size(0) { resize(size); } + inline SparseVector(Index size) : m_size(0) { resize(size); } - inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); } + inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); } template inline SparseVector(const MatrixBase& other) @@ -329,7 +329,7 @@ class SparseVector friend std::ostream & operator << (std::ostream & s, const SparseVector& m) { - for (unsigned int i=0; i class SparseVector::InnerIterator { public: - InnerIterator(const SparseVector& vec, int outer=0) - : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) + InnerIterator(const SparseVector& vec, Index outer=0) + : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) { ei_assert(outer==0); } InnerIterator(const CompressedStorage& data) - : m_data(data), m_id(0), m_end(static_cast(m_data.size())) + : m_data(data), m_id(0), m_end(static_cast(m_data.size())) {} template - InnerIterator(const Flagged& vec, int ) + InnerIterator(const Flagged& vec, Index ) : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size()) {} @@ -388,16 +388,16 @@ class SparseVector::InnerIterator inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } - inline int index() const { return m_data.index(m_id); } - inline int row() const { return IsColVector ? index() : 0; } - inline int col() const { return IsColVector ? 0 : index(); } + inline Index index() const { return m_data.index(m_id); } + inline Index row() const { return IsColVector ? index() : 0; } + inline Index col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: const CompressedStorage& m_data; - int m_id; - const int m_end; + Index m_id; + const Index m_end; }; #endif // EIGEN_SPARSEVECTOR_H diff --git a/Eigen/src/misc/Image.h b/Eigen/src/misc/Image.h index 1d63d8143..32392fd29 100644 --- a/Eigen/src/misc/Image.h +++ b/Eigen/src/misc/Image.h @@ -48,6 +48,8 @@ template struct ei_image_retval_base { typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; + typedef ReturnByValue Base; + typedef typename Base::Index Index; ei_image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), @@ -55,9 +57,9 @@ template struct ei_image_retval_base m_originalMatrix(originalMatrix) {} - inline int rows() const { return m_dec.rows(); } - inline int cols() const { return m_cols; } - inline int rank() const { return m_rank; } + inline Index rows() const { return m_dec.rows(); } + inline Index cols() const { return m_cols; } + inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } inline const MatrixType& originalMatrix() const { return m_originalMatrix; } @@ -68,7 +70,7 @@ template struct ei_image_retval_base protected: const DecompositionType& m_dec; - int m_rank, m_cols; + Index m_rank, m_cols; const MatrixType& m_originalMatrix; }; @@ -76,6 +78,7 @@ template struct ei_image_retval_base typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ + typedef typename MatrixType::Index Index; \ typedef ei_image_retval_base Base; \ using Base::dec; \ using Base::originalMatrix; \ diff --git a/Eigen/src/misc/Kernel.h b/Eigen/src/misc/Kernel.h index 497b42eab..38a2d4097 100644 --- a/Eigen/src/misc/Kernel.h +++ b/Eigen/src/misc/Kernel.h @@ -49,6 +49,8 @@ template struct ei_kernel_retval_base : public ReturnByValue > { typedef _DecompositionType DecompositionType; + typedef ReturnByValue Base; + typedef typename Base::Index Index; ei_kernel_retval_base(const DecompositionType& dec) : m_dec(dec), @@ -56,9 +58,9 @@ template struct ei_kernel_retval_base m_cols(m_rank==dec.cols() ? 
1 : dec.cols() - m_rank) {} - inline int rows() const { return m_dec.cols(); } - inline int cols() const { return m_cols; } - inline int rank() const { return m_rank; } + inline Index rows() const { return m_dec.cols(); } + inline Index cols() const { return m_cols; } + inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } template inline void evalTo(Dest& dst) const @@ -68,13 +70,14 @@ template struct ei_kernel_retval_base protected: const DecompositionType& m_dec; - int m_rank, m_cols; + Index m_rank, m_cols; }; #define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ + typedef typename MatrixType::Index Index; \ typedef ei_kernel_retval_base Base; \ using Base::dec; \ using Base::rank; \ diff --git a/Eigen/src/misc/Solve.h b/Eigen/src/misc/Solve.h index 028716aa2..d6fc67406 100644 --- a/Eigen/src/misc/Solve.h +++ b/Eigen/src/misc/Solve.h @@ -45,13 +45,15 @@ template struct ei_solve_retval_base { typedef typename ei_cleantype::type RhsNestedCleaned; typedef _DecompositionType DecompositionType; + typedef ReturnByValue Base; + typedef typename Base::Index Index; ei_solve_retval_base(const DecompositionType& dec, const Rhs& rhs) : m_dec(dec), m_rhs(rhs) {} - inline int rows() const { return m_dec.cols(); } - inline int cols() const { return m_rhs.cols(); } + inline Index rows() const { return m_dec.cols(); } + inline Index cols() const { return m_rhs.cols(); } inline const DecompositionType& dec() const { return m_dec; } inline const RhsNestedCleaned& rhs() const { return m_rhs; } @@ -69,6 +71,7 @@ template struct ei_solve_retval_base typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ + typedef typename MatrixType::Index Index; \ typedef ei_solve_retval_base Base; \ using Base::dec; \ using Base::rhs; \ -- cgit v1.2.3
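Taken together, the sparse-side effect of this change is that user and library code should obtain sizes and indices from the expression's Index typedef (aliased as SparseIndex through the ei_index specialization added in SparseUtil.h above) rather than spelling int. The closing sketch below is illustrative only and assumes this revision of the tree; printShape and the SparseVector fill are invented for the example.

#include <Eigen/Sparse>
#include <iostream>

// Generic helper: it takes sizes through the expression's own Index
// typedef (the one this change introduces) instead of assuming int, so
// it keeps compiling if EIGEN_DEFAULT_SPARSE_INDEX_TYPE is ever changed.
template<typename Derived>
void printShape(const Eigen::SparseMatrixBase<Derived>& xpr)
{
  typedef typename Derived::Index Index;
  const Index rows = xpr.rows();   // rows()/cols()/nonZeros() return Index
  const Index cols = xpr.cols();
  std::cout << rows << "x" << cols << " with "
            << xpr.nonZeros() << " non-zeros" << std::endl;
}

int main()
{
  Eigen::SparseVector<double> v(10);   // SparseVector sizes/indices are Index too
  v.insert(3) = 1.0;                   // insert(Index), as declared in the hunk above
  printShape(v);
  return 0;
}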