diff options
author | 2015-02-16 15:29:00 +0100 | |
---|---|---|
committer | 2015-02-16 15:29:00 +0100 | |
commit | 98604576d1d4ef3d768b1cc08b4ccedbf16816ab (patch) | |
tree | dc570ba8d010e3b74550671039bf1548d11289d1 /Eigen/src/SparseCore/SparseMatrix.h | |
parent | bd511dde9da3a1f147b3d0d70e76298f759c4420 (diff) | |
parent | 45cbb0bbb1f66bbc923dd4dd1034b919f6b4a191 (diff) |
Merged in chtz/eigen-indexconversion (pull request PR-92)
bug #877, bug #572: Get rid of Index conversion warnings, summary of changes:
- Introduce a global typedef Eigen::Index making Eigen::DenseIndex and AnyExpr<>::Index deprecated (default is std::ptrdiff_t).
- Eigen::Index is used throughout the API to represent indices, offsets, and sizes.
- Classes storing an array of indices use the type StorageIndex to store them. This is a template parameter of the class. Default is int.
- Methods that *explicitly* set or return an element of such an array take or return a StorageIndex type. In all other cases, the Index type is used.
Diffstat (limited to 'Eigen/src/SparseCore/SparseMatrix.h')
-rw-r--r-- | Eigen/src/SparseCore/SparseMatrix.h | 142 |
1 files changed, 70 insertions, 72 deletions
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h index 74b4c6a9d..4cf4f1826 100644 --- a/Eigen/src/SparseCore/SparseMatrix.h +++ b/Eigen/src/SparseCore/SparseMatrix.h @@ -43,7 +43,7 @@ template<typename _Scalar, int _Options, typename _Index> struct traits<SparseMatrix<_Scalar, _Options, _Index> > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _Index StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -65,7 +65,7 @@ struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> > typedef _Scalar Scalar; typedef Dense StorageKind; - typedef _Index Index; + typedef _Index StorageIndex; typedef MatrixXpr XprKind; enum { @@ -107,23 +107,24 @@ class SparseMatrix using Base::IsRowMajor; - typedef internal::CompressedStorage<Scalar,Index> Storage; + typedef internal::CompressedStorage<Scalar,StorageIndex> Storage; enum { Options = _Options }; + typedef typename Base::IndexVector IndexVector; + typedef typename Base::ScalarVector ScalarVector; protected: - typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix; Index m_outerSize; Index m_innerSize; - Index* m_outerIndex; - Index* m_innerNonZeros; // optional, if null then the data is compressed + StorageIndex* m_outerIndex; + StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; - Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } - const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(m_innerNonZeros, 
m_innerNonZeros?m_outerSize:0); } public: @@ -149,29 +150,29 @@ class SparseMatrix /** \returns a const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline const Index* innerIndexPtr() const { return &m_data.index(0); } + inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); } /** \returns a non-const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline Index* innerIndexPtr() { return &m_data.index(0); } + inline StorageIndex* innerIndexPtr() { return &m_data.index(0); } /** \returns a const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline const Index* outerIndexPtr() const { return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } /** \returns a non-const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline Index* outerIndexPtr() { return m_outerIndex; } + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } /** \returns a const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ - inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } + inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. 
* \warning it returns the null pointer 0 in compressed mode */ - inline Index* innerNonZeroPtr() { return m_innerNonZeros; } + inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; } /** \internal */ inline Storage& data() { return m_data; } @@ -187,7 +188,7 @@ class SparseMatrix const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; - return m_data.atInRange(m_outerIndex[outer], end, inner); + return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner)); } /** \returns a non-const reference to the value of the matrix at position \a i, \a j @@ -210,7 +211,7 @@ class SparseMatrix eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); if(end<=start) return insert(row,col); - const Index p = m_data.searchLowerIndex(start,end-1,inner); + const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner)); if((p<end) && (m_data.index(p)==inner)) return m_data.value(p); else @@ -235,7 +236,7 @@ class SparseMatrix if(isCompressed()) { - reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2)); + reserve(IndexVector::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } @@ -246,9 +247,9 @@ class SparseMatrix inline void setZero() { m_data.clear(); - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); if(m_innerNonZeros) - memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index)); + memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); } /** \returns the number of non zero coefficients */ @@ -256,7 +257,7 @@ class SparseMatrix { if(m_innerNonZeros) return innerNonZeros().sum(); - return static_cast<Index>(m_data.size()); + return convert_index(Index(m_data.size())); } /** Preallocates \a reserveSize non zeros. 
@@ -298,15 +299,15 @@ class SparseMatrix { if(isCompressed()) { - std::size_t totalReserveSize = 0; + Index totalReserveSize = 0; // turn the matrix into non-compressed mode - m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); // temporarily use m_innerSizes to hold the new starting points. - Index* newOuterIndex = m_innerNonZeros; + StorageIndex* newOuterIndex = m_innerNonZeros; - Index count = 0; + StorageIndex count = 0; for(Index j=0; j<m_outerSize; ++j) { newOuterIndex[j] = count; @@ -314,10 +315,10 @@ class SparseMatrix totalReserveSize += reserveSizes[j]; } m_data.reserve(totalReserveSize); - Index previousOuterIndex = m_outerIndex[m_outerSize]; + StorageIndex previousOuterIndex = m_outerIndex[m_outerSize]; for(Index j=m_outerSize-1; j>=0; --j) { - Index innerNNZ = previousOuterIndex - m_outerIndex[j]; + StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -333,15 +334,15 @@ class SparseMatrix } else { - Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index))); + StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); - Index count = 0; + StorageIndex count = 0; for(Index j=0; j<m_outerSize; ++j) { newOuterIndex[j] = count; - Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j]; - Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved); + StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j]; + StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved); count += toReserve + m_innerNonZeros[j]; } newOuterIndex[m_outerSize] = count; @@ 
-352,7 +353,7 @@ class SparseMatrix Index offset = newOuterIndex[j] - m_outerIndex[j]; if(offset>0) { - Index innerNNZ = m_innerNonZeros[j]; + StorageIndex innerNNZ = m_innerNonZeros[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -389,7 +390,7 @@ class SparseMatrix * \sa insertBack, startVec */ inline Scalar& insertBackByOuterInner(Index outer, Index inner) { - eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); + eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)"); Index p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; @@ -423,7 +424,7 @@ class SparseMatrix { if(isCompressed()) { - Index size = static_cast<Index>(m_data.size()); + StorageIndex size = internal::convert_index<StorageIndex>(m_data.size()); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) @@ -488,7 +489,7 @@ class SparseMatrix { if(m_innerNonZeros != 0) return; - m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -515,7 +516,7 @@ class SparseMatrix // TODO also implement a unit test makeCompressed(); - Index k = 0; + StorageIndex k = 0; for(Index j=0; j<m_outerSize; ++j) { Index previousStart = m_outerIndex[j]; @@ -548,13 +549,13 @@ class SparseMatrix Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); - Index newInnerSize = IsRowMajor ? 
cols : rows; + StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows); // Deals with inner non zeros if (m_innerNonZeros) { // Resize m_innerNonZeros - Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); + StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex))); if (!newInnerNonZeros) internal::throw_std_bad_alloc(); m_innerNonZeros = newInnerNonZeros; @@ -564,7 +565,7 @@ class SparseMatrix else if (innerChange < 0) { // Inner size decreased: allocate a new m_innerNonZeros - m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); + m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); for(Index i = 0; i < m_outerSize; i++) m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -575,8 +576,8 @@ class SparseMatrix { for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) { - Index &n = m_innerNonZeros[i]; - Index start = m_outerIndex[i]; + StorageIndex &n = m_innerNonZeros[i]; + StorageIndex start = m_outerIndex[i]; while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; } } @@ -587,12 +588,12 @@ class SparseMatrix if (outerChange == 0) return; - Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); + StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); m_outerIndex = newOuterIndex; if (outerChange > 0) { - Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; + StorageIndex last = m_outerSize == 0 ? 
0 : m_outerIndex[m_outerSize]; for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++) m_outerIndex[i] = last; } @@ -610,7 +611,7 @@ class SparseMatrix if (m_outerSize != outerSize || m_outerSize==0) { std::free(m_outerIndex); - m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index))); + m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; @@ -620,7 +621,7 @@ class SparseMatrix std::free(m_innerNonZeros); m_innerNonZeros = 0; } - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); } /** \internal @@ -713,9 +714,9 @@ class SparseMatrix { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); - Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); - Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes(); - Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1)); + Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes(); + Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows())); } inline SparseMatrix& operator=(const SparseMatrix& other) { @@ -806,9 +807,7 @@ protected: template<typename Other> void initAssignment(const Other& other) { - eigen_assert( other.rows() == typename Other::Index(Index(other.rows())) - && other.cols() == typename Other::Index(Index(other.cols())) ); - resize(Index(other.rows()), Index(other.cols())); + resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); @@ -824,15 +823,15 @@ protected: * A vector object that is equal to 0 everywhere but v at the position i */ class SingletonVector { - Index m_index; - Index 
m_value; + StorageIndex m_index; + StorageIndex m_value; public: - typedef Index value_type; + typedef StorageIndex value_type; SingletonVector(Index i, Index v) - : m_index(i), m_value(v) + : m_index(convert_index(i)), m_value(convert_index(v)) {} - Index operator[](Index i) const { return i==m_index ? m_value : 0; } + StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; } }; /** \internal @@ -851,14 +850,14 @@ public: eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; - m_data.index(p) = inner; + m_data.index(p) = convert_index(inner); return (m_data.value(p) = 0); } private: static void check_template_parameters() { - EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } @@ -873,7 +872,6 @@ private: }; }; - namespace internal { template<typename InputIterator, typename SparseMatrixType> @@ -882,13 +880,13 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa EIGEN_UNUSED_VARIABLE(Options); enum { IsRowMajor = SparseMatrixType::IsRowMajor }; typedef typename SparseMatrixType::Scalar Scalar; - typedef typename SparseMatrixType::Index Index; - SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols()); + typedef typename SparseMatrixType::StorageIndex StorageIndex; + SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols()); if(begin!=end) { // pass 1: count the nnz per inner-vector - Matrix<Index,Dynamic,1> wi(trMat.outerSize()); + typename SparseMatrixType::IndexVector wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { @@ -962,13 +960,13 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates() { 
eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task - Matrix<Index,Dynamic,1> wi(innerSize()); + IndexVector wi(innerSize()); wi.fill(-1); - Index count = 0; + StorageIndex count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers for(Index j=0; j<outerSize(); ++j) { - Index start = count; + StorageIndex start = count; Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j]; for(Index k=m_outerIndex[j]; k<oldEnd; ++k) { @@ -1021,7 +1019,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt OtherCopyEval otherCopyEval(otherCopy); SparseMatrix dest(other.rows(),other.cols()); - Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero(); + Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass @@ -1030,8 +1028,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt ++dest.m_outerIndex[it.index()]; // prefix sum - Index count = 0; - Matrix<Index,Dynamic,1> positions(dest.outerSize()); + StorageIndex count = 0; + IndexVector positions(dest.outerSize()); for (Index j=0; j<dest.outerSize(); ++j) { Index tmp = dest.m_outerIndex[j]; @@ -1043,7 +1041,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt // alloc dest.m_data.resize(count); // pass 2 - for (Index j=0; j<otherCopy.outerSize(); ++j) + for (StorageIndex j=0; j<otherCopy.outerSize(); ++j) { for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) { @@ -1072,14 +1070,14 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse eigen_assert(!isCompressed()); const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; + const StorageIndex inner = convert_index(IsRowMajor ? 
col : row); Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; - Index innerNNZ = m_innerNonZeros[outer]; + StorageIndex innerNNZ = m_innerNonZeros[outer]; if(innerNNZ>=room) { // this inner vector is full, we need to reallocate the whole buffer :( - reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ))); + reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ))); } Index startId = m_outerIndex[outer]; @@ -1112,7 +1110,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse // we start a new inner vector while (previousOuter>=0 && m_outerIndex[previousOuter]==0) { - m_outerIndex[previousOuter] = static_cast<Index>(m_data.size()); + m_outerIndex[previousOuter] = convert_index(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; |