 Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | 14
 Eigen/src/SparseCore/SparseDenseProduct.h              |  4
 Eigen/src/SparseCore/SparseMatrix.h                    | 10
 Eigen/src/SparseCore/SparseSparseProductWithPruning.h  |  2
 test/sparse.h                                          |  6
 test/sparse_product.cpp                                |  1
 test/sparse_vector.cpp                                 | 13
 7 files changed, 26 insertions(+), 24 deletions(-)
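
The common thread in the hunks below is replacing hardcoded int loop counters and locals with the matrices' Index type, so that SparseMatrix and SparseVector instantiated with a non-default index type (long int, short, ...) compile cleanly and keep all index arithmetic in the storage type. As a minimal sketch of the kind of instantiation this enables (the sizes and values here are illustrative, not taken from the patch):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      // SparseMatrix with an explicit, non-int index type; the third template
      // argument is the storage index type the patched code must respect.
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long int> Mat;
      Mat A(3, 3);
      A.insert(0, 0) = 1.0;
      A.insert(1, 2) = 2.0;
      A.insert(2, 1) = 3.0;
      A.makeCompressed();

      // Loop counters use the matrix's Index type rather than plain int.
      for(Mat::Index j = 0; j < A.outerSize(); ++j)
        for(Mat::InnerIterator it(A, j); it; ++it)
          std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
      return 0;
    }
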
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index cd996d5ba..5c320e2d2 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
}
// unordered insertion
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
@@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
#if 0
// alternative ordered insertion code:
- int t200 = rows/(log2(200)*1.39);
- int t = (rows*100)/139;
+ Index t200 = rows/(log2(200)*1.39);
+ Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement fast sort algorithms for very small nnz
@@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
if(true)
{
if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
@@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
else
{
// dense path
- for(int i=0; i<rows; ++i)
+ for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
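
The kernel patched above is reached through plain operator* on two sparse matrices, which in this code base is evaluated by the conservative product by default. A hedged sketch of that call path with a long-int index type (the example itself is not part of the patch):

    #include <Eigen/Sparse>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long int> Mat;
      Mat A(4, 4), B(4, 4);
      A.insert(0, 1) = 1.0;  A.insert(3, 2) = 2.0;
      B.insert(1, 3) = 4.0;  B.insert(2, 0) = 5.0;
      A.makeCompressed();
      B.makeCompressed();

      // Sparse * sparse; with the patch, the kernel's temporaries (k, i, the
      // unordered-insertion bookkeeping) are held in Mat::Index, not int.
      Mat C = A * B;
      return C.nonZeros() == 2 ? 0 : 1;
    }
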
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 6132a4880..610833f3b 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -125,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
inline Scalar value() const { return Base::value() * m_factor; }
protected:
- int m_outer;
+ Index m_outer;
Scalar m_factor;
};
@@ -156,7 +156,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
{
for(Index c=0; c<rhs.cols(); ++c)
{
- int n = lhs.outerSize();
+ Index n = lhs.outerSize();
for(Index j=0; j<n; ++j)
{
typename Res::Scalar tmp(0);
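
For the sparse-times-dense path, the fix matters as soon as the sparse operand carries a non-int index type, since the loop bound n = lhs.outerSize() now stays in that type. A minimal sketch with illustrative values:

    #include <Eigen/Sparse>
    #include <Eigen/Dense>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::RowMajor, long int> Mat;
      Mat A(3, 3);
      A.insert(0, 0) = 1.0;
      A.insert(2, 1) = 2.0;
      A.makeCompressed();

      Eigen::MatrixXd d = Eigen::MatrixXd::Ones(3, 2);

      // Sparse * dense; the patched loop bound is held in the sparse Index type.
      Eigen::MatrixXd r = A * d;
      return (r(0, 0) == 1.0 && r(2, 0) == 2.0) ? 0 : 1;
    }
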
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index f2e234eee..0e7e760fb 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -402,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
- eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
@@ -480,7 +480,7 @@ class SparseMatrix
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
- for (int i = 0; i < m_outerSize; i++)
+ for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
@@ -752,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
- int p = m.m_outerIndex[i];
- int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@@ -1022,7 +1022,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
wi.fill(-1);
Index count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
- for(int j=0; j<outerSize(); ++j)
+ for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
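
sumupDuplicates() runs as part of setFromTriplets(), so the int-to-Index fixes above are exercised by ordinary triplet assembly on a matrix with a custom index type. A small illustrative sketch (not part of the patch; the triplet values are made up):

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef long int StorageIndex;                 // non-default index type
      typedef Eigen::Triplet<double, StorageIndex> T;
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, StorageIndex> Mat;

      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 1.0));
      triplets.push_back(T(0, 0, 2.0));              // duplicate entry, summed up
      triplets.push_back(T(1, 1, 3.0));

      Mat A(2, 2);
      // setFromTriplets() calls sumupDuplicates(), whose outer loop counter is
      // now the matrix's Index type instead of int.
      A.setFromTriplets(triplets.begin(), triplets.end());
      return (A.coeff(0, 0) == 3.0 && A.nonZeros() == 2) ? 0 : 1;
    }
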
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 1dccdfe1b..fcc18f5c9 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
- //int size = lhs.outerSize();
+ //Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
diff --git a/test/sparse.h b/test/sparse.h
index a09c65e5f..433d48ec4 100644
--- a/test/sparse.h
+++ b/test/sparse.h
@@ -154,16 +154,16 @@ initSparse(double density,
sparseMat.finalize();
}
-template<typename Scalar> void
+template<typename Scalar,int Options,typename Index> void
initSparse(double density,
Matrix<Scalar,Dynamic,1>& refVec,
- SparseVector<Scalar>& sparseVec,
+ SparseVector<Scalar,Options,Index>& sparseVec,
std::vector<int>* zeroCoords = 0,
std::vector<int>* nonzeroCoords = 0)
{
sparseVec.reserve(int(refVec.size()*density));
sparseVec.setZero();
- for(int i=0; i<refVec.size(); i++)
+ for(Index i=0; i<refVec.size(); i++)
{
Scalar v = (internal::random<double>(0,1) < density) ? internal::random<Scalar>() : Scalar(0);
if (v!=Scalar(0))
diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp
index 664e33887..948b0026b 100644
--- a/test/sparse_product.cpp
+++ b/test/sparse_product.cpp
@@ -244,6 +244,7 @@ void test_sparse_product()
CALL_SUBTEST_1( (sparse_product<SparseMatrix<double,RowMajor> >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, ColMajor > >()) );
CALL_SUBTEST_2( (sparse_product<SparseMatrix<std::complex<double>, RowMajor > >()) );
+ CALL_SUBTEST_3( (sparse_product<SparseMatrix<float,ColMajor,long int> >()) );
CALL_SUBTEST_4( (sparse_product_regression_test<SparseMatrix<double,RowMajor>, Matrix<double, Dynamic, Dynamic, RowMajor> >()) );
}
}
diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp
index ec5877b6a..0c9476803 100644
--- a/test/sparse_vector.cpp
+++ b/test/sparse_vector.cpp
@@ -9,14 +9,14 @@
#include "sparse.h"
-template<typename Scalar> void sparse_vector(int rows, int cols)
+template<typename Scalar,typename Index> void sparse_vector(int rows, int cols)
{
double densityMat = (std::max)(8./(rows*cols), 0.01);
double densityVec = (std::max)(8./float(rows), 0.1);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
- typedef SparseVector<Scalar> SparseVectorType;
- typedef SparseMatrix<Scalar> SparseMatrixType;
+ typedef SparseVector<Scalar,0,Index> SparseVectorType;
+ typedef SparseMatrix<Scalar,0,Index> SparseMatrixType;
Scalar eps = 1e-6;
SparseMatrixType m1(rows,rows);
@@ -101,9 +101,10 @@ template<typename Scalar> void sparse_vector(int rows, int cols)
void test_sparse_vector()
{
for(int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_1( sparse_vector<double>(8, 8) );
- CALL_SUBTEST_2( sparse_vector<std::complex<double> >(16, 16) );
- CALL_SUBTEST_1( sparse_vector<double>(299, 535) );
+ CALL_SUBTEST_1(( sparse_vector<double,int>(8, 8) ));
+ CALL_SUBTEST_2(( sparse_vector<std::complex<double>, int>(16, 16) ));
+ CALL_SUBTEST_1(( sparse_vector<double,long int>(299, 535) ));
+ CALL_SUBTEST_1(( sparse_vector<double,short>(299, 535) ));
}
}