-rw-r--r--  Eigen/src/SparseCore/SparseColEtree.h           | 28
-rw-r--r--  Eigen/src/SparseLU/SparseLU.h                   | 62
-rw-r--r--  Eigen/src/SparseLU/SparseLUImpl.h               | 38
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Memory.h            | 16
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Structs.h           | 17
-rw-r--r--  Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h  | 20
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Utils.h             | 16
-rw-r--r--  Eigen/src/SparseLU/SparseLU_column_bmod.h       | 22
-rw-r--r--  Eigen/src/SparseLU/SparseLU_column_dfs.h        | 26
-rw-r--r--  Eigen/src/SparseLU/SparseLU_copy_to_ucol.h      |  4
-rw-r--r--  Eigen/src/SparseLU/SparseLU_heap_relax_snode.h  | 16
-rw-r--r--  Eigen/src/SparseLU/SparseLU_kernel_bmod.h       | 31
-rw-r--r--  Eigen/src/SparseLU/SparseLU_panel_bmod.h        | 56
-rw-r--r--  Eigen/src/SparseLU/SparseLU_panel_dfs.h         | 42
-rw-r--r--  Eigen/src/SparseLU/SparseLU_pivotL.h            |  2
-rw-r--r--  Eigen/src/SparseLU/SparseLU_pruneL.h            |  6
-rw-r--r--  Eigen/src/SparseLU/SparseLU_relax_snode.h       |  6
-rw-r--r--  test/sparselu.cpp                               |  2
18 files changed, 207 insertions, 203 deletions
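
The diff below systematically replaces hard-coded int indices, counters, and signatures with the Index template parameter across the SparseLU module and the column elimination tree helpers it uses, so the factorization can be instantiated with index types wider than int. As a usage sketch only (not part of the patch), the snippet below shows SparseLU with a 64-bit index type; it mirrors the sparselu_natural instance added to test/sparselu.cpp at the end of this diff, and assumes the NaturalOrdering used there.

    // Usage sketch (illustration, not from the patch): SparseLU with a 64-bit index type.
    #include <Eigen/Sparse>
    #include <Eigen/SparseLU>

    int main()
    {
      typedef long int Idx;                                 // wide index type, for illustration
      Eigen::SparseMatrix<double, Eigen::ColMajor, Idx> A;  // system matrix
      Eigen::VectorXd b, x;
      // ... fill A (in compressed mode) and b ...
      Eigen::SparseLU<Eigen::SparseMatrix<double, Eigen::ColMajor, Idx>,
                      Eigen::NaturalOrdering<Idx> > solver;
      solver.analyzePattern(A);  // symbolic step: ordering and elimination tree
      solver.factorize(A);       // numeric step: supernodal L and U factors
      x = solver.solve(b);       // triangular solves with L and U
      return 0;
    }
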
diff --git a/Eigen/src/SparseCore/SparseColEtree.h b/Eigen/src/SparseCore/SparseColEtree.h
index df6b9f966..664d09600 100644
--- a/Eigen/src/SparseCore/SparseColEtree.h
+++ b/Eigen/src/SparseCore/SparseColEtree.h
@@ -36,11 +36,11 @@ namespace Eigen {
namespace internal {
/** Find the root of the tree/set containing the vertex i : Use Path halving */
-template<typename IndexVector>
-int etree_find (int i, IndexVector& pp)
+template<typename Index, typename IndexVector>
+Index etree_find (Index i, IndexVector& pp)
{
- int p = pp(i); // Parent
- int gp = pp(p); // Grand parent
+ Index p = pp(i); // Parent
+ Index gp = pp(p); // Grand parent
while (gp != p)
{
pp(i) = gp; // Parent pointer on find path is changed to former grand parent
@@ -68,7 +68,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
//Compute first nonzero column in each row
- int row,col;
+ Index row,col;
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, nc).setLinSpaced(nc, 0, nc-1);
@@ -85,7 +85,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
- int rset, cset, rroot;
+ Index rset, cset, rroot;
for (col = 0; col < nc; col++)
{
found_diag = false;
@@ -97,7 +97,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
* hence the loop is executed once more */
for (typename MatrixType::InnerIterator it(mat, col); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed
- int i = col;
+ Index i = col;
if(it) i = it.index();
if (i == col) found_diag = true;
row = firstRowElt(i);
@@ -120,10 +120,10 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
* Depth-first search from vertex n. No recursion.
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/
-template <typename IndexVector>
-void nr_etdfs (int n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, int postnum)
+template <typename Index, typename IndexVector>
+void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
{
- int current = n, first, next;
+ Index current = n, first, next;
while (postnum != n)
{
// No kid for the current node
@@ -167,18 +167,18 @@ void nr_etdfs (int n, IndexVector& parent, IndexVector& first_kid, IndexVector&
* \param parent Input tree
* \param post postordered tree
*/
-template <typename IndexVector>
-void treePostorder(int n, IndexVector& parent, IndexVector& post)
+template <typename Index, typename IndexVector>
+void treePostorder(Index n, IndexVector& parent, IndexVector& post)
{
IndexVector first_kid, next_kid; // Linked list of children
- int postnum;
+ Index postnum;
// Allocate storage for working arrays and results
first_kid.resize(n+1);
next_kid.setZero(n+1);
post.setZero(n+1);
// Set up structure describing children
- int v, dad;
+ Index v, dad;
first_kid.setConstant(-1);
for (v = n-1; v >= 0; v--)
{
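
For reference, the etree_find routine templatized above is a union-find "find" with path halving, as its comment states. The following is a generic standalone sketch of that idea on a plain std::vector (an illustration, not Eigen's actual implementation, which works on the pp array inside coletree).

    // Sketch: union-find "find" with path halving, templated on the index type.
    #include <vector>

    template <typename Index>
    Index find_path_halving(Index i, std::vector<Index>& parent)
    {
      // Walk up to the root (a self-parent); on the way, repoint the current
      // node to its grandparent, halving the path for future queries.
      while (parent[i] != i)
      {
        parent[i] = parent[parent[i]];
        i = parent[i];
      }
      return i;
    }

For instance, with parent = {0, 0, 1, 2}, find_path_halving(3, parent) returns 0 and shortens the chain so that parent[3] becomes 1.
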
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
index b1a74581a..a7296dc0b 100644
--- a/Eigen/src/SparseLU/SparseLU.h
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -42,7 +42,7 @@ template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
* \code
* VectorXd x(n), b(n);
* SparseMatrix<double, ColMajor> A;
- * SparseLU<SparseMatrix<scalar, ColMajor>, COLAMDOrdering<int> > solver;
+ * SparseLU<SparseMatrix<scalar, ColMajor>, COLAMDOrdering<Index> > solver;
* // fill A and b;
* // Compute the ordering permutation vector from the structural pattern of A
* solver.analyzePattern(A);
@@ -194,13 +194,13 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- int nrhs = B.cols();
+ Index nrhs = B.cols();
Index n = B.rows();
// Permute the right hand side to form X = Pr*B
// on return, X is overwritten by the computed solution
X.resize(n,nrhs);
- for(int j = 0; j < nrhs; ++j)
+ for(Index j = 0; j < nrhs; ++j)
X.col(j) = m_perm_r * B.col(j);
//Forward substitution with L
@@ -208,7 +208,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
this->matrixL().solveInPlace(X);
// Backward solve with U
- for (int k = m_Lstore.nsuper(); k >= 0; k--)
+ for (Index k = m_Lstore.nsuper(); k >= 0; k--)
{
Index fsupc = m_Lstore.supToCol()[k];
Index lda = m_Lstore.colIndexPtr()[fsupc+1] - m_Lstore.colIndexPtr()[fsupc]; // leading dimension
@@ -217,7 +217,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
if (nsupc == 1)
{
- for (int j = 0; j < nrhs; j++)
+ for (Index j = 0; j < nrhs; j++)
{
X(fsupc, j) /= m_Lstore.valuePtr()[luptr];
}
@@ -229,11 +229,11 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
U = A.template triangularView<Upper>().solve(U);
}
- for (int j = 0; j < nrhs; ++j)
+ for (Index j = 0; j < nrhs; ++j)
{
- for (int jcol = fsupc; jcol < fsupc + nsupc; jcol++)
+ for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
{
- typename MappedSparseMatrix<Scalar>::InnerIterator it(m_Ustore, jcol);
+ typename MappedSparseMatrix<Scalar,ColMajor, Index>::InnerIterator it(m_Ustore, jcol);
for ( ; it; ++it)
{
Index irow = it.index();
@@ -244,7 +244,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
} // End For U-solve
// Permute back the solution
- for (int j = 0; j < nrhs; ++j)
+ for (Index j = 0; j < nrhs; ++j)
X.col(j) = m_perm_c.inverse() * X.col(j);
return true;
@@ -270,7 +270,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
std::string m_lastError;
NCMatrix m_mat; // The input (permuted ) matrix
SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
- MappedSparseMatrix<Scalar> m_Ustore; // The upper triangular matrix
+ MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
PermutationType m_perm_c; // Column permutation
PermutationType m_perm_r ; // Row permutation
IndexVector m_etree; // Column elimination tree
@@ -280,9 +280,9 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
// SparseLU options
bool m_symmetricmode;
// values for performance
- internal::perfvalues m_perfv;
+ internal::perfvalues<Index> m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
- int m_nnzL, m_nnzU; // Nonzeros in L and U factors
+ Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
private:
// Copy constructor
@@ -317,7 +317,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
if (m_perm_c.size()) {
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
//Then, permute only the column pointers
- for (int i = 0; i < mat.cols(); i++)
+ for (Index i = 0; i < mat.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
@@ -335,14 +335,14 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
// Renumber etree in postorder
- int m = m_mat.cols();
+ Index m = m_mat.cols();
iwork.resize(m+1);
- for (int i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
+ for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
m_etree = iwork;
// Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree
PermutationType post_perm(m);
- for (int i = 0; i < m; i++)
+ for (Index i = 0; i < m; i++)
post_perm.indices()(i) = post(i);
// Combine the two permutations : postorder the permutation for future use
@@ -393,7 +393,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
//Then, permute only the column pointers
- for (int i = 0; i < matrix.cols(); i++)
+ for (Index i = 0; i < matrix.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i+1] - matrix.outerIndexPtr()[i];
@@ -402,16 +402,16 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
else
{ //FIXME This should not be needed if the empty permutation is handled transparently
m_perm_c.resize(matrix.cols());
- for(int i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
+ for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
}
- int m = m_mat.rows();
- int n = m_mat.cols();
- int nnz = m_mat.nonZeros();
- int maxpanel = m_perfv.panel_size * m;
+ Index m = m_mat.rows();
+ Index n = m_mat.cols();
+ Index nnz = m_mat.nonZeros();
+ Index maxpanel = m_perfv.panel_size * m;
// Allocate working storage common to the factor routines
- int lwork = 0;
- int info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
+ Index lwork = 0;
+ Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
if (info)
{
m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
@@ -458,17 +458,17 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Work on one 'panel' at a time. A panel is one of the following :
// (a) a relaxed supernode at the bottom of the etree, or
// (b) panel_size contiguous columns, <panel_size> defined by the user
- int jcol;
+ Index jcol;
IndexVector panel_histo(n);
Index pivrow; // Pivotal row number in the original row matrix
- int nseg1; // Number of segments in U-column above panel row jcol
- int nseg; // Number of segments in each U-column
- int irep;
- int i, k, jj;
+ Index nseg1; // Number of segments in U-column above panel row jcol
+ Index nseg; // Number of segments in each U-column
+ Index irep;
+ Index i, k, jj;
for (jcol = 0; jcol < n; )
{
// Adjust panel size so that a panel won't overlap with the next relaxed snode.
- int panel_size = m_perfv.panel_size; // upper bound on panel width
+ Index panel_size = m_perfv.panel_size; // upper bound on panel width
for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
{
if (relax_end(k) != emptyIdxLU)
@@ -559,7 +559,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
// Create the column major upper sparse matrix U;
- new (&m_Ustore) MappedSparseMatrix<Scalar> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
+ new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
m_info = Success;
m_factorizationIsOk = true;
diff --git a/Eigen/src/SparseLU/SparseLUImpl.h b/Eigen/src/SparseLU/SparseLUImpl.h
index 96b3adb2b..229a77843 100644
--- a/Eigen/src/SparseLU/SparseLUImpl.h
+++ b/Eigen/src/SparseLU/SparseLUImpl.h
@@ -30,29 +30,29 @@ class SparseLUImpl
protected:
template <typename VectorType>
- int expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions);
- int memInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu);
+ Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions);
+ Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu);
template <typename VectorType>
- int memXpand(VectorType& vec, int& maxlen, int nbElts, MemType memtype, int& num_expansions);
- void heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
- void relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
- int snode_dfs(const int jcol, const int kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
- int snode_bmod (const int jcol, const int fsupc, ScalarVector& dense, GlobalLU_t& glu);
- int pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu);
+ Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions);
+ void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
+ Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
+ Index pivotL(const Index jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
template <typename Traits>
- void dfs_kernel(const int jj, IndexVector& perm_r,
- int& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+ void dfs_kernel(const Index jj, IndexVector& perm_r,
+ Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
- IndexVector& xplore, GlobalLU_t& glu, int& nextl_col, int krow, Traits& traits);
- void panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+ IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);
+ void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
- void panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
- int column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
- int column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu);
- int copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
- void pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
- void countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu);
- void fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu);
+ void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
+ Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+ Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu);
+ Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
+ void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
+ void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu);
+ void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu);
template<typename , typename >
friend struct column_dfs_traits;
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
index f82826cbe..6d9570d19 100644
--- a/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -61,11 +61,11 @@ inline Index LUTempSpace(Index&m, Index& w)
*/
template <typename Scalar, typename Index>
template <typename VectorType>
-int SparseLUImpl<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions)
+Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions)
{
float alpha = 1.5; // Ratio of the memory increase
- int new_len; // New size of the allocated memory
+ Index new_len; // New size of the allocated memory
if(num_expansions == 0 || keep_prev)
new_len = length ; // First time allocate requested
@@ -96,7 +96,7 @@ int SparseLUImpl<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts
else
{
// Reduce the size and increase again
- int tries = 0; // Number of attempts
+ Index tries = 0; // Number of attempts
do
{
alpha = (alpha + 1)/2;
@@ -136,9 +136,9 @@ int SparseLUImpl<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts
* \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
*/
template <typename Scalar, typename Index>
-int SparseLUImpl<Scalar,Index>::memInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu)
{
- int& num_expansions = glu.num_expansions; //No memory expansions so far
+ Index& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
glu.nzumax = glu.nzlumax = (std::max)(fillratio * annz, m*n); // estimated number of nonzeros in U
glu.nzlmax = (std::max)(1., fillratio/4.) * annz; // estimated nnz in L factor
@@ -148,7 +148,7 @@ int SparseLUImpl<Scalar,Index>::memInit(int m, int n, int annz, int lwork, int f
tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
if (lwork == emptyIdxLU)
{
- int estimated_size;
+ Index estimated_size;
estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace
+ (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n;
return estimated_size;
@@ -202,9 +202,9 @@ int SparseLUImpl<Scalar,Index>::memInit(int m, int n, int annz, int lwork, int f
*/
template <typename Scalar, typename Index>
template <typename VectorType>
-int SparseLUImpl<Scalar,Index>::memXpand(VectorType& vec, int& maxlen, int nbElts, MemType memtype, int& num_expansions)
+Index SparseLUImpl<Scalar,Index>::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions)
{
- int failed_size;
+ Index failed_size;
if (memtype == USUB)
failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
else
diff --git a/Eigen/src/SparseLU/SparseLU_Structs.h b/Eigen/src/SparseLU/SparseLU_Structs.h
index eb6930522..24d6bf179 100644
--- a/Eigen/src/SparseLU/SparseLU_Structs.h
+++ b/Eigen/src/SparseLU/SparseLU_Structs.h
@@ -89,19 +89,20 @@ struct LU_GlobalLU_t {
IndexVector xusub; // Pointers to the beginning of each column of U in ucol
Index nzumax; // Current max size of ucol
Index n; // Number of columns in the matrix
- int num_expansions;
+ Index num_expansions;
};
-// Values to set for performance
+// Values to set for performance
+template <typename Index>
struct perfvalues {
- int panel_size; // a panel consists of at most <panel_size> consecutive columns
- int relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
+ Index panel_size; // a panel consists of at most <panel_size> consecutive columns
+ Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
// in a subtree of the elimination tree is less than relax, this subtree is considered
// as one supernode regardless of the row structures of those columns
- int maxsuper; // The maximum size for a supernode in complete LU
- int rowblk; // The minimum row dimension for 2-D blocking to be used;
- int colblk; // The minimum column dimension for 2-D blocking to be used;
- int fillfactor; // The estimated fills factors for L and U, compared with A
+ Index maxsuper; // The maximum size for a supernode in complete LU
+ Index rowblk; // The minimum row dimension for 2-D blocking to be used;
+ Index colblk; // The minimum column dimension for 2-D blocking to be used;
+ Index fillfactor; // The estimated fills factors for L and U, compared with A
};
} // end namespace internal
diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
index ac37e56c5..cb9f0587b 100644
--- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
+++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -42,7 +42,7 @@ class MappedSuperNodalMatrix
{
}
- MappedSuperNodalMatrix(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+ MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
@@ -58,7 +58,7 @@ class MappedSuperNodalMatrix
* FIXME This class will be modified such that it can be use in the course
* of the factorization.
*/
- void setInfos(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+ void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
m_row = m;
@@ -75,12 +75,12 @@ class MappedSuperNodalMatrix
/**
* Number of rows
*/
- int rows() { return m_row; }
+ Index rows() { return m_row; }
/**
* Number of columns
*/
- int cols() { return m_col; }
+ Index cols() { return m_col; }
/**
* Return the array of nonzero values packed by column
@@ -148,7 +148,7 @@ class MappedSuperNodalMatrix
/**
* Return the number of supernodes
*/
- int nsuper() const
+ Index nsuper() const
{
return m_nsuper;
}
@@ -233,11 +233,11 @@ template<typename Dest>
void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
{
Index n = X.rows();
- int nrhs = X.cols();
+ Index nrhs = X.cols();
const Scalar * Lval = valuePtr(); // Nonzero values
Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
work.setZero();
- for (int k = 0; k <= nsuper(); k ++)
+ for (Index k = 0; k <= nsuper(); k ++)
{
Index fsupc = supToCol()[k]; // First column of the current supernode
Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column
@@ -248,7 +248,7 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
if (nsupc == 1 )
{
- for (int j = 0; j < nrhs; j++)
+ for (Index j = 0; j < nrhs; j++)
{
InnerIterator it(*this, fsupc);
++it; // Skip the diagonal element
@@ -275,10 +275,10 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
work.block(0, 0, nrow, nrhs) = A * U;
//Begin Scatter
- for (int j = 0; j < nrhs; j++)
+ for (Index j = 0; j < nrhs; j++)
{
Index iptr = istart + nsupc;
- for (int i = 0; i < nrow; i++)
+ for (Index i = 0; i < nrow; i++)
{
irow = rowIndex()[iptr];
X(irow, j) -= work(i, j); // Scatter operation
diff --git a/Eigen/src/SparseLU/SparseLU_Utils.h b/Eigen/src/SparseLU/SparseLU_Utils.h
index 33cc9c7ac..15352ac33 100644
--- a/Eigen/src/SparseLU/SparseLU_Utils.h
+++ b/Eigen/src/SparseLU/SparseLU_Utils.h
@@ -18,13 +18,13 @@ namespace internal {
* \brief Count Nonzero elements in the factors
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)
{
nnzL = 0;
nnzU = (glu.xusub)(n);
- int nsuper = (glu.supno)(n);
- int jlen;
- int i, j, fsupc;
+ Index nsuper = (glu.supno)(n);
+ Index jlen;
+ Index i, j, fsupc;
if (n <= 0 ) return;
// For each supernode
for (i = 0; i <= nsuper; i++)
@@ -49,12 +49,12 @@ void SparseLUImpl<Scalar,Index>::countnz(const int n, int& nnzL, int& nnzU, Glob
*
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)
{
- int fsupc, i, j, k, jstart;
+ Index fsupc, i, j, k, jstart;
- int nextl = 0;
- int nsuper = (glu.supno)(n);
+ Index nextl = 0;
+ Index nsuper = (glu.supno)(n);
// For each supernode
for (i = 0; i <= nsuper; i++)
diff --git a/Eigen/src/SparseLU/SparseLU_column_bmod.h b/Eigen/src/SparseLU/SparseLU_column_bmod.h
index 44ec61ac4..f24bd87d3 100644
--- a/Eigen/src/SparseLU/SparseLU_column_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_column_bmod.h
@@ -50,11 +50,11 @@ namespace internal {
*
*/
template <typename Scalar, typename Index>
-int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)
{
- int jsupno, k, ksub, krep, ksupno;
- int lptr, nrow, isub, irow, nextlu, new_next, ufirst;
- int fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
+ Index jsupno, k, ksub, krep, ksupno;
+ Index lptr, nrow, isub, irow, nextlu, new_next, ufirst;
+ Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
/* krep = representative of current k-th supernode
* fsupc = first supernodal column
* nsupc = number of columns in a supernode
@@ -67,10 +67,10 @@ int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, Bloc
jsupno = glu.supno(jcol);
// For each nonzero supernode segment of U[*,j] in topological order
k = nseg - 1;
- int d_fsupc; // distance between the first column of the current panel and the
+ Index d_fsupc; // distance between the first column of the current panel and the
// first column of the current snode
- int fst_col; // First column within small LU update
- int segsize;
+ Index fst_col; // First column within small LU update
+ Index segsize;
for (ksub = 0; ksub < nseg; ksub++)
{
krep = segrep(k); k--;
@@ -95,7 +95,7 @@ int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, Bloc
nsupc = krep - fst_col + 1;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
nrow = nsupr - d_fsupc - nsupc;
- int lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
+ Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
// Perform a triangular solver and block update,
@@ -113,9 +113,9 @@ int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, Bloc
fsupc = glu.xsup(jsupno);
// copy the SPA dense into L\U[*,j]
- int mem;
+ Index mem;
new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc);
- int offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
+ Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
if(offset)
new_next += offset;
while (new_next > glu.nzlumax )
@@ -161,7 +161,7 @@ int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, Bloc
// points to the beginning of jcol in snode L\U(jsupno)
ufirst = glu.xlusup(jcol) + d_fsupc;
- int lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
+ Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
u = A.template triangularView<UnitLower>().solve(u);
diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h
index 723598265..bd450ddc7 100644
--- a/Eigen/src/SparseLU/SparseLU_column_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -47,7 +47,7 @@ struct column_dfs_traits
{
return true;
}
- void mem_expand(IndexVector& lsub, int& nextl, int chmark)
+ void mem_expand(IndexVector& lsub, Index& nextl, Index chmark)
{
if (nextl >= m_glu.nzlmax)
m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions);
@@ -55,8 +55,8 @@ struct column_dfs_traits
}
enum { ExpandMem = true };
- int m_jcol;
- int& m_jsuper_ref;
+ Index m_jcol;
+ Index& m_jsuper_ref;
typename SparseLUImpl<Scalar, Index>::GlobalLU_t& m_glu;
SparseLUImpl<Scalar, Index>& m_luImpl;
};
@@ -90,22 +90,22 @@ struct column_dfs_traits
*
*/
template <typename Scalar, typename Index>
-int SparseLUImpl<Scalar,Index>::column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
- int jsuper = glu.supno(jcol);
- int nextl = glu.xlsub(jcol);
+ Index jsuper = glu.supno(jcol);
+ Index nextl = glu.xlsub(jcol);
VectorBlock<IndexVector> marker2(marker, 2*m, m);
column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
// For each nonzero in A(*,jcol) do dfs
- for (int k = 0; lsub_col[k] != emptyIdxLU; k++)
+ for (Index k = 0; lsub_col[k] != emptyIdxLU; k++)
{
- int krow = lsub_col(k);
+ Index krow = lsub_col(k);
lsub_col(k) = emptyIdxLU;
- int kmark = marker2(krow);
+ Index kmark = marker2(krow);
// krow was visited before, go to the next nonz;
if (kmark == jcol) continue;
@@ -114,10 +114,10 @@ int SparseLUImpl<Scalar,Index>::column_dfs(const int m, const int jcol, IndexVec
xplore, glu, nextl, krow, traits);
} // for each nonzero ...
- int fsupc, jptr, jm1ptr, ito, ifrom, istop;
- int nsuper = glu.supno(jcol);
- int jcolp1 = jcol + 1;
- int jcolm1 = jcol - 1;
+ Index fsupc, jptr, jm1ptr, ito, ifrom, istop;
+ Index nsuper = glu.supno(jcol);
+ Index jcolp1 = jcol + 1;
+ Index jcolm1 = jcol - 1;
// check to see if j belongs in the same supernode as j-1
if ( jcol == 0 )
diff --git a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
index dc6c9d6a2..170610d9f 100644
--- a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
+++ b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
@@ -47,14 +47,14 @@ namespace internal {
*
*/
template <typename Scalar, typename Index>
-int SparseLUImpl<Scalar,Index>::copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
{
Index ksub, krep, ksupno;
Index jsupno = glu.supno(jcol);
// For each nonzero supernode segment of U[*,j] in topological order
- int k = nseg - 1, i;
+ Index k = nseg - 1, i;
Index nextu = glu.xusub(jcol);
Index kfnz, isub, segsize;
Index new_next,irow;
diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
index ef1272ea9..7a4e4305a 100644
--- a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
+++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
@@ -43,14 +43,14 @@ namespace internal {
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
+void SparseLUImpl<Scalar,Index>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
// The etree may not be postordered, but its heap ordered
IndexVector post;
internal::treePostorder(n, et, post); // Post order etree
IndexVector inv_post(n+1);
- int i;
+ Index i;
for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
// Renumber etree in postorder
@@ -65,7 +65,7 @@ void SparseLUImpl<Scalar,Index>::heap_relax_snode (const int n, IndexVector& et,
// compute the number of descendants of each node in the etree
relax_end.setConstant(emptyIdxLU);
- int j, parent;
+ Index j, parent;
descendants.setZero();
for (j = 0; j < n; j++)
{
@@ -74,11 +74,11 @@ void SparseLUImpl<Scalar,Index>::heap_relax_snode (const int n, IndexVector& et,
descendants(parent) += descendants(j) + 1;
}
// Identify the relaxed supernodes by postorder traversal of the etree
- int snode_start; // beginning of a snode
- int k;
- int nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
- int nsuper_et = 0; // Number of relaxed snodes in the original etree
- int l;
+ Index snode_start; // beginning of a snode
+ Index k;
+ Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
+ Index nsuper_et = 0; // Number of relaxed snodes in the original etree
+ Index l;
for (j = 0; j < n; )
{
parent = et(j);
diff --git a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
index 3358853fb..91f37e5af 100644
--- a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
@@ -30,15 +30,16 @@ namespace internal {
*/
template <int SegSizeAtCompileTime> struct LU_kernel_bmod
{
- template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
- EIGEN_DONT_INLINE static void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, int& luptr, const int lda, const int nrow, IndexVector& lsub, const int lptr, const int no_zeros)
+ template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+ EIGEN_DONT_INLINE static void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
{
typedef typename ScalarVector::Scalar Scalar;
// First, copy U[*,j] segment from dense(*) to tempv(*)
// The result of triangular solve is in tempv[*];
// The result of matric-vector update is in dense[*]
- int isub = lptr + no_zeros;
- int i, irow;
+ Index isub = lptr + no_zeros;
+ int i;
+ Index irow;
for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
{
irow = lsub(isub);
@@ -55,11 +56,11 @@ template <int SegSizeAtCompileTime> struct LU_kernel_bmod
// Dense matrix-vector product y <-- B*x
luptr += segsize;
- const int PacketSize = internal::packet_traits<Scalar>::size;
- int ldl = internal::first_multiple(nrow, PacketSize);
+ const Index PacketSize = internal::packet_traits<Scalar>::size;
+ Index ldl = internal::first_multiple(nrow, PacketSize);
Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
- int aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
- int aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
+ Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
+ Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
l.setZero();
@@ -84,20 +85,20 @@ template <int SegSizeAtCompileTime> struct LU_kernel_bmod
template <> struct LU_kernel_bmod<1>
{
- template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>
- EIGEN_DONT_INLINE static void run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, int& luptr, const int lda, const int nrow,
- IndexVector& lsub, const int lptr, const int no_zeros)
+ template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+ EIGEN_DONT_INLINE static void run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow,
+ IndexVector& lsub, const Index lptr, const Index no_zeros)
{
typedef typename ScalarVector::Scalar Scalar;
Scalar f = dense(lsub(lptr + no_zeros));
luptr += lda * no_zeros + no_zeros + 1;
const Scalar* a(lusup.data() + luptr);
- const typename IndexVector::Scalar* irow(lsub.data()+lptr + no_zeros + 1);
- int i = 0;
+ const /*typename IndexVector::Scalar*/Index* irow(lsub.data()+lptr + no_zeros + 1);
+ Index i = 0;
for (; i+1 < nrow; i+=2)
{
- int i0 = *(irow++);
- int i1 = *(irow++);
+ Index i0 = *(irow++);
+ Index i1 = *(irow++);
Scalar a0 = *(a++);
Scalar a1 = *(a++);
Scalar d0 = dense.coeff(i0);
diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index d00709d6e..da0e0fc3c 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -53,19 +53,19 @@ namespace internal {
*
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int jcol,
- const int nseg, ScalarVector& dense, ScalarVector& tempv,
+void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const Index jcol,
+ const Index nseg, ScalarVector& dense, ScalarVector& tempv,
IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
{
- int ksub,jj,nextl_col;
- int fsupc, nsupc, nsupr, nrow;
- int krep, kfnz;
- int lptr; // points to the row subscripts of a supernode
- int luptr; // ...
- int segsize,no_zeros ;
+ Index ksub,jj,nextl_col;
+ Index fsupc, nsupc, nsupr, nrow;
+ Index krep, kfnz;
+ Index lptr; // points to the row subscripts of a supernode
+ Index luptr; // ...
+ Index segsize,no_zeros ;
// For each nonz supernode segment of U[*,j] in topological order
- int k = nseg - 1;
+ Index k = nseg - 1;
const Index PacketSize = internal::packet_traits<Scalar>::size;
for (ksub = 0; ksub < nseg; ksub++)
@@ -83,8 +83,8 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
lptr = glu.xlsub(fsupc);
// loop over the panel columns to detect the actual number of columns and rows
- int u_rows = 0;
- int u_cols = 0;
+ Index u_rows = 0;
+ Index u_cols = 0;
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
@@ -101,11 +101,11 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
if(nsupc >= 2)
{
- int ldu = internal::first_multiple<Index>(u_rows, PacketSize);
+ Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
// gather U
- int u_col = 0;
+ Index u_col = 0;
for (jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj-jcol) * m;
@@ -120,12 +120,12 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
luptr = glu.xlusup(fsupc);
no_zeros = kfnz - fsupc;
- int isub = lptr + no_zeros;
- int off = u_rows-segsize;
- for (int i = 0; i < off; i++) U(i,u_col) = 0;
- for (int i = 0; i < segsize; i++)
+ Index isub = lptr + no_zeros;
+ Index off = u_rows-segsize;
+ for (Index i = 0; i < off; i++) U(i,u_col) = 0;
+ for (Index i = 0; i < segsize; i++)
{
- int irow = glu.lsub(isub);
+ Index irow = glu.lsub(isub);
U(i+off,u_col) = dense_col(irow);
++isub;
}
@@ -133,7 +133,7 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
}
// solve U = A^-1 U
luptr = glu.xlusup(fsupc);
- int lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
+ Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
no_zeros = (krep - u_rows + 1) - fsupc;
luptr += lda * no_zeros + no_zeros;
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
@@ -144,8 +144,8 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
- int ldl = internal::first_multiple<Index>(nrow, PacketSize);
- int offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
+ Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
+ Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
L.setZero();
@@ -165,20 +165,20 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
segsize = krep - kfnz + 1;
no_zeros = kfnz - fsupc;
- int isub = lptr + no_zeros;
+ Index isub = lptr + no_zeros;
- int off = u_rows-segsize;
- for (int i = 0; i < segsize; i++)
+ Index off = u_rows-segsize;
+ for (Index i = 0; i < segsize; i++)
{
- int irow = glu.lsub(isub++);
+ Index irow = glu.lsub(isub++);
dense_col(irow) = U.coeff(i+off,u_col);
U.coeffRef(i+off,u_col) = 0;
}
// Scatter l into SPA dense[]
- for (int i = 0; i < nrow; i++)
+ for (Index i = 0; i < nrow; i++)
{
- int irow = glu.lsub(isub++);
+ Index irow = glu.lsub(isub++);
dense_col(irow) -= L.coeff(i,u_col);
L.coeffRef(i,u_col) = 0;
}
@@ -201,7 +201,7 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int
segsize = krep - kfnz + 1;
luptr = glu.xlusup(fsupc);
- int lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
+ Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
// Perform a trianglar solve and block update,
// then scatter the result of sup-col update to dense[]
diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
index b3d9775fa..dc0054efd 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
@@ -50,7 +50,7 @@ struct panel_dfs_traits
}
return false;
}
- void mem_expand(IndexVector& /*glu.lsub*/, int /*nextl*/, int /*chmark*/) {}
+ void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {}
enum { ExpandMem = false };
Index m_jcol;
Index* m_marker;
@@ -59,19 +59,19 @@ struct panel_dfs_traits
template <typename Scalar, typename Index>
template <typename Traits>
-void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
- int& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+void SparseLUImpl<Scalar,Index>::dfs_kernel(const Index jj, IndexVector& perm_r,
+ Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu,
- int& nextl_col, int krow, Traits& traits
+ Index& nextl_col, Index krow, Traits& traits
)
{
- int kmark = marker(krow);
+ Index kmark = marker(krow);
// For each unmarked krow of jj
marker(krow) = jj;
- int kperm = perm_r(krow);
+ Index kperm = perm_r(krow);
if (kperm == emptyIdxLU ) {
// krow is in L : place it in structure of L(*, jj)
panel_lsub(nextl_col++) = krow; // krow is indexed into A
@@ -83,9 +83,9 @@ void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
// krow is in U : if its supernode-representative krep
// has been explored, update repfnz(*)
// krep = supernode representative of the current row
- int krep = glu.xsup(glu.supno(kperm)+1) - 1;
+ Index krep = glu.xsup(glu.supno(kperm)+1) - 1;
// First nonzero element in the current column:
- int myfnz = repfnz_col(krep);
+ Index myfnz = repfnz_col(krep);
if (myfnz != emptyIdxLU )
{
@@ -96,26 +96,26 @@ void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
else
{
// Otherwise, perform dfs starting at krep
- int oldrep = emptyIdxLU;
+ Index oldrep = emptyIdxLU;
parent(krep) = oldrep;
repfnz_col(krep) = kperm;
- int xdfs = glu.xlsub(krep);
- int maxdfs = xprune(krep);
+ Index xdfs = glu.xlsub(krep);
+ Index maxdfs = xprune(krep);
- int kpar;
+ Index kpar;
do
{
// For each unmarked kchild of krep
while (xdfs < maxdfs)
{
- int kchild = glu.lsub(xdfs);
+ Index kchild = glu.lsub(xdfs);
xdfs++;
- int chmark = marker(kchild);
+ Index chmark = marker(kchild);
if (chmark != jj )
{
marker(kchild) = jj;
- int chperm = perm_r(kchild);
+ Index chperm = perm_r(kchild);
if (chperm == emptyIdxLU)
{
@@ -128,7 +128,7 @@ void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
// case kchild is in U :
// chrep = its supernode-rep. If its rep has been explored,
// update its repfnz(*)
- int chrep = glu.xsup(glu.supno(chperm)+1) - 1;
+ Index chrep = glu.xsup(glu.supno(chperm)+1) - 1;
myfnz = repfnz_col(chrep);
if (myfnz != emptyIdxLU)
@@ -216,9 +216,9 @@ void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
- int nextl_col; // Next available position in panel_lsub[*,jj]
+ Index nextl_col; // Next available position in panel_lsub[*,jj]
// Initialize pointers
VectorBlock<IndexVector> marker1(marker, m, m);
@@ -227,7 +227,7 @@ void SparseLUImpl<Scalar,Index>::panel_dfs(const int m, const int w, const int j
panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
// For each column in the panel
- for (int jj = jcol; jj < jcol + w; jj++)
+ for (Index jj = jcol; jj < jcol + w; jj++)
{
nextl_col = (jj - jcol) * m;
@@ -238,10 +238,10 @@ void SparseLUImpl<Scalar,Index>::panel_dfs(const int m, const int w, const int j
// For each nnz in A[*, jj] do depth first search
for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
{
- int krow = it.row();
+ Index krow = it.row();
dense_col(krow) = it.value();
- int kmark = marker(krow);
+ Index kmark = marker(krow);
if (kmark == jj)
continue; // krow visited before, go to the next nonzero
diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h
index c6d7db5ac..cd6dfd0d4 100644
--- a/Eigen/src/SparseLU/SparseLU_pivotL.h
+++ b/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -57,7 +57,7 @@ namespace internal {
*
*/
template <typename Scalar, typename Index>
-int SparseLUImpl<Scalar,Index>::pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu)
+Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
{
Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h
index 03da95dbb..5a855f82f 100644
--- a/Eigen/src/SparseLU/SparseLU_pruneL.h
+++ b/Eigen/src/SparseLU/SparseLU_pruneL.h
@@ -50,11 +50,11 @@ namespace internal {
*
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
{
// For each supernode-rep irep in U(*,j]
- int jsupno = glu.supno(jcol);
- int i,irep,irep1;
+ Index jsupno = glu.supno(jcol);
+ Index i,irep,irep1;
bool movnum, do_prune = false;
Index kmin, kmax, minloc, maxloc,krow;
for (i = 0; i < nseg; i++)
diff --git a/Eigen/src/SparseLU/SparseLU_relax_snode.h b/Eigen/src/SparseLU/SparseLU_relax_snode.h
index f73afdf3d..58ec32e27 100644
--- a/Eigen/src/SparseLU/SparseLU_relax_snode.h
+++ b/Eigen/src/SparseLU/SparseLU_relax_snode.h
@@ -44,11 +44,11 @@ namespace internal {
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename Index>
-void SparseLUImpl<Scalar,Index>::relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
+void SparseLUImpl<Scalar,Index>::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
// compute the number of descendants of each node in the etree
- int j, parent;
+ Index j, parent;
relax_end.setConstant(emptyIdxLU);
descendants.setZero();
for (j = 0; j < n; j++)
@@ -58,7 +58,7 @@ void SparseLUImpl<Scalar,Index>::relax_snode (const int n, IndexVector& et, cons
descendants(parent) += descendants(j) + 1;
}
// Identify the relaxed supernodes by postorder traversal of the etree
- int snode_start; // beginning of a snode
+ Index snode_start; // beginning of a snode
for (j = 0; j < n; )
{
parent = et(j);
diff --git a/test/sparselu.cpp b/test/sparselu.cpp
index 2a73320eb..5b55d51e2 100644
--- a/test/sparselu.cpp
+++ b/test/sparselu.cpp
@@ -29,9 +29,11 @@ template<typename T> void test_sparselu_T()
{
SparseLU<SparseMatrix<T, ColMajor>, COLAMDOrdering<int> > sparselu_colamd;
SparseLU<SparseMatrix<T, ColMajor>, AMDOrdering<int> > sparselu_amd;
+ SparseLU<SparseMatrix<T, ColMajor, long int>, NaturalOrdering<long int> > sparselu_natural;
check_sparse_square_solving(sparselu_colamd);
check_sparse_square_solving(sparselu_amd);
+ check_sparse_square_solving(sparselu_natural);
}
void test_sparselu()