author     Desire NUENTSA <desire.nuentsa_wakam@inria.fr>  2013-01-25 20:38:26 +0100
committer  Desire NUENTSA <desire.nuentsa_wakam@inria.fr>  2013-01-25 20:38:26 +0100
commit     7f0f7ab5b4a0c13c0d4aba6ab6469d3e9a2f8868 (patch)
tree       0f53746ee6ae52da75523064266a5c1be3aa6695 /Eigen
parent     d58056bde4c1bd80497af4448a8ace0508e1bb76 (diff)
Add additional methods in SparseLU and Improve the naming conventions
Diffstat (limited to 'Eigen')
-rw-r--r--  Eigen/SparseLU                                      9
-rw-r--r--  Eigen/src/SparseLU/SparseLU.h                     200
-rw-r--r--  Eigen/src/SparseLU/SparseLUBase.h                  58
-rw-r--r--  Eigen/src/SparseLU/SparseLUImpl.h                  64
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Memory.h               43
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Structs.h               9
-rw-r--r--  Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h (renamed from Eigen/src/SparseLU/SparseLU_Matrix.h)  62
-rw-r--r--  Eigen/src/SparseLU/SparseLU_Utils.h                 8
-rw-r--r--  Eigen/src/SparseLU/SparseLU_column_bmod.h           8
-rw-r--r--  Eigen/src/SparseLU/SparseLU_column_dfs.h           37
-rw-r--r--  Eigen/src/SparseLU/SparseLU_copy_to_ucol.h         10
-rw-r--r--  Eigen/src/SparseLU/SparseLU_heap_relax_snode.h      8
-rw-r--r--  Eigen/src/SparseLU/SparseLU_kernel_bmod.h           4
-rw-r--r--  Eigen/src/SparseLU/SparseLU_panel_bmod.h           18
-rw-r--r--  Eigen/src/SparseLU/SparseLU_panel_dfs.h            28
-rw-r--r--  Eigen/src/SparseLU/SparseLU_pivotL.h                6
-rw-r--r--  Eigen/src/SparseLU/SparseLU_pruneL.h               10
-rw-r--r--  Eigen/src/SparseLU/SparseLU_relax_snode.h           9
18 files changed, 334 insertions, 257 deletions
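
For orientation before reading the patch, the sketch below exercises the public interface as it stands after this commit: setPivotThreshold() (renamed from diagPivotThresh()), matrixL().solveInPlace(), lastErrorMessage(), and solve(). It is a minimal illustration, not part of the commit; the matrix values and the choice of COLAMDOrdering are assumptions made for the example.

#include <iostream>
#include <Eigen/SparseCore>
#include <Eigen/OrderingMethods>
#include <Eigen/SparseLU>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  // Small illustrative system; the values are arbitrary.
  SpMat A(3,3);
  A.insert(0,0) = 4.0;  A.insert(0,2) = 1.0;
  A.insert(1,1) = 3.0;
  A.insert(2,0) = 1.0;  A.insert(2,2) = 2.0;
  A.makeCompressed();

  Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > lu;
  lu.setPivotThreshold(0.5);        // renamed from diagPivotThresh() by this patch
  lu.analyzePattern(A);
  lu.factorize(A);
  if (lu.info() != Eigen::Success)
  {
    // Errors are now reported through a string instead of being printed to std::cerr.
    std::cerr << lu.lastErrorMessage() << std::endl;
    return 1;
  }

  Eigen::VectorXd b(3), x(3), y(3);
  b << 1.0, 2.0, 3.0;
  x = lu.solve(b);                  // full solve using the L and U factors

  // Triangular solve with the supernodal L factor, as documented in matrixL().
  y = b;
  lu.matrixL().solveInPlace(y);

  std::cout << "x = " << x.transpose() << std::endl;
  return 0;
}
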
diff --git a/Eigen/SparseLU b/Eigen/SparseLU
index 69cc6beca..38b38b531 100644
--- a/Eigen/SparseLU
+++ b/Eigen/SparseLU
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -14,7 +15,9 @@
/**
* \defgroup SparseLU_Module SparseLU module
- *
+ * This module defines a supernodal factorization of general sparse matrices.
+ * The code is fully optimized for supernode-panel updates with specialized kernels.
+ * Please, see the documentation of the SparseLU class for more details.
*/
// Ordering interface
@@ -23,8 +26,8 @@
#include "src/SparseLU/SparseLU_gemm_kernel.h"
#include "src/SparseLU/SparseLU_Structs.h"
-#include "src/SparseLU/SparseLU_Matrix.h"
-#include "src/SparseLU/SparseLUBase.h"
+#include "src/SparseLU/SparseLU_SupernodalMatrix.h"
+#include "src/SparseLU/SparseLUImpl.h"
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseLU/SparseLU_Memory.h"
#include "src/SparseLU/SparseLU_heap_relax_snode.h"
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
index 175794811..b1a74581a 100644
--- a/Eigen/src/SparseLU/SparseLU.h
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -13,6 +14,8 @@
namespace Eigen {
+template <typename _MatrixType, typename _OrderingType> class SparseLU;
+template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
/** \ingroup SparseLU_Module
* \class SparseLU
*
@@ -65,7 +68,7 @@ namespace Eigen {
* \sa \ref OrderingMethods_Module
*/
template <typename _MatrixType, typename _OrderingType>
-class SparseLU
+class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
{
public:
typedef _MatrixType MatrixType;
@@ -74,17 +77,18 @@ class SparseLU
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
- typedef SuperNodalMatrix<Scalar, Index> SCMatrix;
+ typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
typedef Matrix<Index,Dynamic,1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ typedef internal::SparseLUImpl<Scalar, Index> Base;
public:
- SparseLU():m_isInitialized(true),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
+ SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
{
initperfvalues();
}
- SparseLU(const MatrixType& matrix):m_isInitialized(true),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
+ SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
{
initperfvalues();
compute(matrix);
@@ -119,34 +123,22 @@ class SparseLU
m_symmetricmode = sym;
}
- /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
- void diagPivotThresh(RealScalar thresh)
+ /** Returns an expression of the matrix L, internally stored as supernodes
+ * For a triangular solve with this matrix, use
+ * \code
+ * y = b; matrixL().solveInPlace(y);
+ * \endcode
+ */
+ SparseLUMatrixLReturnType<SCMatrix> matrixL() const
{
- m_diagpivotthresh = thresh;
+ return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
}
-
- /** Return the number of nonzero elements in the L factor */
- int nnzL()
- {
- if (m_factorizationIsOk)
- return m_nnzL;
- else
- {
- std::cerr<<"Numerical factorization should be done before\n";
- return 0;
- }
- }
- /** Return the number of nonzero elements in the U factor */
- int nnzU()
+ /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
+ void setPivotThreshold(RealScalar thresh)
{
- if (m_factorizationIsOk)
- return m_nnzU;
- else
- {
- std::cerr<<"Numerical factorization should be done before\n";
- return 0;
- }
+ m_diagpivotthresh = thresh;
}
+
/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
* \sa compute()
@@ -160,6 +152,18 @@ class SparseLU
return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
}
+ /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
+ eigen_assert(rows()==B.rows()
+ && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
+ return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
+ }
/** \brief Reports whether previous computation was successful.
*
@@ -174,7 +178,13 @@ class SparseLU
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
-
+ /**
+ * \returns A string describing the type of error
+ */
+ std::string lastErrorMessage() const
+ {
+ return m_lastError;
+ }
template<typename Rhs, typename Dest>
bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &_X) const
{
@@ -194,7 +204,8 @@ class SparseLU
X.col(j) = m_perm_r * B.col(j);
//Forward substitution with L
- m_Lstore.solveInPlace(X);
+// m_Lstore.solveInPlace(X);
+ this->matrixL().solveInPlace(X);
// Backward solve with U
for (int k = m_Lstore.nsuper(); k >= 0; k--)
@@ -256,6 +267,7 @@ class SparseLU
bool m_isInitialized;
bool m_factorizationIsOk;
bool m_analysisIsOk;
+ std::string m_lastError;
NCMatrix m_mat; // The input (permuted ) matrix
SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
MappedSparseMatrix<Scalar> m_Ustore; // The upper triangular matrix
@@ -263,16 +275,15 @@ class SparseLU
PermutationType m_perm_r ; // Row permutation
IndexVector m_etree; // Column elimination tree
- LU_GlobalLU_t<IndexVector, ScalarVector> m_glu;
+ typename Base::GlobalLU_t m_glu;
- // SuperLU/SparseLU options
+ // SparseLU options
bool m_symmetricmode;
-
// values for performance
- LU_perfvalues m_perfv;
+ internal::perfvalues m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
int m_nnzL, m_nnzU; // Nonzeros in L and U factors
-
+
private:
// Copy constructor
SparseLU (SparseLU& ) {}
@@ -301,18 +312,17 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
ord(mat,m_perm_c);
// Apply the permutation to the column of the input matrix
-// m_mat = mat * m_perm_c.inverse(); //FIXME It should be less expensive here to permute only the structural pattern of the matrix
-
//First copy the whole input matrix.
m_mat = mat;
- m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
- //Then, permute only the column pointers
- for (int i = 0; i < mat.cols(); i++)
- {
- m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i];
- m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ if (m_perm_c.size()) {
+ m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
+ //Then, permute only the column pointers
+ for (int i = 0; i < mat.cols(); i++)
+ {
+ m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i];
+ m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ }
}
-
// Compute the column elimination tree of the permuted matrix
IndexVector firstRowElt;
internal::coletree(m_mat, m_etree,firstRowElt);
@@ -331,12 +341,14 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
m_etree = iwork;
// Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree
- PermutationType post_perm(m); //FIXME Use directly a constructor with post
+ PermutationType post_perm(m);
for (int i = 0; i < m; i++)
post_perm.indices()(i) = post(i);
// Combine the two permutations : postorder the permutation for future use
- m_perm_c = post_perm * m_perm_c;
+ if(m_perm_c.size()) {
+ m_perm_c = post_perm * m_perm_c;
+ }
} // end postordering
@@ -367,7 +379,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
-
+ using internal::emptyIdxLU;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");
@@ -377,12 +389,20 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Apply the column permutation computed in analyzepattern()
// m_mat = matrix * m_perm_c.inverse();
m_mat = matrix;
- m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
- //Then, permute only the column pointers
- for (int i = 0; i < matrix.cols(); i++)
+ if (m_perm_c.size())
{
- m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i];
- m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i+1] - matrix.outerIndexPtr()[i];
+ m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
+ //Then, permute only the column pointers
+ for (int i = 0; i < matrix.cols(); i++)
+ {
+ m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i];
+ m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i+1] - matrix.outerIndexPtr()[i];
+ }
+ }
+ else
+ { //FIXME This should not be needed if the empty permutation is handled transparently
+ m_perm_c.resize(matrix.cols());
+ for(int i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
}
int m = m_mat.rows();
@@ -391,10 +411,10 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
int maxpanel = m_perfv.panel_size * m;
// Allocate working storage common to the factor routines
int lwork = 0;
- int info = SparseLUBase<Scalar,Index>::LUMemInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
+ int info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
if (info)
{
- std::cerr << "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
+ m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
m_factorizationIsOk = false;
return ;
}
@@ -406,7 +426,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
IndexVector repfnz(maxpanel);
IndexVector panel_lsub(maxpanel);
IndexVector xprune(n); xprune.setZero();
- IndexVector marker(m*LU_NO_MARKER); marker.setZero();
+ IndexVector marker(m*internal::LUNoMarker); marker.setZero();
repfnz.setConstant(-1);
panel_lsub.setConstant(-1);
@@ -415,7 +435,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
ScalarVector dense;
dense.setZero(maxpanel);
ScalarVector tempv;
- tempv.setZero(LU_NUM_TEMPV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
+ tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
// Compute the inverse of perm_c
PermutationType iperm_c(m_perm_c.inverse());
@@ -423,16 +443,16 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Identify initial relaxed snodes
IndexVector relax_end(n);
if ( m_symmetricmode == true )
- SparseLUBase<Scalar,Index>::LU_heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+ Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
else
- SparseLUBase<Scalar,Index>::LU_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+ Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
m_perm_r.resize(m);
m_perm_r.indices().setConstant(-1);
marker.setConstant(-1);
- m_glu.supno(0) = IND_EMPTY; m_glu.xsup.setConstant(0);
+ m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
// Work on one 'panel' at a time. A panel is one of the following :
@@ -451,7 +471,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
int panel_size = m_perfv.panel_size; // upper bound on panel width
for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
{
- if (relax_end(k) != IND_EMPTY)
+ if (relax_end(k) != emptyIdxLU)
{
panel_size = k - jcol;
break;
@@ -461,10 +481,10 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
panel_size = n - jcol;
// Symbolic outer factorization on a panel of columns
- SparseLUBase<Scalar,Index>::LU_panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
+ Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
// Numeric sup-panel updates in topological order
- SparseLUBase<Scalar,Index>::LU_panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
+ Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
// Sparse LU within the panel, and below the panel diagonal
for ( jj = jcol; jj< jcol + panel_size; jj++)
@@ -475,10 +495,10 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
//Depth-first-search for the current column
VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
- info = SparseLUBase<Scalar,Index>::LU_column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
+ info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
if ( info )
{
- std::cerr << "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() \n";
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
@@ -486,52 +506,55 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
// Numeric updates to this column
VectorBlock<ScalarVector> dense_k(dense, k, m);
VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
- info = SparseLUBase<Scalar,Index>::LU_column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
+ info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
if ( info )
{
- std::cerr << "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() \n";
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Copy the U-segments to ucol(*)
- info = SparseLUBase<Scalar,Index>::LU_copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
+ info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
if ( info )
{
- std::cerr << "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() \n";
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Form the L-segment
- info = SparseLUBase<Scalar,Index>::LU_pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
+ info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
if ( info )
{
- std::cerr<< "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT " << info <<std::endl;
+ m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
+ std::ostringstream returnInfo;
+ returnInfo << info;
+ m_lastError += returnInfo.str();
m_info = NumericalIssue;
m_factorizationIsOk = false;
return;
}
// Prune columns (0:jj-1) using column jj
- SparseLUBase<Scalar,Index>::LU_pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
+ Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
// Reset repfnz for this column
for (i = 0; i < nseg; i++)
{
irep = segrep(i);
- repfnz_k(irep) = IND_EMPTY;
+ repfnz_k(irep) = emptyIdxLU;
}
} // end SparseLU within the panel
jcol += panel_size; // Move to the next panel
} // end for -- end elimination
// Count the number of nonzeros in factors
- SparseLUBase<Scalar,Index>::LU_countnz(n, m_nnzL, m_nnzU, m_glu);
+ Base::countnz(n, m_nnzL, m_nnzU, m_glu);
// Apply permutation to the L subscripts
- SparseLUBase<Scalar,Index>::LU_fixupL(n, m_perm_r.indices(), m_glu);
+ Base::fixupL(n, m_perm_r.indices(), m_glu);
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
@@ -542,6 +565,23 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
m_factorizationIsOk = true;
}
+template<typename MappedSupernodalType>
+struct SparseLUMatrixLReturnType
+{
+ typedef typename MappedSupernodalType::Index Index;
+ typedef typename MappedSupernodalType::Scalar Scalar;
+ SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
+ { }
+ Index rows() { return m_mapL.rows(); }
+ Index cols() { return m_mapL.cols(); }
+ template<typename Dest>
+ void solveInPlace( MatrixBase<Dest> &X) const
+ {
+ m_mapL.solveInPlace(X);
+ }
+ const MappedSupernodalType& m_mapL;
+};
+
namespace internal {
template<typename _MatrixType, typename Derived, typename Rhs>
@@ -557,6 +597,18 @@ struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
}
};
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
+ : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
+{
+ typedef SparseLU<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ this->defaultEvalTo(dst);
+ }
+};
} // end namespace internal
} // End namespace Eigen
diff --git a/Eigen/src/SparseLU/SparseLUBase.h b/Eigen/src/SparseLU/SparseLUBase.h
deleted file mode 100644
index f4c5fbead..000000000
--- a/Eigen/src/SparseLU/SparseLUBase.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef SPARSELUBASE_H
-#define SPARSELUBASE_H
-
-namespace Eigen {
-
-/** \ingroup SparseLU_Module
- * \class SparseLUBase
- * Base class for sparseLU
- */
-template <typename Scalar, typename Index>
-struct SparseLUBase
-{
- typedef Matrix<Scalar,Dynamic,1> ScalarVector;
- typedef Matrix<Index,Dynamic,1> IndexVector;
- typedef typename ScalarVector::RealScalar RealScalar;
- typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
- typedef Ref<Matrix<Index,Dynamic,1> > BlockIndexVector;
- typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t;
- typedef SparseMatrix<Scalar,ColMajor,Index> MatrixType;
-
- template <typename VectorType>
- static int expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions);
- static int LUMemInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu);
- template <typename VectorType>
- static int LUMemXpand(VectorType& vec, int& maxlen, int nbElts, LU_MemType memtype, int& num_expansions);
- static void LU_heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
- static void LU_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
- static int LU_snode_dfs(const int jcol, const int kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, LU_GlobalLU_t<IndexVector, ScalarVector>& glu);
- static int LU_snode_bmod (const int jcol, const int fsupc, ScalarVector& dense, GlobalLU_t& glu);
- static int LU_pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu);
- template <typename Traits>
- static void LU_dfs_kernel(const int jj, IndexVector& perm_r,
- int& nseg, IndexVector& panel_lsub, IndexVector& segrep,
- Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
- IndexVector& xplore, GlobalLU_t& glu, int& nextl_col, int krow, Traits& traits);
- static void LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
-
- static void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
- static int LU_column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
- static int LU_column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu);
- static int LU_copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
- static void LU_pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
- static void LU_countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu);
- static void LU_fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu);
-
-};
-
-} // end namespace Eigen
-
-#endif
diff --git a/Eigen/src/SparseLU/SparseLUImpl.h b/Eigen/src/SparseLU/SparseLUImpl.h
new file mode 100644
index 000000000..96b3adb2b
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLUImpl.h
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifndef SPARSELU_IMPL_H
+#define SPARSELU_IMPL_H
+
+namespace Eigen {
+namespace internal {
+
+/** \ingroup SparseLU_Module
+ * \class SparseLUImpl
+ * Base class for sparseLU
+ */
+template <typename Scalar, typename Index>
+class SparseLUImpl
+{
+ public:
+ typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+ typedef Matrix<Index,Dynamic,1> IndexVector;
+ typedef typename ScalarVector::RealScalar RealScalar;
+ typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
+ typedef Ref<Matrix<Index,Dynamic,1> > BlockIndexVector;
+ typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t;
+ typedef SparseMatrix<Scalar,ColMajor,Index> MatrixType;
+
+ protected:
+ template <typename VectorType>
+ int expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions);
+ int memInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu);
+ template <typename VectorType>
+ int memXpand(VectorType& vec, int& maxlen, int nbElts, MemType memtype, int& num_expansions);
+ void heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ void relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ int snode_dfs(const int jcol, const int kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
+ int snode_bmod (const int jcol, const int fsupc, ScalarVector& dense, GlobalLU_t& glu);
+ int pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu);
+ template <typename Traits>
+ void dfs_kernel(const int jj, IndexVector& perm_r,
+ int& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+ Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
+ IndexVector& xplore, GlobalLU_t& glu, int& nextl_col, int krow, Traits& traits);
+ void panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+
+ void panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
+ int column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+ int column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu);
+ int copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
+ void pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
+ void countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu);
+ void fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu);
+
+ template<typename , typename >
+ friend struct column_dfs_traits;
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
index 049d5e694..f82826cbe 100644
--- a/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -32,15 +32,23 @@
#define EIGEN_SPARSELU_MEMORY
namespace Eigen {
+namespace internal {
-#define LU_NO_MARKER 3
-#define LU_NUM_TEMPV(m,w,t,b) ((std::max)(m, (t+b)*w) )
-#define IND_EMPTY (-1)
+enum { LUNoMarker = 3 };
+enum {emptyIdxLU = -1};
+template<typename Index>
+inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)
+{
+ return (std::max)(m, (t+b)*w);
+}
+
+template< typename Scalar, typename Index>
+inline Index LUTempSpace(Index&m, Index& w)
+{
+ return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
+}
+
-#define LU_Reduce(alpha) ((alpha + 1) / 2) // i.e (alpha-1)/2 + 1
-#define LU_GluIntArray(n) (5* (n) + 5)
-#define LU_TempSpace(m, w) ( (2*w + 4 + LU_NO_MARKER) * m * sizeof(Index) \
- + (w + 1) * m * sizeof(Scalar) )
/**
@@ -53,7 +61,7 @@ namespace Eigen {
*/
template <typename Scalar, typename Index>
template <typename VectorType>
-int SparseLUBase<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions)
+int SparseLUImpl<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts, int keep_prev, int& num_expansions)
{
float alpha = 1.5; // Ratio of the memory increase
@@ -91,7 +99,7 @@ int SparseLUBase<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts
int tries = 0; // Number of attempts
do
{
- alpha = LU_Reduce(alpha);
+ alpha = (alpha + 1)/2;
new_len = alpha * length ;
try
{
@@ -128,7 +136,7 @@ int SparseLUBase<Scalar,Index>::expand(VectorType& vec, int& length, int nbElts
* \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
*/
template <typename Scalar, typename Index>
-int SparseLUBase<Scalar,Index>::LUMemInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu)
+int SparseLUImpl<Scalar,Index>::memInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, GlobalLU_t& glu)
{
int& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
@@ -136,10 +144,12 @@ int SparseLUBase<Scalar,Index>::LUMemInit(int m, int n, int annz, int lwork, int
glu.nzlmax = (std::max)(1., fillratio/4.) * annz; // estimated nnz in L factor
// Return the estimated size to the user if necessary
- if (lwork == IND_EMPTY)
+ Index tempSpace;
+ tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
+ if (lwork == emptyIdxLU)
{
int estimated_size;
- estimated_size = LU_GluIntArray(n) * sizeof(Index) + LU_TempSpace(m, panel_size)
+ estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace
+ (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n;
return estimated_size;
}
@@ -192,13 +202,13 @@ int SparseLUBase<Scalar,Index>::LUMemInit(int m, int n, int annz, int lwork, int
*/
template <typename Scalar, typename Index>
template <typename VectorType>
-int SparseLUBase<Scalar,Index>::LUMemXpand(VectorType& vec, int& maxlen, int nbElts, LU_MemType memtype, int& num_expansions)
+int SparseLUImpl<Scalar,Index>::memXpand(VectorType& vec, int& maxlen, int nbElts, MemType memtype, int& num_expansions)
{
int failed_size;
if (memtype == USUB)
- failed_size = expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
+ failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
else
- failed_size = expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
+ failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
if (failed_size)
return failed_size;
@@ -206,6 +216,7 @@ int SparseLUBase<Scalar,Index>::LUMemXpand(VectorType& vec, int& maxlen, int nbE
return 0 ;
}
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // EIGEN_SPARSELU_MEMORY
diff --git a/Eigen/src/SparseLU/SparseLU_Structs.h b/Eigen/src/SparseLU/SparseLU_Structs.h
index 89d6e81b7..eb6930522 100644
--- a/Eigen/src/SparseLU/SparseLU_Structs.h
+++ b/Eigen/src/SparseLU/SparseLU_Structs.h
@@ -68,10 +68,10 @@
#ifndef EIGEN_LU_STRUCTS
#define EIGEN_LU_STRUCTS
-
namespace Eigen {
+namespace internal {
-typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} LU_MemType;
+typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
template <typename IndexVector, typename ScalarVector>
struct LU_GlobalLU_t {
@@ -93,7 +93,7 @@ struct LU_GlobalLU_t {
};
// Values to set for performance
-struct LU_perfvalues {
+struct perfvalues {
int panel_size; // a panel consists of at most <panel_size> consecutive columns
int relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
// in a subtree of the elimination tree is less than relax, this subtree is considered
@@ -104,6 +104,7 @@ struct LU_perfvalues {
int fillfactor; // The estimated fills factors for L and U, compared with A
};
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // EIGEN_LU_STRUCTS
diff --git a/Eigen/src/SparseLU/SparseLU_Matrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
index d5770e1ae..ac37e56c5 100644
--- a/Eigen/src/SparseLU/SparseLU_Matrix.h
+++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -8,10 +8,11 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_SPARSELU_MATRIX_H
-#define EIGEN_SPARSELU_MATRIX_H
+#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
namespace Eigen {
+namespace internal {
/** \ingroup SparseLU_Module
* \brief a class to manipulate the L supernodal factor from the SparseLU factorization
@@ -23,13 +24,13 @@ namespace Eigen {
* NOTE : This class corresponds to the SCformat structure in SuperLU
*
*/
-/* TO DO
+/* TODO
* InnerIterator as for sparsematrix
* SuperInnerIterator to iterate through all supernodes
* Function for triangular solve
*/
template <typename _Scalar, typename _Index>
-class SuperNodalMatrix
+class MappedSuperNodalMatrix
{
public:
typedef _Scalar Scalar;
@@ -37,17 +38,17 @@ class SuperNodalMatrix
typedef Matrix<Index,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
public:
- SuperNodalMatrix()
+ MappedSuperNodalMatrix()
{
}
- SuperNodalMatrix(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+ MappedSuperNodalMatrix(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
{
setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
}
- ~SuperNodalMatrix()
+ ~MappedSuperNodalMatrix()
{
}
@@ -69,34 +70,24 @@ class SuperNodalMatrix
m_nsuper = col_to_sup(n);
m_col_to_sup = col_to_sup.data();
m_sup_to_col = sup_to_col.data();
-
}
/**
* Number of rows
*/
- int rows()
- {
- return m_row;
- }
+ int rows() { return m_row; }
/**
* Number of columns
*/
- int cols()
- {
- return m_col;
- }
+ int cols() { return m_col; }
/**
* Return the array of nonzero values packed by column
*
* The size is nnz
*/
- Scalar* valuePtr()
- {
- return m_nzval;
- }
+ Scalar* valuePtr() { return m_nzval; }
const Scalar* valuePtr() const
{
@@ -118,10 +109,7 @@ class SuperNodalMatrix
/**
* Return the array of compressed row indices of all supernodes
*/
- Index* rowIndex()
- {
- return m_rowind;
- }
+ Index* rowIndex() { return m_rowind; }
const Index* rowIndex() const
{
@@ -131,10 +119,7 @@ class SuperNodalMatrix
/**
* Return the location in \em rowvaluePtr() which starts each column
*/
- Index* rowIndexPtr()
- {
- return m_rowind_colptr;
- }
+ Index* rowIndexPtr() { return m_rowind_colptr; }
const Index* rowIndexPtr() const
{
@@ -144,10 +129,7 @@ class SuperNodalMatrix
/**
* Return the array of column-to-supernode mapping
*/
- Index* colToSup()
- {
- return m_col_to_sup;
- }
+ Index* colToSup() { return m_col_to_sup; }
const Index* colToSup() const
{
@@ -156,10 +138,7 @@ class SuperNodalMatrix
/**
* Return the array of supernode-to-column mapping
*/
- Index* supToCol()
- {
- return m_sup_to_col;
- }
+ Index* supToCol() { return m_sup_to_col; }
const Index* supToCol() const
{
@@ -200,10 +179,10 @@ class SuperNodalMatrix
*
*/
template<typename Scalar, typename Index>
-class SuperNodalMatrix<Scalar,Index>::InnerIterator
+class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
{
public:
- InnerIterator(const SuperNodalMatrix& mat, Index outer)
+ InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_idval(mat.colIndexPtr()[outer]),
@@ -235,7 +214,7 @@ class SuperNodalMatrix<Scalar,Index>::InnerIterator
}
protected:
- const SuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix
+ const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix
const Index m_outer; // Current column
Index m_idval; //Index to browse the values in the current column
const Index m_startval; // Start of the column value
@@ -251,7 +230,7 @@ class SuperNodalMatrix<Scalar,Index>::InnerIterator
*/
template<typename Scalar, typename Index>
template<typename Dest>
-void SuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
+void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
{
Index n = X.rows();
int nrhs = X.cols();
@@ -311,6 +290,7 @@ void SuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
}
}
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // EIGEN_SPARSELU_MATRIX_H
diff --git a/Eigen/src/SparseLU/SparseLU_Utils.h b/Eigen/src/SparseLU/SparseLU_Utils.h
index e764823ae..33cc9c7ac 100644
--- a/Eigen/src/SparseLU/SparseLU_Utils.h
+++ b/Eigen/src/SparseLU/SparseLU_Utils.h
@@ -12,12 +12,13 @@
#define EIGEN_SPARSELU_UTILS_H
namespace Eigen {
+namespace internal {
/**
* \brief Count Nonzero elements in the factors
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::countnz(const int n, int& nnzL, int& nnzU, GlobalLU_t& glu)
{
nnzL = 0;
nnzU = (glu.xusub)(n);
@@ -48,7 +49,7 @@ void SparseLUBase<Scalar,Index>::LU_countnz(const int n, int& nnzL, int& nnzU, G
*
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& glu)
{
int fsupc, i, j, k, jstart;
@@ -73,6 +74,7 @@ void SparseLUBase<Scalar,Index>::LU_fixupL(const int n, const IndexVector& perm_
glu.xlsub(n) = nextl;
}
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // EIGEN_SPARSELU_UTILS_H
diff --git a/Eigen/src/SparseLU/SparseLU_column_bmod.h b/Eigen/src/SparseLU/SparseLU_column_bmod.h
index 6d557eb81..44ec61ac4 100644
--- a/Eigen/src/SparseLU/SparseLU_column_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_column_bmod.h
@@ -32,7 +32,8 @@
#define SPARSELU_COLUMN_BMOD_H
namespace Eigen {
-
+
+namespace internal {
/**
* \brief Performs numeric block updates (sup-col) in topological order
*
@@ -49,7 +50,7 @@ namespace Eigen {
*
*/
template <typename Scalar, typename Index>
-int SparseLUBase<Scalar,Index>::LU_column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu)
+int SparseLUImpl<Scalar,Index>::column_bmod(const int jcol, const int nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, int fpanelc, GlobalLU_t& glu)
{
int jsupno, k, ksub, krep, ksupno;
int lptr, nrow, isub, irow, nextlu, new_next, ufirst;
@@ -119,7 +120,7 @@ int SparseLUBase<Scalar,Index>::LU_column_bmod(const int jcol, const int nseg, B
new_next += offset;
while (new_next > glu.nzlumax )
{
- mem = LUMemXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
+ mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
if (mem) return mem;
}
@@ -173,6 +174,7 @@ int SparseLUBase<Scalar,Index>::LU_column_bmod(const int jcol, const int nseg, B
return 0;
}
+} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_COLUMN_BMOD_H
diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h
index 1bf17330a..723598265 100644
--- a/Eigen/src/SparseLU/SparseLU_column_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -30,17 +30,18 @@
#ifndef SPARSELU_COLUMN_DFS_H
#define SPARSELU_COLUMN_DFS_H
+template <typename Scalar, typename Index> class SparseLUImpl;
namespace Eigen {
namespace internal {
-
+
template<typename IndexVector, typename ScalarVector>
-struct LU_column_dfs_traits
+struct column_dfs_traits
{
- typedef typename IndexVector::Scalar Index;
typedef typename ScalarVector::Scalar Scalar;
- LU_column_dfs_traits(Index jcol, Index& jsuper, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
- : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu)
+ typedef typename IndexVector::Scalar Index;
+ column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl<Scalar, Index>::GlobalLU_t& glu, SparseLUImpl<Scalar, Index>& luImpl)
+ : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl)
{}
bool update_segrep(Index /*krep*/, Index /*jj*/)
{
@@ -49,17 +50,17 @@ struct LU_column_dfs_traits
void mem_expand(IndexVector& lsub, int& nextl, int chmark)
{
if (nextl >= m_glu.nzlmax)
- SparseLUBase<Scalar,Index>::LUMemXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions);
- if (chmark != (m_jcol-1)) m_jsuper_ref = IND_EMPTY;
+ m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions);
+ if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU;
}
enum { ExpandMem = true };
int m_jcol;
int& m_jsuper_ref;
- LU_GlobalLU_t<IndexVector, ScalarVector>& m_glu;
+ typename SparseLUImpl<Scalar, Index>::GlobalLU_t& m_glu;
+ SparseLUImpl<Scalar, Index>& m_luImpl;
};
-} // end namespace internal
/**
* \brief Performs a symbolic factorization on column jcol and decide the supernode boundary
@@ -89,7 +90,7 @@ struct LU_column_dfs_traits
*
*/
template <typename Scalar, typename Index>
-int SparseLUBase<Scalar,Index>::LU_column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+int SparseLUImpl<Scalar,Index>::column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
int jsuper = glu.supno(jcol);
@@ -97,19 +98,19 @@ int SparseLUBase<Scalar,Index>::LU_column_dfs(const int m, const int jcol, Index
VectorBlock<IndexVector> marker2(marker, 2*m, m);
- internal::LU_column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu);
+ column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
// For each nonzero in A(*,jcol) do dfs
- for (int k = 0; lsub_col[k] != IND_EMPTY; k++)
+ for (int k = 0; lsub_col[k] != emptyIdxLU; k++)
{
int krow = lsub_col(k);
- lsub_col(k) = IND_EMPTY;
+ lsub_col(k) = emptyIdxLU;
int kmark = marker2(krow);
// krow was visited before, go to the next nonz;
if (kmark == jcol) continue;
- LU_dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
+ dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
xplore, glu, nextl, krow, traits);
} // for each nonzero ...
@@ -130,18 +131,18 @@ int SparseLUBase<Scalar,Index>::LU_column_dfs(const int m, const int jcol, Index
jm1ptr = glu.xlsub(jcolm1);
// Use supernodes of type T2 : see SuperLU paper
- if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = IND_EMPTY;
+ if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
// Make sure the number of columns in a supernode doesn't
// exceed threshold
- if ( (jcol - fsupc) >= maxsuper) jsuper = IND_EMPTY;
+ if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU;
/* If jcol starts a new supernode, reclaim storage space in
* glu.lsub from previous supernode. Note we only store
* the subscript set of the first and last columns of
* a supernode. (first for num values, last for pruning)
*/
- if (jsuper == IND_EMPTY)
+ if (jsuper == emptyIdxLU)
{ // starts a new supernode
if ( (fsupc < jcolm1-1) )
{ // >= 3 columns in nsuper
@@ -169,6 +170,8 @@ int SparseLUBase<Scalar,Index>::LU_column_dfs(const int m, const int jcol, Index
return 0;
}
+} // end namespace internal
+
} // end namespace Eigen
#endif
diff --git a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
index 10c85d4ff..dc6c9d6a2 100644
--- a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
+++ b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
@@ -30,6 +30,7 @@
#define SPARSELU_COPY_TO_UCOL_H
namespace Eigen {
+namespace internal {
/**
* \brief Performs numeric block updates (sup-col) in topological order
@@ -46,7 +47,7 @@ namespace Eigen {
*
*/
template <typename Scalar, typename Index>
-int SparseLUBase<Scalar,Index>::LU_copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
+int SparseLUImpl<Scalar,Index>::copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
{
Index ksub, krep, ksupno;
@@ -65,7 +66,7 @@ int SparseLUBase<Scalar,Index>::LU_copy_to_ucol(const int jcol, const int nseg,
if (jsupno != ksupno ) // should go into ucol();
{
kfnz = repfnz(krep);
- if (kfnz != IND_EMPTY)
+ if (kfnz != emptyIdxLU)
{ // Nonzero U-segment
fsupc = glu.xsup(ksupno);
isub = glu.xlsub(fsupc) + kfnz - fsupc;
@@ -73,9 +74,9 @@ int SparseLUBase<Scalar,Index>::LU_copy_to_ucol(const int jcol, const int nseg,
new_next = nextu + segsize;
while (new_next > glu.nzumax)
{
- mem = LUMemXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions);
+ mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions);
if (mem) return mem;
- mem = LUMemXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
+ mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
if (mem) return mem;
}
@@ -99,6 +100,7 @@ int SparseLUBase<Scalar,Index>::LU_copy_to_ucol(const int jcol, const int nseg,
return 0;
}
+} // namespace internal
} // end namespace Eigen
#endif // SPARSELU_COPY_TO_UCOL_H
diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
index a1ea5bc06..ef1272ea9 100644
--- a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
+++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
@@ -29,6 +29,7 @@
#define SPARSELU_HEAP_RELAX_SNODE_H
namespace Eigen {
+namespace internal {
/**
* \brief Identify the initial relaxed supernodes
@@ -42,7 +43,7 @@ namespace Eigen {
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
+void SparseLUImpl<Scalar,Index>::heap_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
// The etree may not be postordered, but its heap ordered
@@ -63,7 +64,7 @@ void SparseLUBase<Scalar,Index>::LU_heap_relax_snode (const int n, IndexVector&
et = iwork;
// compute the number of descendants of each node in the etree
- relax_end.setConstant(IND_EMPTY);
+ relax_end.setConstant(emptyIdxLU);
int j, parent;
descendants.setZero();
for (j = 0; j < n; j++)
@@ -120,6 +121,7 @@ void SparseLUBase<Scalar,Index>::LU_heap_relax_snode (const int n, IndexVector&
et = et_save;
}
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // SPARSELU_HEAP_RELAX_SNODE_H
diff --git a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
index 8b65ff37c..3358853fb 100644
--- a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
@@ -12,6 +12,7 @@
#define SPARSELU_KERNEL_BMOD_H
namespace Eigen {
+namespace internal {
/**
* \brief Performs numeric block updates from a given supernode to a single column
@@ -111,6 +112,7 @@ template <> struct LU_kernel_bmod<1>
}
};
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif // SPARSELU_KERNEL_BMOD_H
diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index fbc146a36..d00709d6e 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -32,6 +32,7 @@
#define SPARSELU_PANEL_BMOD_H
namespace Eigen {
+namespace internal {
/**
* \brief Performs numeric block updates (sup-panel) in topological order.
@@ -52,8 +53,9 @@ namespace Eigen {
*
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv,
- IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::panel_bmod(const int m, const int w, const int jcol,
+ const int nseg, ScalarVector& dense, ScalarVector& tempv,
+ IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
{
int ksub,jj,nextl_col;
@@ -89,7 +91,7 @@ void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const i
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
kfnz = repfnz_col(krep);
- if ( kfnz == IND_EMPTY )
+ if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
@@ -111,7 +113,7 @@ void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const i
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
- if ( kfnz == IND_EMPTY )
+ if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
@@ -158,7 +160,7 @@ void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const i
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
- if ( kfnz == IND_EMPTY )
+ if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
@@ -193,7 +195,7 @@ void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const i
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
kfnz = repfnz_col(krep);
- if ( kfnz == IND_EMPTY )
+ if ( kfnz == emptyIdxLU )
continue; // skip any zero segment
segsize = krep - kfnz + 1;
@@ -212,7 +214,9 @@ void SparseLUBase<Scalar,Index>::LU_panel_bmod(const int m, const int w, const i
}
} // End for each updating supernode
-}
+} // end panel bmod
+
+} // end namespace internal
} // end namespace Eigen
diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
index 16e04423b..b3d9775fa 100644
--- a/Eigen/src/SparseLU/SparseLU_panel_dfs.h
+++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
@@ -35,10 +35,10 @@ namespace Eigen {
namespace internal {
template<typename IndexVector>
-struct LU_panel_dfs_traits
+struct panel_dfs_traits
{
typedef typename IndexVector::Scalar Index;
- LU_panel_dfs_traits(Index jcol, Index* marker)
+ panel_dfs_traits(Index jcol, Index* marker)
: m_jcol(jcol), m_marker(marker)
{}
bool update_segrep(Index krep, Index jj)
@@ -56,11 +56,10 @@ struct LU_panel_dfs_traits
Index* m_marker;
};
-} // end namespace internal
template <typename Scalar, typename Index>
template <typename Traits>
-void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r,
+void SparseLUImpl<Scalar,Index>::dfs_kernel(const int jj, IndexVector& perm_r,
int& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu,
@@ -73,7 +72,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
// For each unmarked krow of jj
marker(krow) = jj;
int kperm = perm_r(krow);
- if (kperm == IND_EMPTY ) {
+ if (kperm == emptyIdxLU ) {
// krow is in L : place it in structure of L(*, jj)
panel_lsub(nextl_col++) = krow; // krow is indexed into A
@@ -88,7 +87,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
// First nonzero element in the current column:
int myfnz = repfnz_col(krep);
- if (myfnz != IND_EMPTY )
+ if (myfnz != emptyIdxLU )
{
// Representative visited before
if (myfnz > kperm ) repfnz_col(krep) = kperm;
@@ -97,7 +96,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
else
{
// Otherwise, perform dfs starting at krep
- int oldrep = IND_EMPTY;
+ int oldrep = emptyIdxLU;
parent(krep) = oldrep;
repfnz_col(krep) = kperm;
int xdfs = glu.xlsub(krep);
@@ -118,7 +117,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
marker(kchild) = jj;
int chperm = perm_r(kchild);
- if (chperm == IND_EMPTY)
+ if (chperm == emptyIdxLU)
{
// case kchild is in L: place it in L(*, j)
panel_lsub(nextl_col++) = kchild;
@@ -132,7 +131,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
int chrep = glu.xsup(glu.supno(chperm)+1) - 1;
myfnz = repfnz_col(chrep);
- if (myfnz != IND_EMPTY)
+ if (myfnz != emptyIdxLU)
{ // Visited before
if (myfnz > chperm)
repfnz_col(chrep) = chperm;
@@ -167,13 +166,13 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
}
kpar = parent(krep); // Pop recursion, mimic recursion
- if (kpar == IND_EMPTY)
+ if (kpar == emptyIdxLU)
break; // dfs done
krep = kpar;
xdfs = xplore(krep);
maxdfs = xprune(krep);
- } while (kpar != IND_EMPTY); // Do until empty stack
+ } while (kpar != emptyIdxLU); // Do until empty stack
} // end if (myfnz = -1)
@@ -217,7 +216,7 @@ void SparseLUBase<Scalar,Index>::LU_dfs_kernel(const int jj, IndexVector& perm_r
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
{
int nextl_col; // Next available position in panel_lsub[*,jj]
@@ -225,7 +224,7 @@ void SparseLUBase<Scalar,Index>::LU_panel_dfs(const int m, const int w, const in
VectorBlock<IndexVector> marker1(marker, m, m);
nseg = 0;
- internal::LU_panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
+ panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
// For each column in the panel
for (int jj = jcol; jj < jcol + w; jj++)
@@ -246,13 +245,14 @@ void SparseLUBase<Scalar,Index>::LU_panel_dfs(const int m, const int w, const in
if (kmark == jj)
continue; // krow visited before, go to the next nonzero
- LU_dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
+ dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
xplore, glu, nextl_col, krow, traits);
}// end for nonzeros in column jj
} // end for column jj
}
+} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PANEL_DFS_H
diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h
index 69472da9b..c6d7db5ac 100644
--- a/Eigen/src/SparseLU/SparseLU_pivotL.h
+++ b/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -31,6 +31,7 @@
#define SPARSELU_PIVOTL_H
namespace Eigen {
+namespace internal {
/**
* \brief Performs the numerical pivotin on the current column of L, and the CDIV operation.
@@ -56,7 +57,7 @@ namespace Eigen {
*
*/
template <typename Scalar, typename Index>
-int SparseLUBase<Scalar,Index>::LU_pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu)
+int SparseLUImpl<Scalar,Index>::pivotL(const int jcol, const RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, GlobalLU_t& glu)
{
Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
@@ -72,7 +73,7 @@ int SparseLUBase<Scalar,Index>::LU_pivotL(const int jcol, const RealScalar diagp
Index diagind = iperm_c(jcol); // diagonal index
RealScalar pivmax = 0.0;
Index pivptr = nsupc;
- Index diag = IND_EMPTY;
+ Index diag = emptyIdxLU;
RealScalar rtemp;
Index isub, icol, itemp, k;
for (isub = nsupc; isub < nsupr; ++isub) {
@@ -127,6 +128,7 @@ int SparseLUBase<Scalar,Index>::LU_pivotL(const int jcol, const RealScalar diagp
return 0;
}
+} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PIVOTL_H
diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h
index 816358bc3..03da95dbb 100644
--- a/Eigen/src/SparseLU/SparseLU_pruneL.h
+++ b/Eigen/src/SparseLU/SparseLU_pruneL.h
@@ -31,6 +31,7 @@
#define SPARSELU_PRUNEL_H
namespace Eigen {
+namespace internal {
/**
* \brief Prunes the L-structure.
@@ -49,7 +50,7 @@ namespace Eigen {
*
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
+void SparseLUImpl<Scalar,Index>::pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
{
// For each supernode-rep irep in U(*,j]
int jsupno = glu.supno(jcol);
@@ -63,7 +64,7 @@ void SparseLUBase<Scalar,Index>::LU_pruneL(const int jcol, const IndexVector& pe
do_prune = false;
// Don't prune with a zero U-segment
- if (repfnz(irep) == IND_EMPTY) continue;
+ if (repfnz(irep) == emptyIdxLU) continue;
// If a snode overlaps with the next panel, then the U-segment
// is fragmented into two parts -- irep and irep1. We should let
@@ -97,9 +98,9 @@ void SparseLUBase<Scalar,Index>::LU_pruneL(const int jcol, const IndexVector& pe
while (kmin <= kmax)
{
- if (perm_r(glu.lsub(kmax)) == IND_EMPTY)
+ if (perm_r(glu.lsub(kmax)) == emptyIdxLU)
kmax--;
- else if ( perm_r(glu.lsub(kmin)) != IND_EMPTY)
+ else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)
kmin++;
else
{
@@ -128,6 +129,7 @@ void SparseLUBase<Scalar,Index>::LU_pruneL(const int jcol, const IndexVector& pe
} // End for each U-segment
}
+} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_PRUNEL_H
diff --git a/Eigen/src/SparseLU/SparseLU_relax_snode.h b/Eigen/src/SparseLU/SparseLU_relax_snode.h
index 44b279878..f73afdf3d 100644
--- a/Eigen/src/SparseLU/SparseLU_relax_snode.h
+++ b/Eigen/src/SparseLU/SparseLU_relax_snode.h
@@ -29,6 +29,8 @@
#define SPARSELU_RELAX_SNODE_H
namespace Eigen {
+
+namespace internal {
/**
* \brief Identify the initial relaxed supernodes
@@ -42,12 +44,12 @@ namespace Eigen {
* \param relax_end last column in a supernode
*/
template <typename Scalar, typename Index>
-void SparseLUBase<Scalar,Index>::LU_relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
+void SparseLUImpl<Scalar,Index>::relax_snode (const int n, IndexVector& et, const int relax_columns, IndexVector& descendants, IndexVector& relax_end)
{
// compute the number of descendants of each node in the etree
int j, parent;
- relax_end.setConstant(IND_EMPTY);
+ relax_end.setConstant(emptyIdxLU);
descendants.setZero();
for (j = 0; j < n; j++)
{
@@ -75,6 +77,7 @@ void SparseLUBase<Scalar,Index>::LU_relax_snode (const int n, IndexVector& et, c
}
-} // end namespace Eigen
+} // end namespace internal
+} // end namespace Eigen
#endif