author     Desire NUENTSA <desire.nuentsa_wakam@inria.fr>  2013-02-15 16:35:28 +0100
committer  Desire NUENTSA <desire.nuentsa_wakam@inria.fr>  2013-02-15 16:35:28 +0100
commit 1a056b408db75ef082b658ba0e5ff726a99018bb (patch)
tree 9556d0cbe42b6dc5bbded1db4c161b80c19e757f /Eigen/src/SparseQR
parent 9fd465ea2b414c5a6c10c07ee1f44cb2a2720fcf (diff)
Add a rank-revealing feature to sparse QR
Diffstat (limited to 'Eigen/src/SparseQR')
-rw-r--r--  Eigen/src/SparseQR/SparseQR.h | 279
1 file changed, 179 insertions(+), 100 deletions(-)
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index 7fa3e54a5..be072e40b 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -3,8 +3,8 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
-// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -35,21 +35,22 @@ namespace internal {
/**
* \ingroup SparseQR_Module
* \class SparseQR
- * \brief Sparse left-looking QR factorization
+ * \brief Sparse left-looking rank-revealing QR factorization
*
- * This class is used to perform a left-looking QR decomposition
- * of sparse matrices. The result is then used to solve linear leasts_square systems.
- * Clearly, a QR factorization is returned such that A*P = Q*R where :
+ * This class is used to perform a left-looking rank-revealing QR decomposition
+ * of sparse matrices. When a column has a norm less than a given tolerance
+ * it is implicitly permuted to the end. The QR factorization thus obtained is
+ * given by A*P = Q*R where R is upper triangular or trapezoidal.
*
- * P is the column permutation. Use colsPermutation() to get it.
+ * P is the column permutation which is the product of the fill-reducing and the
+ * rank-revealing permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as Householder reflectors.
* Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
* You can then apply it to a vector.
*
- * R is the sparse triangular factor. Use matrixR() to get it as SparseMatrix.
- *
- * \note This is not a rank-revealing QR decomposition.
+ * R is the sparse triangular or trapezoidal factor; it is trapezoidal when A is rank-deficient.
+ * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
* \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
@@ -71,10 +72,10 @@ class SparseQR
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
public:
- SparseQR () : m_isInitialized(false),m_analysisIsok(false)
+ SparseQR () : m_isInitialized(false),m_analysisIsok(false),m_lastError(""),m_useDefaultThreshold(true)
{ }
- SparseQR(const MatrixType& mat) : m_isInitialized(false),m_analysisIsok(false)
+ SparseQR(const MatrixType& mat) : m_isInitialized(false),m_analysisIsok(false),m_lastError(""),m_useDefaultThreshold(true)
{
compute(mat);
}
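
For illustration (not part of this patch): a minimal sketch of how the class is typically used once this feature is in place, assuming a double-precision column-major matrix, the COLAMDOrdering fill-reducing ordering, and the usual sparse-solver members (compute(), solve(), info()):

    #include <Eigen/SparseCore>
    #include <Eigen/SparseQR>
    #include <Eigen/OrderingMethods>
    #include <iostream>

    typedef Eigen::SparseMatrix<double> SpMat;   // must be column-major

    void solveLeastSquares(const SpMat& A, const Eigen::VectorXd& b)
    {
      // Factorize A*P = Q*R with the rank-revealing sparse QR
      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
      if (qr.info() != Eigen::Success)
      {
        std::cerr << qr.lastErrorMessage() << std::endl;
        return;
      }
      Eigen::VectorXd x = qr.solve(b);   // least-squares solution of A*x = b
      std::cout << "nonzero pivots: " << qr.rank()
                << "  residual: " << (A*x - b).norm() << std::endl;
    }
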
@@ -96,7 +97,15 @@ class SparseQR
/** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
*/
- const MatrixType& matrixR() const { return m_R; }
+ const /*SparseTriangularView<MatrixType, Upper>*/MatrixType matrixR() const { return m_R; }
+ /** \returns the number of nonzero pivots found during the factorization
+ * \warning Depending on the chosen threshold, this is only an estimate of the rank of the matrix
+ */
+ Index rank() const
+ {
+ eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+ return m_nonzeropivots;
+ }
/** \returns an expression of the matrix Q as products of sparse Householder reflectors.
* You can do the following to get an actual SparseMatrix representation of Q:
@@ -109,33 +118,52 @@ class SparseQR
/** \returns a const reference to the fill-in reducing permutation that was applied to the columns of A
*/
- const PermutationType& colsPermutation() const
+ const PermutationType colsPermutation() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
- return m_perm_c;
+ return m_outputPerm_c;
}
+ /**
+ * \returns A string describing the type of error
+ */
+ std::string lastErrorMessage() const
+ {
+ return m_lastError;
+ }
/** \internal */
template<typename Rhs, typename Dest>
bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
- Index rank = this->matrixR().cols();
+
+ Index rank = this->rank();
// Compute Q^T * b;
- dest = this->matrixQ().transpose() * B;
+ Dest y,b;
+ y = this->matrixQ().transpose() * B;
+ b = y;
// Solve with the triangular matrix R
- Dest y;
- y = this->matrixR().template triangularView<Upper>().solve(dest.derived().topRows(rank));
-
+ y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
+ y.bottomRows(y.size()-rank).setZero();
+
// Apply the column permutation
- if (m_perm_c.size()) dest.topRows(rank) = colsPermutation().inverse() * y;
- else dest = y;
+ if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+ else dest = y.topRows(cols());
m_info = Success;
return true;
}
+ /** Set the threshold that is used to determine the rank and the null Householder
+ * reflections. Precisely, if the norm of a householder reflection is below this
+ * threshold, the entire column is treated as zero.
+ */
+ void setThreshold(const RealScalar& threshold)
+ {
+ m_useDefaultThreshold = false;
+ m_threshold = threshold;
+ }
/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
* \sa compute()
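
For illustration (not part of this patch): a sketch of how the new setThreshold() interacts with rank(), reusing the SpMat typedef and matrix A from the previous sketch. The threshold has to be set before compute(), since it is only read during the numerical factorization:

    Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr;
    qr.setThreshold(1e-8);   // columns whose Householder norm is below 1e-8 count as zero pivots
    qr.compute(A);
    int r = qr.rank();       // number of nonzero pivots for this threshold
    // As documented above, the leading r-by-r block of R is triangular and of full rank:
    SpMat R1 = qr.matrixR().topLeftCorner(r, r);
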
@@ -167,15 +195,19 @@ class SparseQR
bool m_analysisIsok;
bool m_factorizationIsok;
mutable ComputationInfo m_info;
+ std::string m_lastError;
QRMatrixType m_pmat; // Temporary matrix
QRMatrixType m_R; // The triangular factor matrix
QRMatrixType m_Q; // The orthogonal reflectors
ScalarVector m_hcoeffs; // The Householder coefficients
- PermutationType m_perm_c; // Column permutation
- PermutationType m_perm_r; // Column permutation
+ PermutationType m_perm_c; // Fill-reducing Column permutation
+ PermutationType m_pivotperm; // The permutation for rank revealing
+ PermutationType m_outputPerm_c; //The final column permutation
+ RealScalar m_threshold; // Threshold to determine null Householder reflections
+ bool m_useDefaultThreshold; // Use default threshold
+ Index m_nonzeropivots; // Number of non zero pivots found
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
- IndexVector m_found_diag_elem; // Existence of diagonal elements
template <typename, typename > friend struct SparseQR_QProduct;
};
@@ -194,21 +226,19 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
ord(mat, m_perm_c);
Index n = mat.cols();
Index m = mat.rows();
- // Permute the input matrix... only the column pointers are permuted
- // FIXME: directly send "m_perm.inverse() * mat" to coletree -> need an InnerIterator to the sparse-permutation-product expression.
- m_pmat = mat;
- m_pmat.uncompress();
- for (int i = 0; i < n; i++)
+
+ if (!m_perm_c.size())
{
- Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
- m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i];
- m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ m_perm_c.resize(n);
+ m_perm_c.indices().setLinSpaced(n, 0,n-1);
}
+
// Compute the column elimination tree of the permuted matrix
- internal::coletree(m_pmat, m_etree, m_firstRowElt);
+ m_outputPerm_c = m_perm_c.inverse();
+ internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_R.resize(n, n);
- m_Q.resize(m, m);
+ m_Q.resize(m, n);
// Allocate space for nonzero elements : rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
@@ -231,38 +261,58 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index n = mat.cols();
IndexVector mark(m); mark.setConstant(-1); // Record the visited nodes
IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
- Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
- Index pcol;
- ScalarVector tval(m); tval.setZero(); // Temporary vector
- IndexVector iperm(n);
+ Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
+ ScalarVector tval(m);
bool found_diag;
- if (m_perm_c.size())
- for(int i = 0; i < n; i++) iperm(m_perm_c.indices()(i)) = i;
- else
- iperm.setLinSpaced(n, 0, n-1);
-
- // Left looking QR factorization : Compute a column of R and Q at a time
+
+ m_pmat = mat;
+ m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
+ for (int i = 0; i < n; i++)
+ {
+ Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
+ m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i];
+ m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ }
+
+ // Compute the default threshold.
+ if(m_useDefaultThreshold)
+ {
+ RealScalar infNorm = 0.0;
+ for (int j = 0; j < n; j++)
+ {
+ //FIXME No support for mat.col(i).maxCoeff())
+ for(typename MatrixType::InnerIterator it(m_pmat, j); it; ++it)
+ infNorm = (std::max)(infNorm, (std::abs)(it.value()));
+ }
+ m_threshold = 20 * (m + n) * infNorm *std::numeric_limits<RealScalar>::epsilon();
+ }
+
+ m_pivotperm.resize(n);
+ m_pivotperm.indices().setLinSpaced(n, 0, n-1); // For rank-revealing
+
+ // Left looking rank-revealing QR factorization : Compute a column of R and Q at a time
+ Index rank = 0; // Record the number of valid pivots
for (Index col = 0; col < n; col++)
{
+ mark.setConstant(-1);
m_R.startVec(col);
m_Q.startVec(col);
- mark(col) = col;
- Qidx(0) = col;
+ mark(rank) = col;
+ Qidx(0) = rank;
nzcolR = 0; nzcolQ = 1;
- pcol = iperm(col);
- found_diag = false;
- // Find the nonzero locations of the column k of R,
- // i.e All the nodes (with indexes lower than k) reachable through the col etree rooted at node k
- for (typename MatrixType::InnerIterator itp(mat, pcol); itp || !found_diag; ++itp)
+ found_diag = false; tval.setZero();
+ // Symbolic factorization : Find the nonzero locations of the column k of the factors R and Q
+ // i.e All the nodes (with indexes lower than rank) reachable through the col etree rooted at node k
+ for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{
- Index curIdx = col;
+ Index curIdx = rank ;
if (itp) curIdx = itp.row();
- if(curIdx == col) found_diag = true;
+ if(curIdx == rank) found_diag = true;
// Get the nonzeros indexes of the current column of R
Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here
if (st < 0 )
{
- std::cerr << " Empty row found during Numerical factorization ... Abort \n";
+ m_lastError = " Empty row found during Numerical factorization ";
m_info = NumericalIssue;
return;
}
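
For illustration (not part of this patch): the default pivot threshold computed just above can also be reproduced on the caller's side and passed to setThreshold(); the helper below mirrors the same formula, 20*(m+n)*max|a_ij|*eps, and its name is purely illustrative:

    #include <algorithm>
    #include <cmath>
    #include <limits>

    double defaultQRThreshold(const SpMat& A)
    {
      double infNorm = 0.0;   // largest absolute entry of A
      for (int j = 0; j < A.outerSize(); ++j)
        for (SpMat::InnerIterator it(A, j); it; ++it)
          infNorm = (std::max)(infNorm, std::abs(it.value()));
      return 20 * (A.rows() + A.cols()) * infNorm
                * std::numeric_limits<double>::epsilon();
    }
    // usage: qr.setThreshold(defaultQRThreshold(A)); qr.compute(A);
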
@@ -278,23 +328,22 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nt = nzcolR-bi;
for(int i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
- // Copy the current row value of mat
+ // Copy the current (curIdx, col) value of the input matrix
if (itp) tval(curIdx) = itp.value();
else tval(curIdx) = Scalar(0.);
// Compute the pattern of Q(:,k)
- if (curIdx > col && mark(curIdx) < col)
+ if (curIdx > rank && mark(curIdx) != col )
{
Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q
mark(curIdx) = col; // And mark it as visited
nzcolQ++;
}
}
-
// Browse all the indexes of R(:,col) in reverse order
for (Index i = nzcolR-1; i >= 0; i--)
{
- Index curIdx = Ridx(i);
+ Index curIdx = m_pivotperm.indices()(Ridx(i));
// Apply the <curIdx> householder vector to tval
Scalar tdot(0.);
//First compute q'*tval
@@ -308,74 +357,103 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
tval(itq.row()) -= itq.value() * tdot;
}
- //With the topological ordering, updates for curIdx are fully done at this point
- m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
- tval(curIdx) = Scalar(0.);
-
// Detect fill-in for the current column of Q
- if(m_etree(curIdx) == col)
+ if((m_etree(Ridx(i)) == rank) )
{
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
{
Index iQ = itq.row();
- if (mark(iQ) < col)
+ if (mark(iQ) != col)
{
Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q
mark(iQ) = col; //And mark it as visited
}
}
}
- } // End update current column of R
-
- // Record the current (unscaled) column of V.
- for (Index itq = 0; itq < nzcolQ; ++itq)
- {
- Index iQ = Qidx(itq);
- m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ);
- tval(iQ) = Scalar(0.);
- }
- // Compute the new Householder reflection
+ } // End update current column
+
+ // Compute the Householder reflection for the current column
RealScalar sqrNorm =0.;
Scalar tau; RealScalar beta;
- typename QRMatrixType::InnerIterator itq(m_Q, col);
- Scalar c0 = (itq) ? itq.value() : Scalar(0.);
+ Scalar c0 = (nzcolQ) ? tval(Qidx(0)) : Scalar(0.);
//First, the squared norm of Q((col+1):m, col)
- if(itq) ++itq;
- for (; itq; ++itq)
+ for (Index itq = 1; itq < nzcolQ; ++itq)
{
- sqrNorm += internal::abs2(itq.value());
+ sqrNorm += internal::abs2(tval(Qidx(itq)));
}
+
if(sqrNorm == RealScalar(0) && internal::imag(c0) == RealScalar(0))
{
tau = RealScalar(0);
beta = internal::real(c0);
- typename QRMatrixType::InnerIterator it(m_Q,col);
- it.valueRef() = 1; //FIXME A row permutation should be performed at this point
- }
+ tval(Qidx(0)) = 1;
+ }
else
{
beta = std::sqrt(internal::abs2(c0) + sqrNorm);
if(internal::real(c0) >= RealScalar(0))
beta = -beta;
- typename QRMatrixType::InnerIterator it(m_Q,col);
- it.valueRef() = 1;
- for (++it; it; ++it)
- {
- it.valueRef() /= (c0 - beta);
- }
+ tval(Qidx(0)) = 1;
+ for (Index itq = 1; itq < nzcolQ; ++itq)
+ tval(Qidx(itq)) /= (c0 - beta);
tau = internal::conj((beta-c0) / beta);
}
- m_hcoeffs(col) = tau;
- m_R.insertBackByOuterInnerUnordered(col, col) = beta;
+ // Insert values in R
+ for (Index i = nzcolR-1; i >= 0; i--)
+ {
+ Index curIdx = Ridx(i);
+ if(curIdx < rank)
+ {
+ m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
+ tval(curIdx) = Scalar(0.);
+ }
+ }
+ if(std::abs(beta) >= m_threshold) {
+ m_R.insertBackByOuterInner(col, rank) = beta;
+ rank++;
+ // The householder coefficient
+ m_hcoeffs(col) = tau;
+ /* Record the householder reflections */
+ for (Index itq = 0; itq < nzcolQ; ++itq)
+ {
+ Index iQ = Qidx(itq);
+ m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ);
+ tval(iQ) = Scalar(0.);
+ }
+ } else {
+ // Zero pivot found: implicitly move this column to the end
+ m_hcoeffs(col) = Scalar(0);
+ for (Index j = rank; j < n-1; j++)
+ std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
+ // Recompute the column elimination tree
+ internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+ }
}
// Finalize the column pointers of the sparse matrices R and Q
- m_R.finalize(); m_R.makeCompressed();
m_Q.finalize(); m_Q.makeCompressed();
+ m_R.finalize();m_R.makeCompressed();
+
+ m_nonzeropivots = rank;
+
+ // Permute the triangular factor to put the 'dead' columns to the end
+ MatrixType tempR(m_R);
+ m_R = tempR * m_pivotperm;
+
+
+ // Compute the inverse permutation
+ IndexVector iperm(n);
+ for(int i = 0; i < n; i++) iperm(m_perm_c.indices()(i)) = i;
+ // Update the column permutation
+ m_outputPerm_c.resize(n);
+ for (Index j = 0; j < n; j++)
+ m_outputPerm_c.indices()(j) = iperm(m_pivotperm.indices()(j));
+
m_isInitialized = true;
m_factorizationIsok = true;
m_info = Success;
+
}
namespace internal {
@@ -404,14 +482,13 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
// Get the references
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
m_qr(qr),m_other(other),m_transpose(transpose) {}
- inline Index rows() const { return m_transpose ? m_qr.rowsQ() : m_qr.cols(); }
+ inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
inline Index cols() const { return m_other.cols(); }
// Assign to a vector
template<typename DesType>
void evalTo(DesType& res) const
{
- Index m = m_qr.rows();
Index n = m_qr.cols();
if (m_transpose)
{
@@ -420,11 +497,13 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
res = m_other;
for (Index k = 0; k < n; k++)
{
- Scalar tau;
- // Or alternatively
- tau = m_qr.m_Q.col(k).tail(m-k).dot(res.tail(m-k));
+ Scalar tau = Scalar(0);
+ tau = m_qr.m_Q.col(k).dot(res);
tau = tau * m_qr.m_hcoeffs(k);
- res -= tau * m_qr.m_Q.col(k);
+ for (typename MatrixType::InnerIterator itq(m_qr.m_Q, k); itq; ++itq)
+ {
+ res(itq.row()) -= itq.value() * tau;
+ }
}
}
else
@@ -434,8 +513,8 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
res = m_other;
for (Index k = n-1; k >=0; k--)
{
- Scalar tau;
- tau = m_qr.m_Q.col(k).tail(m-k).dot(res.tail(m-k));
+ Scalar tau = Scalar(0);
+ tau = m_qr.m_Q.col(k).dot(res);
tau = tau * m_qr.m_hcoeffs(k);
res -= tau * m_qr.m_Q.col(k);
}