From eb392960285c2645d45118d424786a73768ff50a Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 1 Sep 2014 18:16:20 +0200 Subject: Reafctoring in D&C SVD unsupported module: clean and merge the SVDBase class to Eigen/SVD, rm copy/pasted JacobiSVD.h file --- unsupported/Eigen/src/BDCSVD/BDCSVD.h | 949 ++++++++++++++++++++++++++ unsupported/Eigen/src/BDCSVD/CMakeLists.txt | 6 + unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt | 29 + unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt | 21 + unsupported/Eigen/src/CMakeLists.txt | 1 + unsupported/Eigen/src/SVD/BDCSVD.h | 937 ------------------------- unsupported/Eigen/src/SVD/CMakeLists.txt | 6 - unsupported/Eigen/src/SVD/JacobiSVD.h | 782 --------------------- unsupported/Eigen/src/SVD/SVDBase.h | 236 ------- unsupported/Eigen/src/SVD/TODOBdcsvd.txt | 29 - unsupported/Eigen/src/SVD/doneInBDCSVD.txt | 21 - 11 files changed, 1006 insertions(+), 2011 deletions(-) create mode 100644 unsupported/Eigen/src/BDCSVD/BDCSVD.h create mode 100644 unsupported/Eigen/src/BDCSVD/CMakeLists.txt create mode 100644 unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt create mode 100644 unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt delete mode 100644 unsupported/Eigen/src/SVD/BDCSVD.h delete mode 100644 unsupported/Eigen/src/SVD/CMakeLists.txt delete mode 100644 unsupported/Eigen/src/SVD/JacobiSVD.h delete mode 100644 unsupported/Eigen/src/SVD/SVDBase.h delete mode 100644 unsupported/Eigen/src/SVD/TODOBdcsvd.txt delete mode 100644 unsupported/Eigen/src/SVD/doneInBDCSVD.txt (limited to 'unsupported/Eigen/src') diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h new file mode 100644 index 000000000..a7c369633 --- /dev/null +++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h @@ -0,0 +1,949 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD" +// research report written by Ming Gu and Stanley C.Eisenstat +// The code variable names correspond to the names they used in their +// report +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// Copyright (C) 2013 Jitse Niesen +// +// Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BDCSVD_H +#define EIGEN_BDCSVD_H + +#define EPSILON 0.0000000000000001 + +#define ALGOSWAP 16 + +namespace Eigen { + +template class BDCSVD; + +namespace internal { + +template +struct traits > +{ + typedef _MatrixType MatrixType; +}; + +} // end namespace internal + + +/** \ingroup SVD_Module + * + * + * \class BDCSVD + * + * \brief class Bidiagonal Divide and Conquer SVD + * + * \param MatrixType the type of the matrix of which we are computing the SVD decomposition + * We plan to have a very similar interface to JacobiSVD on this class. + * It should be used to speed up the calcul of SVD for big matrices. 
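A minimal usage sketch of the interface described above (the sizes, the thin-unitary options, and the unsupported module header are illustrative assumptions, not prescribed by this patch):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/BDCSVD>   // assumed location of the unsupported module header

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 300);
      Eigen::VectorXd b = Eigen::VectorXd::Random(1000);

      // Same option flags as JacobiSVD; thin unitaries are enough for solving.
      Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

      Eigen::VectorXd x = svd.solve(b);                  // least-squares solution of A x = b
      std::cout << svd.singularValues()(0) << std::endl; // largest singular value
    }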
+ */ +template +class BDCSVD : public SVDBase > +{ + typedef SVDBase Base; + +public: + using Base::rows; + using Base::cols; + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options + }; + + typedef Matrix + MatrixUType; + typedef Matrix + MatrixVType; + typedef typename internal::plain_diag_type::type SingularValuesType; + typedef typename internal::plain_row_type::type RowType; + typedef typename internal::plain_col_type::type ColType; + typedef Matrix MatrixX; + typedef Matrix MatrixXr; + typedef Matrix VectorType; + typedef Array ArrayXr; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via BDCSVD::compute(const MatrixType&). + */ + BDCSVD() : algoswap(ALGOSWAP), m_numIters(0) + {} + + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem size. + * \sa BDCSVD() + */ + BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) + : algoswap(ALGOSWAP), m_numIters(0) + { + allocate(rows, cols, computationOptions); + } + + /** \brief Constructor performing the decomposition of given matrix. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) + : algoswap(ALGOSWAP), m_numIters(0) + { + compute(matrix, computationOptions); + } + + ~BDCSVD() + { + } + + /** \brief Method performing the decomposition of given matrix using custom options. + * + * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. + */ + BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
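A short sketch of the preallocating workflow these two overloads allow (problem sizes, options, and the loop are illustrative):

    // Preallocate once, then factor several same-sized matrices with the options given here.
    Eigen::BDCSVD<Eigen::MatrixXd> svd(1000, 300, Eigen::ComputeThinU | Eigen::ComputeThinV);
    for (int k = 0; k < 10; ++k)
    {
      Eigen::MatrixXd Ak = Eigen::MatrixXd::Random(1000, 300);
      svd.compute(Ak);  // reuses the computationOptions passed to the constructor
      // ... use svd.singularValues(), svd.matrixU(), svd.matrixV() here ...
    }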
+ */ + BDCSVD& compute(const MatrixType& matrix) + { + return compute(matrix, this->m_computationOptions); + } + + void setSwitchSize(int s) + { + eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); + algoswap = s; + } + + + /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * + * \param b the right - hand - side of the equation to solve. + * + * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. + * + * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. + * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); + eigen_assert(computeU() && computeV() && + "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); + return internal::solve_retval(*this, b.derived()); + } + + + const MatrixUType& matrixU() const + { + eigen_assert(this->m_isInitialized && "SVD is not initialized."); + if (isTranspose){ + eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return this->m_matrixV; + } + else + { + eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); + return this->m_matrixU; + } + + } + + + const MatrixVType& matrixV() const + { + eigen_assert(this->m_isInitialized && "SVD is not initialized."); + if (isTranspose){ + eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); + return this->m_matrixU; + } + else + { + eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. 
Did you ask for it?"); + return this->m_matrixV; + } + } + + using Base::computeU; + using Base::computeV; + +private: + void allocate(Index rows, Index cols, unsigned int computationOptions); + void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); + void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); + void computeSingVals(const ArrayXr& col0, const ArrayXr& diag, VectorType& singVals, + ArrayXr& shifts, ArrayXr& mus); + void perturbCol0(const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat); + void computeSingVecs(const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V); + void deflation43(Index firstCol, Index shift, Index i, Index size); + void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); + void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); + void copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, + const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV); + +protected: + MatrixXr m_naiveU, m_naiveV; + MatrixXr m_computed; + Index nRec; + int algoswap; + bool isTranspose, compU, compV; + +public: + int m_numIters; +}; //end class BDCSVD + + +// Methode to allocate ans initialize matrix and attributs +template +void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) +{ + isTranspose = (cols > rows); + if (Base::allocate(rows, cols, computationOptions)) return; + m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); + if (isTranspose){ + compU = this->computeU(); + compV = this->computeV(); + } + else + { + compV = this->computeU(); + compU = this->computeV(); + } + if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); + else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); + + if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); + + + //should be changed for a cleaner implementation + if (isTranspose){ + bool aux; + if (this->computeU()||this->computeV()){ + aux = this->m_computeFullU; + this->m_computeFullU = this->m_computeFullV; + this->m_computeFullV = aux; + aux = this->m_computeThinU; + this->m_computeThinU = this->m_computeThinV; + this->m_computeThinV = aux; + } + } +}// end allocate + +// Methode which compute the BDCSVD for the int +template<> +BDCSVD >& BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { + allocate(matrix.rows(), matrix.cols(), computationOptions); + this->m_nonzeroSingularValues = 0; + m_computed = Matrix::Zero(rows(), cols()); + for (int i=0; im_diagSize; i++) { + this->m_singularValues.coeffRef(i) = 0; + } + if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); + if (this->m_computeFullV) this->m_matrixV = Matrix::Zero(cols(), cols()); + this->m_isInitialized = true; + return *this; +} + + +// Methode which compute the BDCSVD +template +BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) +{ + allocate(matrix.rows(), matrix.cols(), computationOptions); + using std::abs; + + //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; + MatrixType copy; + if (isTranspose) copy = matrix.adjoint(); + else copy = matrix; + + 
internal::UpperBidiagonalization bid(copy); + + //**** step 2 Divide + m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); + m_computed.template bottomRows<1>().setZero(); + divide(0, this->m_diagSize - 1, 0, 0, 0); + + //**** step 3 copy + for (int i=0; im_diagSize; i++) { + RealScalar a = abs(m_computed.coeff(i, i)); + this->m_singularValues.coeffRef(i) = a; + if (a == 0){ + this->m_nonzeroSingularValues = i; + this->m_singularValues.tail(this->m_diagSize - i - 1).setZero(); + break; + } + else if (i == this->m_diagSize - 1) + { + this->m_nonzeroSingularValues = i + 1; + break; + } + } + copyUV(bid.householderU(), bid.householderV()); + this->m_isInitialized = true; + return *this; +}// end compute + + +template +void BDCSVD::copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, + const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV) +{ + // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa + if (this->computeU()){ + Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols(); + this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); + Index blockCols = this->m_computeThinU ? this->m_nonzeroSingularValues : this->m_diagSize; + this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) = + m_naiveV.template cast().block(0, 0, this->m_diagSize, blockCols); + this->m_matrixU = householderU * this->m_matrixU; + } + if (this->computeV()){ + Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols(); + this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); + Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize; + this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) = + m_naiveU.template cast().block(0, 0, this->m_diagSize, blockCols); + this->m_matrixV = householderV * this->m_matrixV; + } +} + +// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the +// place of the submatrix we are currently working on. + +//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; +//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; +// lastCol + 1 - firstCol is the size of the submatrix. +//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) +//@param firstRowW : Same as firstRowW with the column. +//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix +// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. +template +void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, + Index firstColW, Index shift) +{ + // requires nbRows = nbCols + 1; + using std::pow; + using std::sqrt; + using std::abs; + const Index n = lastCol - firstCol + 1; + const Index k = n/2; + RealScalar alphaK; + RealScalar betaK; + RealScalar r0; + RealScalar lambda, phi, c0, s0; + MatrixXr l, f; + // We use the other algorithm which is more efficient for small + // matrices. 
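The crossover between the divide-and-conquer recursion and the JacobiSVD fallback is controlled by algoswap (ALGOSWAP, 16 by default) and can be tuned through setSwitchSize(); a brief sketch with an arbitrary threshold value:

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(200, 200);
    Eigen::BDCSVD<Eigen::MatrixXd> svd;
    svd.setSwitchSize(32);  // sub-problems with n < 32 go to JacobiSVD instead of recursing further
    svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);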
+ if (n < algoswap){ + JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), + ComputeFullU | (ComputeFullV * compV)) ; + if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); + else + { + m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); + m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); + } + if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); + m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); + for (int i=0; i= firstCol; i--) + { + m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); + } + // we shift q1 at the left with a factor c0 + m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); + // last column = q1 * - s0 + m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; + // q2 *= c0 + m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; + } + else + { + RealScalar q1 = (m_naiveU(0, firstCol + k)); + // we shift Q1 to the right + for (Index i = firstCol + k - 1; i >= firstCol; i--) + { + m_naiveU(0, i + 1) = m_naiveU(0, i); + } + // we shift q1 at the left with a factor c0 + m_naiveU(0, firstCol) = (q1 * c0); + // last column = q1 * - s0 + m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); + // first column = q2 * s0 + m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; + // q2 *= c0 + m_naiveU(1, lastCol + 1) *= c0; + m_naiveU.row(1).segment(firstCol + 1, k).setZero(); + m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); + } + m_computed(firstCol + shift, firstCol + shift) = r0; + m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); + m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); + + + // Second part: try to deflate singular values in combined matrix + deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); + + // Third part: compute SVD of combined matrix + MatrixXr UofSVD, VofSVD; + VectorType singVals; + computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); + if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; + else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; + if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; + m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); + m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; +}// end divide + +// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in +// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing +// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except +// that if compV is false, then V is not computed. Singular values are sorted in decreasing order. +// +// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better +// handling of round-off errors, be consistent in ordering +template +void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) +{ + // TODO Get rid of these copies (?) 
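For reference, computeSingVals below finds each singular value of the deflated matrix M (nonzeros only in its first column z = col0 and on its diagonal d = diag) as a root of the secular function f(sigma) = 1 + sum_i z_i^2 / ((d_i - sigma)(d_i + sigma)). A standalone sketch of that evaluation, with hypothetical names and double precision assumed:

    #include <Eigen/Dense>

    // Secular function whose roots are the singular values of M.
    double secularEq(const Eigen::ArrayXd& z, const Eigen::ArrayXd& d, double sigma)
    {
      return 1.0 + (z.square() / ((d - sigma) * (d + sigma))).sum();
    }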
+ ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1); + ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal(); + diag(0) = 0; + + // compute singular values and vectors (in decreasing order) + singVals.resize(n); + U.resize(n+1, n+1); + if (compV) V.resize(n, n); + + if (col0.hasNaN() || diag.hasNaN()) return; + + ArrayXr shifts(n), mus(n), zhat(n); + computeSingVals(col0, diag, singVals, shifts, mus); + perturbCol0(col0, diag, singVals, shifts, mus, zhat); + computeSingVecs(zhat, diag, singVals, shifts, mus, U, V); + + // Reverse order so that singular values in increased order + singVals.reverseInPlace(); + U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); + if (compV) V = V.rowwise().reverse().eval(); +} + +template +void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& diag, + VectorType& singVals, ArrayXr& shifts, ArrayXr& mus) +{ + using std::abs; + using std::swap; + + Index n = col0.size(); + for (Index k = 0; k < n; ++k) { + if (col0(k) == 0) { + // entry is deflated, so singular value is on diagonal + singVals(k) = diag(k); + mus(k) = 0; + shifts(k) = diag(k); + continue; + } + + // otherwise, use secular equation to find singular value + RealScalar left = diag(k); + RealScalar right = (k != n-1) ? diag(k+1) : (diag(n-1) + col0.matrix().norm()); + + // first decide whether it's closer to the left end or the right end + RealScalar mid = left + (right-left) / 2; + RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum(); + + RealScalar shift; + if (k == n-1 || fMid > 0) shift = left; + else shift = right; + + // measure everything relative to shift + ArrayXr diagShifted = diag - shift; + + // initial guess + RealScalar muPrev, muCur; + if (shift == left) { + muPrev = (right - left) * 0.1; + if (k == n-1) muCur = right - left; + else muCur = (right - left) * 0.5; + } else { + muPrev = -(right - left) * 0.1; + muCur = -(right - left) * 0.5; + } + + RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum(); + RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); + if (abs(fPrev) < abs(fCur)) { + swap(fPrev, fCur); + swap(muPrev, muCur); + } + + // rational interpolation: fit a function of the form a / mu + b through the two previous + // iterates and use its zero to compute the next iterate + bool useBisection = false; + while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) { + ++m_numIters; + + RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev); + RealScalar b = fCur - a / muCur; + + muPrev = muCur; + fPrev = fCur; + muCur = -a / b; + fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); + + if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true; + if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true; + } + + // fall back on bisection method if rational interpolation did not work + if (useBisection) { + RealScalar leftShifted, rightShifted; + if (shift == left) { + leftShifted = 1e-30; + if (k == 0) rightShifted = right - left; + else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe + } else { + leftShifted = -(right - left) * 0.6; + rightShifted = -1e-30; + } + + RealScalar fLeft = 1 + (col0.square() / ((diagShifted - leftShifted) * (diag + shift + leftShifted))).sum(); + RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag 
+ shift + rightShifted))).sum(); + assert(fLeft * fRight < 0); + + while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) { + RealScalar midShifted = (leftShifted + rightShifted) / 2; + RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum(); + if (fLeft * fMid < 0) { + rightShifted = midShifted; + fRight = fMid; + } else { + leftShifted = midShifted; + fLeft = fMid; + } + } + + muCur = (leftShifted + rightShifted) / 2; + } + + singVals[k] = shift + muCur; + shifts[k] = shift; + mus[k] = muCur; + + // perturb singular value slightly if it equals diagonal entry to avoid division by zero later + // (deflation is supposed to avoid this from happening) + if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); + if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); + } +} + + +// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) +template +void BDCSVD::perturbCol0 + (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat) +{ + Index n = col0.size(); + for (Index k = 0; k < n; ++k) { + if (col0(k) == 0) + zhat(k) = 0; + else { + // see equation (3.6) + using std::sqrt; + RealScalar tmp = + sqrt( + (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k))) + * ( + ((singVals.head(k).array() + diag(k)) * (mus.head(k) + (shifts.head(k) - diag(k)))) + / ((diag.head(k).array() + diag(k)) * (diag.head(k).array() - diag(k))) + ).prod() + * ( + ((singVals.segment(k, n-k-1).array() + diag(k)) * (mus.segment(k, n-k-1) + (shifts.segment(k, n-k-1) - diag(k)))) + / ((diag.tail(n-k-1) + diag(k)) * (diag.tail(n-k-1) - diag(k))) + ).prod() + ); + if (col0(k) > 0) zhat(k) = tmp; + else zhat(k) = -tmp; + } + } +} + +// compute singular vectors +template +void BDCSVD::computeSingVecs + (const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, + const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V) +{ + Index n = zhat.size(); + for (Index k = 0; k < n; ++k) { + if (zhat(k) == 0) { + U.col(k) = VectorType::Unit(n+1, k); + if (compV) V.col(k) = VectorType::Unit(n, k); + } else { + U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k])); + U(n,k) = 0; + U.col(k).normalize(); + + if (compV) { + V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1); + V(0,k) = -1; + V.col(k).normalize(); + } + } + } + U.col(n) = VectorType::Unit(n+1, n); +} + + +// page 12_13 +// i >= 1, di almost null and zi non null. 
+// We use a rotation to zero out zi applied to the left of M +template +void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ + using std::abs; + using std::sqrt; + using std::pow; + RealScalar c = m_computed(firstCol + shift, firstCol + shift); + RealScalar s = m_computed(i, firstCol + shift); + RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); + if (r == 0){ + m_computed(i, i)=0; + return; + } + c/=r; + s/=r; + m_computed(firstCol + shift, firstCol + shift) = r; + m_computed(i, firstCol + shift) = 0; + m_computed(i, i) = 0; + if (compU){ + m_naiveU.col(firstCol).segment(firstCol,size) = + c * m_naiveU.col(firstCol).segment(firstCol, size) - + s * m_naiveU.col(i).segment(firstCol, size) ; + + m_naiveU.col(i).segment(firstCol, size) = + (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + + (s/c) * m_naiveU.col(firstCol).segment(firstCol,size); + } +}// end deflation 43 + + +// page 13 +// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) +// We apply two rotations to have zj = 0; +template +void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ + using std::abs; + using std::sqrt; + using std::conj; + using std::pow; + RealScalar c = m_computed(firstColm, firstColm + j - 1); + RealScalar s = m_computed(firstColm, firstColm + i - 1); + RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); + if (r==0){ + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + return; + } + c/=r; + s/=r; + m_computed(firstColm + i, firstColm) = r; + m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); + m_computed(firstColm + j, firstColm) = 0; + if (compU){ + m_naiveU.col(firstColu + i).segment(firstColu, size) = + c * m_naiveU.col(firstColu + i).segment(firstColu, size) - + s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; + + m_naiveU.col(firstColu + j).segment(firstColu, size) = + (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + + (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); + } + if (compV){ + m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = + c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + + s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; + + m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) = + (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - + (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1); + } +}// end deflation 44 + + +// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] +template +void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ + //condition 4.1 + using std::sqrt; + const Index length = lastCol + 1 - firstCol; + RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm(); + RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm(); + RealScalar EPS = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); + if (m_computed(firstCol + shift, firstCol + shift) < EPS){ + m_computed(firstCol + shift, firstCol + shift) = EPS; + } + + //condition 4.2 + for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ + if (std::abs(m_computed(i, firstCol + shift)) < EPS){ + m_computed(i, firstCol + shift) = 0; + } + } + + //condition 4.3 + for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ + if (m_computed(i, i) < EPS){ + 
deflation43(firstCol, shift, i, length); + } + } + + //condition 4.4 + + Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; + //we stock the final place of each line + Index *permutation = new Index[length]; + + for (Index p =1; p < length; p++) { + if (i> firstCol + shift + k){ + permutation[p] = j; + j++; + } else if (j> lastCol + shift) + { + permutation[p] = i; + i++; + } + else + { + if (m_computed(i, i) < m_computed(j, j)){ + permutation[p] = j; + j++; + } + else + { + permutation[p] = i; + i++; + } + } + } + //we do the permutation + RealScalar aux; + //we stock the current index of each col + //and the column of each index + Index *realInd = new Index[length]; + Index *realCol = new Index[length]; + for (int pos = 0; pos< length; pos++){ + realCol[pos] = pos + firstCol + shift; + realInd[pos] = pos; + } + const Index Zero = firstCol + shift; + VectorType temp; + for (int i = 1; i < length - 1; i++){ + const Index I = i + Zero; + const Index realI = realInd[i]; + const Index j = permutation[length - i] - Zero; + const Index J = realCol[j]; + + //diag displace + aux = m_computed(I, I); + m_computed(I, I) = m_computed(J, J); + m_computed(J, J) = aux; + + //firstrow displace + aux = m_computed(I, Zero); + m_computed(I, Zero) = m_computed(J, Zero); + m_computed(J, Zero) = aux; + + // change columns + if (compU) { + temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); + m_naiveU.col(I - shift).segment(firstCol, length + 1) << + m_naiveU.col(J - shift).segment(firstCol, length + 1); + m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; + } + else + { + temp = m_naiveU.col(I - shift).segment(0, 2); + m_naiveU.col(I - shift).segment(0, 2) << + m_naiveU.col(J - shift).segment(0, 2); + m_naiveU.col(J - shift).segment(0, 2) << temp; + } + if (compV) { + const Index CWI = I + firstColW - Zero; + const Index CWJ = J + firstColW - Zero; + temp = m_naiveV.col(CWI).segment(firstRowW, length); + m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); + m_naiveV.col(CWJ).segment(firstRowW, length) << temp; + } + + //update real pos + realCol[realI] = J; + realCol[j] = I; + realInd[J - Zero] = realI; + realInd[I - Zero] = j; + } + for (Index i = firstCol + shift + 1; i +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef BDCSVD<_MatrixType> BDCSVDType; + EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) + + template void evalTo(Dest& dst) const + { + eigen_assert(rhs().rows() == dec().rows()); + // A = U S V^* + // So A^{ - 1} = V S^{ - 1} U^* + Index diagSize = (std::min)(dec().rows(), dec().cols()); + typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); + Index nonzeroSingVals = dec().nonzeroSingularValues(); + invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); + invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); + + dst = dec().matrixV().leftCols(diagSize) + * invertedSingVals.asDiagonal() + * dec().matrixU().leftCols(diagSize).adjoint() + * rhs(); + return; + } +}; + +} //end namespace internal + + /** \svd_module + * + * \return the singular value decomposition of \c *this computed by + * BDC Algorithm + * + * \sa class BDCSVD + */ +/* +template +BDCSVD::PlainObject> +MatrixBase::bdcSvd(unsigned int computationOptions) const +{ + return BDCSVD(*this, computationOptions); +} +*/ + +} // end namespace Eigen + +#endif diff --git a/unsupported/Eigen/src/BDCSVD/CMakeLists.txt b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt new file mode 100644 index 
000000000..73b89ea18
--- /dev/null
+++ b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_BDCSVD_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_BDCSVD_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/BDCSVD COMPONENT Devel
+  )
diff --git a/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
new file mode 100644
index 000000000..0bc9a46e6
--- /dev/null
+++ b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
@@ -0,0 +1,29 @@
+TO DO LIST
+
+
+(optional optimizations)
+ - do all the allocations in the allocate part
+ - support static matrices
+ - return an error at compile time when using integer matrices (int, long, std::complex, ...)
+
+to finish the algorithm:
+ - implement the last part of the algorithm as described in the reference paper.
+   You may find more information on that part in this paper
+ - replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
+   deflation.
+
+(suggested step-by-step resolution)
+ 0) comment out the call to Jacobi in the last part of the divide method and everything after it,
+    until the end of the method. What is commented out can serve as a guideline for steps 3), 4) and 6)
+ 1) solve the secular equation (characteristic equation) on the values that are not null (zi!=0 and di!=0),
+    after the deflation, which should be uncommented in the divide method
+ 2) remember the values of the singular values that are already computed (zi=0)
+ 3) assign the singular values found to the right places in m_computed (together with the ones found in step 2))
+    in decreasing order
+ 4) set the first column to zero (except the first element) in m_computed
+ 5) compute all the singular vectors when compV is set to true, and only the left vectors when
+    compV is set to false
+ 6) multiply naiveU and naiveV on the right by the matrices found; only naiveU when compV is set to
+    false, /!\ if compU is false naiveU has only 2 rows
+ 7) delete everything commented out in step 0)
diff --git a/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
new file mode 100644
index 000000000..8563ddab8
--- /dev/null
+++ b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
@@ -0,0 +1,21 @@
+This unsupported package implements a divide-and-conquer algorithm to compute the SVD.
+
+The implementation follows the reference paper below as closely as possible:
+http://www.cs.yale.edu/publications/techreports/tr933.pdf
+
+The code documentation uses the same variable names as the reference paper. The code, deflation included, is
+working, but there are a few things that could be optimised, as explained in TODOBdcsvd.txt.
+
+In the code, comments were placed at the point where the third step of the algorithm would go, so one can simply add
+a call to a function doing the last part of the algorithm without needing any knowledge of the part we implemented.
+
+In TODOBdcsvd.txt we explain the main difficulty of the last part and suggest a reference paper to help solve it.
+
+The implementation has trouble with fixed-size matrices.
+
+In the current implementation, it returns matrices of zeros when asked to do an SVD of an int matrix.
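To make the integer-matrix limitation above concrete, a sketch of what a caller currently observes (assuming a dynamic int matrix is what the integer specialization of compute() earlier in this patch covers; header location as in the earlier sketch):

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/BDCSVD>  // assumed module header

    int main()
    {
      Eigen::MatrixXi Ai = Eigen::MatrixXi::Random(4, 4);
      // The integer specialization of compute() only zero-fills its results.
      Eigen::BDCSVD<Eigen::MatrixXi> svd(Ai, Eigen::ComputeFullU | Eigen::ComputeFullV);
      std::cout << svd.singularValues().transpose() << std::endl;  // all zeros
    }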
+ + +Paper for the third part: +http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf + diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt index 8eb2808e3..654a2327f 100644 --- a/unsupported/Eigen/src/CMakeLists.txt +++ b/unsupported/Eigen/src/CMakeLists.txt @@ -12,3 +12,4 @@ ADD_SUBDIRECTORY(Skyline) ADD_SUBDIRECTORY(SparseExtra) ADD_SUBDIRECTORY(KroneckerProduct) ADD_SUBDIRECTORY(Splines) +ADD_SUBDIRECTORY(BDCSVD) diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h deleted file mode 100644 index 80006fd60..000000000 --- a/unsupported/Eigen/src/SVD/BDCSVD.h +++ /dev/null @@ -1,937 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD" -// research report written by Ming Gu and Stanley C.Eisenstat -// The code variable names correspond to the names they used in their -// report -// -// Copyright (C) 2013 Gauthier Brun -// Copyright (C) 2013 Nicolas Carre -// Copyright (C) 2013 Jean Ceccato -// Copyright (C) 2013 Pierre Zoppitelli -// Copyright (C) 2013 Jitse Niesen -// -// Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_BDCSVD_H -#define EIGEN_BDCSVD_H - -#define EPSILON 0.0000000000000001 - -#define ALGOSWAP 16 - -namespace Eigen { -/** \ingroup SVD_Module - * - * - * \class BDCSVD - * - * \brief class Bidiagonal Divide and Conquer SVD - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * We plan to have a very similar interface to JacobiSVD on this class. - * It should be used to speed up the calcul of SVD for big matrices. - */ -template -class BDCSVD : public SVDBase<_MatrixType> -{ - typedef SVDBase<_MatrixType> Base; - -public: - using Base::rows; - using Base::cols; - - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix MatrixX; - typedef Matrix MatrixXr; - typedef Matrix VectorType; - typedef Array ArrayXr; - - /** \brief Default Constructor. - * - * The default constructor is useful in cases in which the user intends to - * perform decompositions via BDCSVD::compute(const MatrixType&). - */ - BDCSVD() - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - {} - - - /** \brief Default Constructor with memory preallocation - * - * Like the default constructor but with preallocation of the internal data - * according to the specified problem size. 
- * \sa BDCSVD() - */ - BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - { - allocate(rows, cols, computationOptions); - } - - /** \brief Constructor performing the decomposition of given matrix. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non - default) FullPivHouseholderQR preconditioner. - */ - BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase(), - algoswap(ALGOSWAP), m_numIters(0) - { - compute(matrix, computationOptions); - } - - ~BDCSVD() - { - } - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non - default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). - */ - SVDBase& compute(const MatrixType& matrix) - { - return compute(matrix, this->m_computationOptions); - } - - void setSwitchSize(int s) - { - eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3"); - algoswap = s; - } - - - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. - * - * \param b the right - hand - side of the equation to solve. - * - * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * - * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving. - * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(this->m_isInitialized && "BDCSVD is not initialized."); - eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() && - "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } - - - const MatrixUType& matrixU() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. 
Did you ask for it?"); - return this->m_matrixV; - } - else - { - eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return this->m_matrixU; - } - - } - - - const MatrixVType& matrixV() const - { - eigen_assert(this->m_isInitialized && "SVD is not initialized."); - if (isTranspose){ - eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return this->m_matrixU; - } - else - { - eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return this->m_matrixV; - } - } - -private: - void allocate(Index rows, Index cols, unsigned int computationOptions); - void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); - void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); - void computeSingVals(const ArrayXr& col0, const ArrayXr& diag, VectorType& singVals, - ArrayXr& shifts, ArrayXr& mus); - void perturbCol0(const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat); - void computeSingVecs(const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V); - void deflation43(Index firstCol, Index shift, Index i, Index size); - void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size); - void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift); - void copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV); - -protected: - MatrixXr m_naiveU, m_naiveV; - MatrixXr m_computed; - Index nRec; - int algoswap; - bool isTranspose, compU, compV; - -public: - int m_numIters; -}; //end class BDCSVD - - -// Methode to allocate ans initialize matrix and attributs -template -void BDCSVD::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - isTranspose = (cols > rows); - if (SVDBase::allocate(rows, cols, computationOptions)) return; - m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize ); - if (isTranspose){ - compU = this->computeU(); - compV = this->computeV(); - } - else - { - compV = this->computeU(); - compU = this->computeV(); - } - if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 ); - else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 ); - - if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize); - - - //should be changed for a cleaner implementation - if (isTranspose){ - bool aux; - if (this->computeU()||this->computeV()){ - aux = this->m_computeFullU; - this->m_computeFullU = this->m_computeFullV; - this->m_computeFullV = aux; - aux = this->m_computeThinU; - this->m_computeThinU = this->m_computeThinV; - this->m_computeThinV = aux; - } - } -}// end allocate - -// Methode which compute the BDCSVD for the int -template<> -SVDBase >& -BDCSVD >::compute(const MatrixType& matrix, unsigned int computationOptions) { - allocate(matrix.rows(), matrix.cols(), computationOptions); - this->m_nonzeroSingularValues = 0; - m_computed = Matrix::Zero(rows(), cols()); - for (int i=0; im_diagSize; i++) { - this->m_singularValues.coeffRef(i) = 0; - } - if (this->m_computeFullU) this->m_matrixU = Matrix::Zero(rows(), rows()); - if (this->m_computeFullV) 
this->m_matrixV = Matrix::Zero(cols(), cols()); - this->m_isInitialized = true; - return *this; -} - - -// Methode which compute the BDCSVD -template -SVDBase& -BDCSVD::compute(const MatrixType& matrix, unsigned int computationOptions) -{ - allocate(matrix.rows(), matrix.cols(), computationOptions); - using std::abs; - - //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ; - MatrixType copy; - if (isTranspose) copy = matrix.adjoint(); - else copy = matrix; - - internal::UpperBidiagonalization bid(copy); - - //**** step 2 Divide - m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose(); - m_computed.template bottomRows<1>().setZero(); - divide(0, this->m_diagSize - 1, 0, 0, 0); - - //**** step 3 copy - for (int i=0; im_diagSize; i++) { - RealScalar a = abs(m_computed.coeff(i, i)); - this->m_singularValues.coeffRef(i) = a; - if (a == 0){ - this->m_nonzeroSingularValues = i; - this->m_singularValues.tail(this->m_diagSize - i - 1).setZero(); - break; - } - else if (i == this->m_diagSize - 1) - { - this->m_nonzeroSingularValues = i + 1; - break; - } - } - copyUV(bid.householderU(), bid.householderV()); - this->m_isInitialized = true; - return *this; -}// end compute - - -template -void BDCSVD::copyUV(const typename internal::UpperBidiagonalization::HouseholderUSequenceType& householderU, - const typename internal::UpperBidiagonalization::HouseholderVSequenceType& householderV) -{ - // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa - if (this->computeU()){ - Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols(); - this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); - Index blockCols = this->m_computeThinU ? this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) = - m_naiveV.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixU = householderU * this->m_matrixU; - } - if (this->computeV()){ - Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols(); - this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); - Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize; - this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) = - m_naiveU.template cast().block(0, 0, this->m_diagSize, blockCols); - this->m_matrixV = householderV * this->m_matrixV; - } -} - -// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the -// place of the submatrix we are currently working on. - -//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; -//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; -// lastCol + 1 - firstCol is the size of the submatrix. -//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W) -//@param firstRowW : Same as firstRowW with the column. -//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix -// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. 
-template -void BDCSVD::divide (Index firstCol, Index lastCol, Index firstRowW, - Index firstColW, Index shift) -{ - // requires nbRows = nbCols + 1; - using std::pow; - using std::sqrt; - using std::abs; - const Index n = lastCol - firstCol + 1; - const Index k = n/2; - RealScalar alphaK; - RealScalar betaK; - RealScalar r0; - RealScalar lambda, phi, c0, s0; - MatrixXr l, f; - // We use the other algorithm which is more efficient for small - // matrices. - if (n < algoswap){ - JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), - ComputeFullU | (ComputeFullV * compV)) ; - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU(); - else - { - m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0); - m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n); - } - if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV(); - m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); - for (int i=0; i= firstCol; i--) - { - m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1); - } - // we shift q1 at the left with a factor c0 - m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0); - // last column = q1 * - s0 - m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0)); - // first column = q2 * s0 - m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; - // q2 *= c0 - m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; - } - else - { - RealScalar q1 = (m_naiveU(0, firstCol + k)); - // we shift Q1 to the right - for (Index i = firstCol + k - 1; i >= firstCol; i--) - { - m_naiveU(0, i + 1) = m_naiveU(0, i); - } - // we shift q1 at the left with a factor c0 - m_naiveU(0, firstCol) = (q1 * c0); - // last column = q1 * - s0 - m_naiveU(0, lastCol + 1) = (q1 * ( - s0)); - // first column = q2 * s0 - m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; - // q2 *= c0 - m_naiveU(1, lastCol + 1) *= c0; - m_naiveU.row(1).segment(firstCol + 1, k).setZero(); - m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero(); - } - m_computed(firstCol + shift, firstCol + shift) = r0; - m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real(); - m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real(); - - - // Second part: try to deflate singular values in combined matrix - deflation(firstCol, lastCol, k, firstRowW, firstColW, shift); - - // Third part: compute SVD of combined matrix - MatrixXr UofSVD, VofSVD; - VectorType singVals; - computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD); - if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD; - else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD; - if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD; - m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero(); - m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals; -}// end divide - -// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in -// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing -// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except -// that if compV is false, then V is not computed. Singular values are sorted in decreasing order. 
-// -// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better -// handling of round-off errors, be consistent in ordering -template -void BDCSVD::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) -{ - // TODO Get rid of these copies (?) - ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1); - ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal(); - diag(0) = 0; - - // compute singular values and vectors (in decreasing order) - singVals.resize(n); - U.resize(n+1, n+1); - if (compV) V.resize(n, n); - - if (col0.hasNaN() || diag.hasNaN()) return; - - ArrayXr shifts(n), mus(n), zhat(n); - computeSingVals(col0, diag, singVals, shifts, mus); - perturbCol0(col0, diag, singVals, shifts, mus, zhat); - computeSingVecs(zhat, diag, singVals, shifts, mus, U, V); - - // Reverse order so that singular values in increased order - singVals.reverseInPlace(); - U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval(); - if (compV) V = V.rowwise().reverse().eval(); -} - -template -void BDCSVD::computeSingVals(const ArrayXr& col0, const ArrayXr& diag, - VectorType& singVals, ArrayXr& shifts, ArrayXr& mus) -{ - using std::abs; - using std::swap; - - Index n = col0.size(); - for (Index k = 0; k < n; ++k) { - if (col0(k) == 0) { - // entry is deflated, so singular value is on diagonal - singVals(k) = diag(k); - mus(k) = 0; - shifts(k) = diag(k); - continue; - } - - // otherwise, use secular equation to find singular value - RealScalar left = diag(k); - RealScalar right = (k != n-1) ? diag(k+1) : (diag(n-1) + col0.matrix().norm()); - - // first decide whether it's closer to the left end or the right end - RealScalar mid = left + (right-left) / 2; - RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum(); - - RealScalar shift; - if (k == n-1 || fMid > 0) shift = left; - else shift = right; - - // measure everything relative to shift - ArrayXr diagShifted = diag - shift; - - // initial guess - RealScalar muPrev, muCur; - if (shift == left) { - muPrev = (right - left) * 0.1; - if (k == n-1) muCur = right - left; - else muCur = (right - left) * 0.5; - } else { - muPrev = -(right - left) * 0.1; - muCur = -(right - left) * 0.5; - } - - RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum(); - RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); - if (abs(fPrev) < abs(fCur)) { - swap(fPrev, fCur); - swap(muPrev, muCur); - } - - // rational interpolation: fit a function of the form a / mu + b through the two previous - // iterates and use its zero to compute the next iterate - bool useBisection = false; - while (abs(muCur - muPrev) > 8 * NumTraits::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) { - ++m_numIters; - - RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev); - RealScalar b = fCur - a / muCur; - - muPrev = muCur; - fPrev = fCur; - muCur = -a / b; - fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum(); - - if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true; - if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true; - } - - // fall back on bisection method if rational interpolation did not work - if (useBisection) { - RealScalar leftShifted, rightShifted; - if (shift == left) { - leftShifted = 1e-30; - if (k == 0) rightShifted = right - left; - else rightShifted = (right - left) * 0.6; // 
theoretically we can take 0.5, but let's be safe - } else { - leftShifted = -(right - left) * 0.6; - rightShifted = -1e-30; - } - - RealScalar fLeft = 1 + (col0.square() / ((diagShifted - leftShifted) * (diag + shift + leftShifted))).sum(); - RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag + shift + rightShifted))).sum(); - assert(fLeft * fRight < 0); - - while (rightShifted - leftShifted > 2 * NumTraits::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) { - RealScalar midShifted = (leftShifted + rightShifted) / 2; - RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum(); - if (fLeft * fMid < 0) { - rightShifted = midShifted; - fRight = fMid; - } else { - leftShifted = midShifted; - fLeft = fMid; - } - } - - muCur = (leftShifted + rightShifted) / 2; - } - - singVals[k] = shift + muCur; - shifts[k] = shift; - mus[k] = muCur; - - // perturb singular value slightly if it equals diagonal entry to avoid division by zero later - // (deflation is supposed to avoid this from happening) - if (singVals[k] == left) singVals[k] *= 1 + NumTraits::epsilon(); - if (singVals[k] == right) singVals[k] *= 1 - NumTraits::epsilon(); - } -} - - -// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) -template -void BDCSVD::perturbCol0 - (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat) -{ - Index n = col0.size(); - for (Index k = 0; k < n; ++k) { - if (col0(k) == 0) - zhat(k) = 0; - else { - // see equation (3.6) - using std::sqrt; - RealScalar tmp = - sqrt( - (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k))) - * ( - ((singVals.head(k).array() + diag(k)) * (mus.head(k) + (shifts.head(k) - diag(k)))) - / ((diag.head(k).array() + diag(k)) * (diag.head(k).array() - diag(k))) - ).prod() - * ( - ((singVals.segment(k, n-k-1).array() + diag(k)) * (mus.segment(k, n-k-1) + (shifts.segment(k, n-k-1) - diag(k)))) - / ((diag.tail(n-k-1) + diag(k)) * (diag.tail(n-k-1) - diag(k))) - ).prod() - ); - if (col0(k) > 0) zhat(k) = tmp; - else zhat(k) = -tmp; - } - } -} - -// compute singular vectors -template -void BDCSVD::computeSingVecs - (const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals, - const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V) -{ - Index n = zhat.size(); - for (Index k = 0; k < n; ++k) { - if (zhat(k) == 0) { - U.col(k) = VectorType::Unit(n+1, k); - if (compV) V.col(k) = VectorType::Unit(n, k); - } else { - U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k])); - U(n,k) = 0; - U.col(k).normalize(); - - if (compV) { - V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1); - V(0,k) = -1; - V.col(k).normalize(); - } - } - } - U.col(n) = VectorType::Unit(n+1, n); -} - - -// page 12_13 -// i >= 1, di almost null and zi non null. 
-// We use a rotation to zero out zi applied to the left of M -template -void BDCSVD::deflation43(Index firstCol, Index shift, Index i, Index size){ - using std::abs; - using std::sqrt; - using std::pow; - RealScalar c = m_computed(firstCol + shift, firstCol + shift); - RealScalar s = m_computed(i, firstCol + shift); - RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r == 0){ - m_computed(i, i)=0; - return; - } - c/=r; - s/=r; - m_computed(firstCol + shift, firstCol + shift) = r; - m_computed(i, firstCol + shift) = 0; - m_computed(i, i) = 0; - if (compU){ - m_naiveU.col(firstCol).segment(firstCol,size) = - c * m_naiveU.col(firstCol).segment(firstCol, size) - - s * m_naiveU.col(i).segment(firstCol, size) ; - - m_naiveU.col(i).segment(firstCol, size) = - (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + - (s/c) * m_naiveU.col(firstCol).segment(firstCol,size); - } -}// end deflation 43 - - -// page 13 -// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M) -// We apply two rotations to have zj = 0; -template -void BDCSVD::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){ - using std::abs; - using std::sqrt; - using std::conj; - using std::pow; - RealScalar c = m_computed(firstColm, firstColm + j - 1); - RealScalar s = m_computed(firstColm, firstColm + i - 1); - RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2)); - if (r==0){ - m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); - return; - } - c/=r; - s/=r; - m_computed(firstColm + i, firstColm) = r; - m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j); - m_computed(firstColm + j, firstColm) = 0; - if (compU){ - m_naiveU.col(firstColu + i).segment(firstColu, size) = - c * m_naiveU.col(firstColu + i).segment(firstColu, size) - - s * m_naiveU.col(firstColu + j).segment(firstColu, size) ; - - m_naiveU.col(firstColu + j).segment(firstColu, size) = - (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) + - (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size); - } - if (compV){ - m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = - c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + - s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ; - - m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) = - (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - - (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1); - } -}// end deflation 44 - - -// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] -template -void BDCSVD::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){ - //condition 4.1 - using std::sqrt; - const Index length = lastCol + 1 - firstCol; - RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm(); - RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm(); - RealScalar EPS = 10 * NumTraits::epsilon() * sqrt(norm1 + norm2); - if (m_computed(firstCol + shift, firstCol + shift) < EPS){ - m_computed(firstCol + shift, firstCol + shift) = EPS; - } - - //condition 4.2 - for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){ - if (std::abs(m_computed(i, firstCol + shift)) < EPS){ - m_computed(i, firstCol + shift) = 0; - } - } - - //condition 4.3 - for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){ - if (m_computed(i, i) < EPS){ - 
deflation43(firstCol, shift, i, length); - } - } - - //condition 4.4 - - Index i=firstCol + shift + 1, j=firstCol + shift + k + 1; - //we stock the final place of each line - Index *permutation = new Index[length]; - - for (Index p =1; p < length; p++) { - if (i> firstCol + shift + k){ - permutation[p] = j; - j++; - } else if (j> lastCol + shift) - { - permutation[p] = i; - i++; - } - else - { - if (m_computed(i, i) < m_computed(j, j)){ - permutation[p] = j; - j++; - } - else - { - permutation[p] = i; - i++; - } - } - } - //we do the permutation - RealScalar aux; - //we stock the current index of each col - //and the column of each index - Index *realInd = new Index[length]; - Index *realCol = new Index[length]; - for (int pos = 0; pos< length; pos++){ - realCol[pos] = pos + firstCol + shift; - realInd[pos] = pos; - } - const Index Zero = firstCol + shift; - VectorType temp; - for (int i = 1; i < length - 1; i++){ - const Index I = i + Zero; - const Index realI = realInd[i]; - const Index j = permutation[length - i] - Zero; - const Index J = realCol[j]; - - //diag displace - aux = m_computed(I, I); - m_computed(I, I) = m_computed(J, J); - m_computed(J, J) = aux; - - //firstrow displace - aux = m_computed(I, Zero); - m_computed(I, Zero) = m_computed(J, Zero); - m_computed(J, Zero) = aux; - - // change columns - if (compU) { - temp = m_naiveU.col(I - shift).segment(firstCol, length + 1); - m_naiveU.col(I - shift).segment(firstCol, length + 1) << - m_naiveU.col(J - shift).segment(firstCol, length + 1); - m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp; - } - else - { - temp = m_naiveU.col(I - shift).segment(0, 2); - m_naiveU.col(I - shift).segment(0, 2) << - m_naiveU.col(J - shift).segment(0, 2); - m_naiveU.col(J - shift).segment(0, 2) << temp; - } - if (compV) { - const Index CWI = I + firstColW - Zero; - const Index CWJ = J + firstColW - Zero; - temp = m_naiveV.col(CWI).segment(firstRowW, length); - m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length); - m_naiveV.col(CWJ).segment(firstRowW, length) << temp; - } - - //update real pos - realCol[realI] = J; - realCol[j] = I; - realInd[J - Zero] = realI; - realInd[I - Zero] = j; - } - for (Index i = firstCol + shift + 1; i -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef BDCSVD<_MatrixType> BDCSVDType; - EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs) - - template void evalTo(Dest& dst) const - { - eigen_assert(rhs().rows() == dec().rows()); - // A = U S V^* - // So A^{ - 1} = V S^{ - 1} U^* - Index diagSize = (std::min)(dec().rows(), dec().cols()); - typename BDCSVDType::SingularValuesType invertedSingVals(diagSize); - Index nonzeroSingVals = dec().nonzeroSingularValues(); - invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); - invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); - - dst = dec().matrixV().leftCols(diagSize) - * invertedSingVals.asDiagonal() - * dec().matrixU().leftCols(diagSize).adjoint() - * rhs(); - return; - } -}; - -} //end namespace internal - - /** \svd_module - * - * \return the singular value decomposition of \c *this computed by - * BDC Algorithm - * - * \sa class BDCSVD - */ -/* -template -BDCSVD::PlainObject> -MatrixBase::bdcSvd(unsigned int computationOptions) const -{ - return BDCSVD(*this, computationOptions); -} -*/ - -} // end namespace Eigen - -#endif diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt deleted file mode 100644 index 
b40baf092..000000000 --- a/unsupported/Eigen/src/SVD/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -FILE(GLOB Eigen_SVD_SRCS "*.h") - -INSTALL(FILES - ${Eigen_SVD_SRCS} - DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel - ) diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h deleted file mode 100644 index 02fac409e..000000000 --- a/unsupported/Eigen/src/SVD/JacobiSVD.h +++ /dev/null @@ -1,782 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009-2010 Benoit Jacob -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_JACOBISVD_H -#define EIGEN_JACOBISVD_H - -namespace Eigen { - -namespace internal { -// forward declaration (needed by ICC) -// the empty body is required by MSVC -template::IsComplex> -struct svd_precondition_2x2_block_to_be_real {}; - -/*** QR preconditioners (R-SVD) - *** - *** Their role is to reduce the problem of computing the SVD to the case of a square matrix. - *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for - *** JacobiSVD which by itself is only able to work on square matrices. - ***/ - -enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }; - -template -struct qr_preconditioner_should_do_anything -{ - enum { a = MatrixType::RowsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime, - b = MatrixType::RowsAtCompileTime != Dynamic && - MatrixType::ColsAtCompileTime != Dynamic && - MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime, - ret = !( (QRPreconditioner == NoQRPreconditioner) || - (Case == PreconditionIfMoreColsThanRows && bool(a)) || - (Case == PreconditionIfMoreRowsThanCols && bool(b)) ) - }; -}; - -template::ret -> struct qr_preconditioner_impl {}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - void allocate(const JacobiSVD&) {} - bool run(JacobiSVD&, const MatrixType&) - { - return false; - } -}; - -/*** preconditioner using FullPivHouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime - }; - typedef Matrix WorkspaceType; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); - if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); - return true; - } - return false; - } -private: - typedef FullPivHouseholderQR QRType; - QRType m_qr; - WorkspaceType m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - 
RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - m_adjoint.resize(svd.cols(), svd.rows()); - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); - if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); - return true; - } - else return false; - } -private: - typedef FullPivHouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** preconditioner using ColPivHouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.m_computeThinU) - { - svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); - } - if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); - return true; - } - return false; - } - -private: - typedef ColPivHouseholderQR QRType; - QRType m_qr; - typename internal::plain_col_type::type m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); - m_adjoint.resize(svd.cols(), svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - 
else if(svd.m_computeThinV) - { - svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); - } - if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); - return true; - } - else return false; - } - -private: - typedef ColPivHouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** preconditioner using HouseholderQR ***/ - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - - void allocate(const JacobiSVD& svd) - { - if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.rows(), svd.cols()); - } - if (svd.m_computeFullU) m_workspace.resize(svd.rows()); - else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.rows() > matrix.cols()) - { - m_qr.compute(matrix); - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.m_computeThinU) - { - svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); - } - if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols()); - return true; - } - return false; - } -private: - typedef HouseholderQR QRType; - QRType m_qr; - typename internal::plain_col_type::type m_workspace; -}; - -template -class qr_preconditioner_impl -{ -public: - typedef typename MatrixType::Index Index; - typedef typename MatrixType::Scalar Scalar; - enum - { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - Options = MatrixType::Options - }; - - typedef Matrix - TransposeTypeWithSameStorageOrder; - - void allocate(const JacobiSVD& svd) - { - if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) - { - m_qr.~QRType(); - ::new (&m_qr) QRType(svd.cols(), svd.rows()); - } - if (svd.m_computeFullV) m_workspace.resize(svd.cols()); - else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); - m_adjoint.resize(svd.cols(), svd.rows()); - } - - bool run(JacobiSVD& svd, const MatrixType& matrix) - { - if(matrix.cols() > matrix.rows()) - { - m_adjoint = matrix.adjoint(); - m_qr.compute(m_adjoint); - - svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - else if(svd.m_computeThinV) - { - svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); - m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); - } - if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows()); - return true; - } - else return false; - } - -private: - typedef HouseholderQR QRType; - QRType m_qr; - TransposeTypeWithSameStorageOrder m_adjoint; - typename internal::plain_row_type::type m_workspace; -}; - -/*** 2x2 SVD implementation - *** - *** JacobiSVD consists in performing a series of 2x2 SVD subproblems - ***/ - -template -struct svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} -}; - -template 
-struct svd_precondition_2x2_block_to_be_real -{ - typedef JacobiSVD SVD; - typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::RealScalar RealScalar; - typedef typename SVD::Index Index; - static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) - { - using std::sqrt; - Scalar z; - JacobiRotation rot; - RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); - if(n==0) - { - z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.row(p) *= z; - if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); - z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); - } - else - { - rot.c() = conj(work_matrix.coeff(p,p)) / n; - rot.s() = work_matrix.coeff(q,p) / n; - work_matrix.applyOnTheLeft(p,q,rot); - if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint()); - if(work_matrix.coeff(p,q) != Scalar(0)) - { - Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); - work_matrix.col(q) *= z; - if(svd.computeV()) svd.m_matrixV.col(q) *= z; - } - if(work_matrix.coeff(q,q) != Scalar(0)) - { - z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); - } - } - } -}; - -template -void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, - JacobiRotation *j_left, - JacobiRotation *j_right) -{ - using std::sqrt; - Matrix m; - m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), - numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); - JacobiRotation rot1; - RealScalar t = m.coeff(0,0) + m.coeff(1,1); - RealScalar d = m.coeff(1,0) - m.coeff(0,1); - if(t == RealScalar(0)) - { - rot1.c() = RealScalar(0); - rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1); - } - else - { - RealScalar u = d / t; - rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u)); - rot1.s() = rot1.c() * u; - } - m.applyOnTheLeft(0,1,rot1); - j_right->makeJacobi(m,0,1); - *j_left = rot1 * j_right->transpose(); -} - -} // end namespace internal - -/** \ingroup SVD_Module - * - * - * \class JacobiSVD - * - * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * \param QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally - * for the R-SVD step for non-square matrices. See discussion of possible values below. - * - * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product - * \f[ A = U S V^* \f] - * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; - * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left - * and right \em singular \em vectors of \a A respectively. - * - * Singular values are always sorted in decreasing order. - * - * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly. - * - * You can ask for only \em thin \a U or \a V to be computed, meaning the following. 
In case of a rectangular n-by-p matrix, letting \a m be the - * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual - * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, - * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. - * - * Here's an example demonstrating basic usage: - * \include JacobiSVD_basic.cpp - * Output: \verbinclude JacobiSVD_basic.out - * - * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than - * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and - * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms. - * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension. - * - * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to - * terminate in finite (and reasonable) time. - * - * The possible values for QRPreconditioner are: - * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. - * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. - * Contrary to other QRs, it doesn't allow computing thin unitaries. - * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR. - * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization - * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive - * process is more reliable than the optimized bidiagonal SVD iterations. - * \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing - * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in - * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking - * if QR preconditioning is needed before applying it anyway. 
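A rough usage sketch of the computation options and preconditioner choice documented above follows. It targets the main Eigen JacobiSVD interface, which this deleted copy mirrors; the matrix sizes are arbitrary.

// Usage sketch: thin unitaries with the default preconditioner, and the
// full-pivoting preconditioner (which forbids thin unitaries).
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(6, 3);
  Eigen::VectorXf b = Eigen::VectorXf::Random(6);

  // default ColPivHouseholderQR preconditioner; thin U/V are enough for solving
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXf x = svd.solve(b);
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
  std::cout << "least-squares solution: " << x.transpose() << "\n";

  // full-pivoting preconditioner: safest and slowest; thin unitaries are not
  // allowed here, so full U and V have to be requested
  Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::FullPivHouseholderQRPreconditioner>
      fullSvd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
  std::cout << "full U is " << fullSvd.matrixU().rows() << "x"
            << fullSvd.matrixU().cols() << "\n";
  return 0;
}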
- * - * \sa MatrixBase::jacobiSvd() - */ -template -class JacobiSVD : public SVDBase<_MatrixType> -{ - public: - - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix - WorkMatrixType; - - /** \brief Default Constructor. - * - * The default constructor is useful in cases in which the user intends to - * perform decompositions via JacobiSVD::compute(const MatrixType&). - */ - JacobiSVD() - : SVDBase<_MatrixType>::SVDBase() - {} - - - /** \brief Default Constructor with memory preallocation - * - * Like the default constructor but with preallocation of the internal data - * according to the specified problem size. - * \sa JacobiSVD() - */ - JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase() - { - allocate(rows, cols, computationOptions); - } - - /** \brief Constructor performing the decomposition of given matrix. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) - : SVDBase<_MatrixType>::SVDBase() - { - compute(matrix, computationOptions); - } - - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). 
- */ - SVDBase& compute(const MatrixType& matrix) - { - return compute(matrix, this->m_computationOptions); - } - - /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A. - * - * \param b the right-hand-side of the equation to solve. - * - * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V. - * - * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving. - * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$. - */ - template - inline const internal::solve_retval - solve(const MatrixBase& b) const - { - eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized."); - eigen_assert(SVDBase::computeU() && SVDBase::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice)."); - return internal::solve_retval(*this, b.derived()); - } - - - - private: - void allocate(Index rows, Index cols, unsigned int computationOptions); - - protected: - WorkMatrixType m_workMatrix; - - template - friend struct internal::svd_precondition_2x2_block_to_be_real; - template - friend struct internal::qr_preconditioner_impl; - - internal::qr_preconditioner_impl m_qr_precond_morecols; - internal::qr_preconditioner_impl m_qr_precond_morerows; -}; - -template -void JacobiSVD::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - if (SVDBase::allocate(rows, cols, computationOptions)) return; - - if (QRPreconditioner == FullPivHouseholderQRPreconditioner) - { - eigen_assert(!(this->m_computeThinU || this->m_computeThinV) && - "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " - "Use the ColPivHouseholderQR preconditioner instead."); - } - - m_workMatrix.resize(this->m_diagSize, this->m_diagSize); - - if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this); - if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this); -} - -template -SVDBase& -JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) -{ - using std::abs; - allocate(matrix.rows(), matrix.cols(), computationOptions); - - // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, - // only worsening the precision of U and V as we accumulate more rotations - const RealScalar precision = RealScalar(2) * NumTraits::epsilon(); - - // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) - const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits::denorm_min(); - - /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ - - if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix)) - { - m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize); - if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows); - if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize); - if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols); - if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize); - } - - /*** step 2. The main Jacobi SVD iteration. 
***/ - - bool finished = false; - while(!finished) - { - finished = true; - - // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix - - for(Index p = 1; p < this->m_diagSize; ++p) - { - for(Index q = 0; q < p; ++q) - { - // if this 2x2 sub-matrix is not diagonal already... - // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't - // keep us iterating forever. Similarly, small denormal numbers are considered zero. - using std::max; - RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), - abs(m_workMatrix.coeff(q,q)))); - if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold) - { - finished = false; - - // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal - internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q); - JacobiRotation j_left, j_right; - internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); - - // accumulate resulting Jacobi rotations - m_workMatrix.applyOnTheLeft(p,q,j_left); - if(SVDBase::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose()); - - m_workMatrix.applyOnTheRight(p,q,j_right); - if(SVDBase::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right); - } - } - } - } - - /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/ - - for(Index i = 0; i < this->m_diagSize; ++i) - { - RealScalar a = abs(m_workMatrix.coeff(i,i)); - this->m_singularValues.coeffRef(i) = a; - if(SVDBase::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a; - } - - /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/ - - this->m_nonzeroSingularValues = this->m_diagSize; - for(Index i = 0; i < this->m_diagSize; i++) - { - Index pos; - RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos); - if(maxRemainingSingularValue == RealScalar(0)) - { - this->m_nonzeroSingularValues = i; - break; - } - if(pos) - { - pos += i; - std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos)); - if(SVDBase::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i)); - if(SVDBase::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i)); - } - } - - this->m_isInitialized = true; - return *this; -} - -namespace internal { -template -struct solve_retval, Rhs> - : solve_retval_base, Rhs> -{ - typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType; - EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs) - - template void evalTo(Dest& dst) const - { - eigen_assert(rhs().rows() == dec().rows()); - - // A = U S V^* - // So A^{-1} = V S^{-1} U^* - - Index diagSize = (std::min)(dec().rows(), dec().cols()); - typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize); - - Index nonzeroSingVals = dec().nonzeroSingularValues(); - invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse(); - invertedSingVals.tail(diagSize - nonzeroSingVals).setZero(); - - dst = dec().matrixV().leftCols(diagSize) - * invertedSingVals.asDiagonal() - * dec().matrixU().leftCols(diagSize).adjoint() - * rhs(); - } -}; -} // end namespace internal - -/** \svd_module - * - * \return the singular value decomposition of \c *this computed by two-sided - * Jacobi transformations. 
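The solve_retval specialization above applies the pseudo-inverse V S^{-1} U^* to the right-hand side, with zero singular values inverted to zero. The sketch below reproduces that formula through the public interface to make explicit what solve() computes; the zero threshold is arbitrary and only for illustration.

// x = V * S^{-1} * U^* * b, i.e. the minimum-norm least-squares solution.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  // invert the singular values, treating (near-)zero ones as exactly zero
  Eigen::VectorXd invSv = svd.singularValues();
  for (int i = 0; i < invSv.size(); ++i)
    invSv(i) = (invSv(i) > 1e-12 * svd.singularValues()(0)) ? 1.0 / invSv(i) : 0.0;

  Eigen::VectorXd x = svd.matrixV() * invSv.asDiagonal() * svd.matrixU().adjoint() * b;

  Eigen::VectorXd xSolve = svd.solve(b);
  std::cout << "residual norm        : " << (A * x - b).norm() << "\n";
  std::cout << "matches svd.solve(b)?: " << x.isApprox(xSolve) << "\n";
  return 0;
}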
- * - * \sa class JacobiSVD - */ -template -JacobiSVD::PlainObject> -MatrixBase::jacobiSvd(unsigned int computationOptions) const -{ - return JacobiSVD(*this, computationOptions); -} - -} // end namespace Eigen - -#endif // EIGEN_JACOBISVD_H diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h deleted file mode 100644 index fd8af3b8c..000000000 --- a/unsupported/Eigen/src/SVD/SVDBase.h +++ /dev/null @@ -1,236 +0,0 @@ -// This file is part of Eigen, a lightweight C++ template library -// for linear algebra. -// -// Copyright (C) 2009-2010 Benoit Jacob -// -// Copyright (C) 2013 Gauthier Brun -// Copyright (C) 2013 Nicolas Carre -// Copyright (C) 2013 Jean Ceccato -// Copyright (C) 2013 Pierre Zoppitelli -// -// This Source Code Form is subject to the terms of the Mozilla -// Public License v. 2.0. If a copy of the MPL was not distributed -// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. - -#ifndef EIGEN_SVD_H -#define EIGEN_SVD_H - -namespace Eigen { -/** \ingroup SVD_Module - * - * - * \class SVDBase - * - * \brief Mother class of SVD classes algorithms - * - * \param MatrixType the type of the matrix of which we are computing the SVD decomposition - * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product - * \f[ A = U S V^* \f] - * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal; - * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left - * and right \em singular \em vectors of \a A respectively. - * - * Singular values are always sorted in decreasing order. - * - * - * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the - * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual - * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix, - * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving. - * - * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to - * terminate in finite (and reasonable) time. 
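To make the factorization described in the SVDBase documentation above concrete, the following sketch reconstructs A from the three factors and checks that the singular values come out in decreasing order. JacobiSVD is used because it shares the SVDBase interface; the matrix is random and the sizes are arbitrary.

// Numeric check of A = U * S * V^* with thin factors.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 6);
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  // with m = min(4, 6) = 4, thin U is 4x4 and thin V is 6x4
  Eigen::MatrixXd reconstructed =
      svd.matrixU() * svd.singularValues().asDiagonal() * svd.matrixV().adjoint();
  std::cout << "||A - U S V^*|| = " << (A - reconstructed).norm() << "\n";

  // singular values are sorted in decreasing order
  const Eigen::VectorXd& s = svd.singularValues();
  bool decreasing = true;
  for (int i = 1; i < s.size(); ++i)
    decreasing = decreasing && (s(i) <= s(i - 1));
  std::cout << "sorted in decreasing order: " << decreasing << "\n";
  return 0;
}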
- * \sa MatrixBase::genericSvd() - */ -template -class SVDBase -{ - -public: - typedef _MatrixType MatrixType; - typedef typename MatrixType::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), - MatrixOptions = MatrixType::Options - }; - - typedef Matrix - MatrixUType; - typedef Matrix - MatrixVType; - typedef typename internal::plain_diag_type::type SingularValuesType; - typedef typename internal::plain_row_type::type RowType; - typedef typename internal::plain_col_type::type ColType; - typedef Matrix - WorkMatrixType; - - - - - /** \brief Method performing the decomposition of given matrix using custom options. - * - * \param matrix the matrix to decompose - * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. - * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, - * #ComputeFullV, #ComputeThinV. - * - * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not - * available with the (non-default) FullPivHouseholderQR preconditioner. - */ - SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions); - - /** \brief Method performing the decomposition of given matrix using current options. - * - * \param matrix the matrix to decompose - * - * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). - */ - //virtual SVDBase& compute(const MatrixType& matrix) = 0; - SVDBase& compute(const MatrixType& matrix); - - /** \returns the \a U matrix. - * - * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU. - * - * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a U to be computed. - */ - const MatrixUType& matrixU() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?"); - return m_matrixU; - } - - /** \returns the \a V matrix. - * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, - * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV. - * - * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed. - * - * This method asserts that you asked for \a V to be computed. - */ - const MatrixVType& matrixV() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?"); - return m_matrixV; - } - - /** \returns the vector of singular values. 
- * - * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the - * returned vector has size \a m. Singular values are always sorted in decreasing order. - */ - const SingularValuesType& singularValues() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - return m_singularValues; - } - - - - /** \returns the number of singular values that are not exactly 0 */ - Index nonzeroSingularValues() const - { - eigen_assert(m_isInitialized && "SVD is not initialized."); - return m_nonzeroSingularValues; - } - - - /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ - inline bool computeU() const { return m_computeFullU || m_computeThinU; } - /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ - inline bool computeV() const { return m_computeFullV || m_computeThinV; } - - - inline Index rows() const { return m_rows; } - inline Index cols() const { return m_cols; } - - -protected: - // return true if already allocated - bool allocate(Index rows, Index cols, unsigned int computationOptions) ; - - MatrixUType m_matrixU; - MatrixVType m_matrixV; - SingularValuesType m_singularValues; - bool m_isInitialized, m_isAllocated; - bool m_computeFullU, m_computeThinU; - bool m_computeFullV, m_computeThinV; - unsigned int m_computationOptions; - Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; - - - /** \brief Default Constructor. - * - * Default constructor of SVDBase - */ - SVDBase() - : m_isInitialized(false), - m_isAllocated(false), - m_computationOptions(0), - m_rows(-1), m_cols(-1) - {} - - -}; - - -template -bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) -{ - eigen_assert(rows >= 0 && cols >= 0); - - if (m_isAllocated && - rows == m_rows && - cols == m_cols && - computationOptions == m_computationOptions) - { - return true; - } - - m_rows = rows; - m_cols = cols; - m_isInitialized = false; - m_isAllocated = true; - m_computationOptions = computationOptions; - m_computeFullU = (computationOptions & ComputeFullU) != 0; - m_computeThinU = (computationOptions & ComputeThinU) != 0; - m_computeFullV = (computationOptions & ComputeFullV) != 0; - m_computeThinV = (computationOptions & ComputeThinV) != 0; - eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); - eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); - eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && - "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); - - m_diagSize = (std::min)(m_rows, m_cols); - m_singularValues.resize(m_diagSize); - if(RowsAtCompileTime==Dynamic) - m_matrixU.resize(m_rows, m_computeFullU ? m_rows - : m_computeThinU ? m_diagSize - : 0); - if(ColsAtCompileTime==Dynamic) - m_matrixV.resize(m_cols, m_computeFullV ? m_cols - : m_computeThinV ? m_diagSize - : 0); - - return false; -} - -}// end namespace - -#endif // EIGEN_SVD_H diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt deleted file mode 100644 index 0bc9a46e6..000000000 --- a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt +++ /dev/null @@ -1,29 +0,0 @@ -TO DO LIST - - - -(optional optimization) - do all the allocations in the allocate part - - support static matrices - - return a error at compilation time when using integer matrices (int, long, std::complex, ...) 
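For the last item in the list above (failing at compile time on integer matrices), one minimal approach, sketched here rather than taken from the patch, is a static assertion on NumTraits<Scalar>::IsInteger. The check itself is part of the public Eigen API; the static_assert assumes C++11, whereas the patch targets C++03, so this is only an illustration of the idea.

// Sketch: reject integer scalar types at compile time instead of silently
// returning zero matrices.
#include <Eigen/Core>

template <typename MatrixType>
void checkSvdScalarType()
{
  typedef typename MatrixType::Scalar Scalar;
  static_assert(!Eigen::NumTraits<Scalar>::IsInteger,
                "SVD is only defined for floating-point (or complex) scalar types");
}

int main()
{
  checkSvdScalarType<Eigen::MatrixXd>();    // fine
  // checkSvdScalarType<Eigen::MatrixXi>(); // would fail to compile, as the TODO requests
  return 0;
}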
-
-to finish the algorithm:
- -implement the last part of the algorithm as described in the reference paper.
-    You may find more information on that part in this paper
-
- -replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
-    deflation.
-
-(suggested step-by-step resolution)
- 0) comment out the call to JacobiSVD in the last part of the divide method and everything right after,
-    until the end of the method. What is commented out can be a guideline to steps 3), 4) and 6)
- 1) solve the secular equation (characteristic equation) on the values that are not null (zi!=0 and di!=0) after the deflation,
-    which should be uncommented in the divide method
- 2) remember the values of the singular values that are already computed (zi=0)
- 3) assign the singular values found in m_computed at the right places (together with the ones found in step 2))
-    in decreasing order
- 4) set the first column to zero (except the first element) in m_computed
- 5) compute all the singular vectors when compV is set to true, and only the left vectors when
-    compV is set to false
- 6) multiply naiveU and naiveV on the right by the matrices found; naiveU only when compV is set to
-    false. /!\ if compU is false, naiveU has only 2 rows
- 7) delete everything commented out in step 0)
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
deleted file mode 100644
index 8563ddab8..000000000
--- a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-This unsupported package implements a divide-and-conquer algorithm to compute the SVD.
-
-The implementation follows as closely as possible the following reference paper:
-http://www.cs.yale.edu/publications/techreports/tr933.pdf
-
-The code documentation uses the same variable names as the reference paper. The code, deflation included, is
-working, but there are a few things that could be optimised, as explained in the TODOBdcsvd.
-
-In the code, comments were placed at the line where the third step of the algorithm would take place, so one can simply
-add the call to a function performing the last part of the algorithm without needing any knowledge of the part we implemented.
-
-In the TODOBdcsvd we explain the main difficulty of the last part and suggest a reference paper to help solve it.
-
-The implementation has trouble with fixed-size matrices.
-
-In the current implementation, it returns matrices of zeros when asked to compute the SVD of an int matrix.
-
-
-Paper for the third part:
-http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
-
-- cgit v1.2.3
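As a closing illustration of where this refactoring is headed, here is a hedged usage sketch of the relocated BDCSVD class. The unsupported module include path is an assumption about this checkout, and MatrixBase::bdcSvd() is still commented out in the patch, so the class is constructed directly; the interface otherwise mirrors JacobiSVD via SVDBase.

// Sketch only: include path and module availability are assumptions.
#include <unsupported/Eigen/BDCSVD>   // assumed location of the BDCSVD module after this patch
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 60);

  // construct directly, as with JacobiSVD (no bdcSvd() convenience function yet)
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  std::cout << "largest singular value: " << svd.singularValues()(0) << "\n";
  std::cout << "nonzero singular values: " << svd.nonzeroSingularValues() << "\n";
  return 0;
}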