author Gauthier Brun <gauthier.brun@gmail.com> 2013-06-19 00:03:27 +0200
committer Gauthier Brun <gauthier.brun@gmail.com> 2013-06-19 00:03:27 +0200
commit 8105b5ed3fd163f4b2574a2f0a6257cf119bab95 (patch)
tree d1864b8c601e2a0af4ef9cd1cc0897b4fedaa020
parent ba79e39c5c641888f58bff01febe9b81c4a22cbc (diff)
new unsupported and not finished SVD, using a divide and conquer algorithm, with tests and benchmark
-rw-r--r-- unsupported/Eigen/SVD 39
-rw-r--r-- unsupported/Eigen/src/SVD/BDCSVD.h 744
-rw-r--r-- unsupported/Eigen/src/SVD/CMakeLists.txt 6
-rw-r--r-- unsupported/Eigen/src/SVD/JacobiSVD.h 782
-rw-r--r-- unsupported/Eigen/src/SVD/SVDBase.h 236
-rw-r--r-- unsupported/Eigen/src/SVD/TODOBdcsvd.txt 29
-rw-r--r-- unsupported/Eigen/src/SVD/doneInBDCSVD.txt 21
-rw-r--r-- unsupported/bench/bench_svd.cpp 123
-rw-r--r-- unsupported/test/bdcsvd.cpp 213
-rw-r--r-- unsupported/test/jacobisvd.cpp 198
-rw-r--r-- unsupported/test/svd_common.h 261
11 files changed, 2652 insertions, 0 deletions
diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD
new file mode 100644
index 000000000..7cc059280
--- /dev/null
+++ b/unsupported/Eigen/SVD
@@ -0,0 +1,39 @@
+#ifndef EIGEN_SVD_MODULE_H
+#define EIGEN_SVD_MODULE_H
+
+#include <Eigen/QR>
+#include <Eigen/Householder>
+#include <Eigen/Jacobi>
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup SVD_Module SVD module
+ *
+ *
+ *
+ * This module provides SVD decomposition for matrices (both real and complex).
+ * This decomposition is accessible via the following MatrixBase method:
+ * - MatrixBase::jacobiSvd()
+ *
+ * \code
+ * #include <Eigen/SVD>
+ * \endcode
+ */
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/SVD/UpperBidiagonalization.h"
+#include "src/SVD/SVDBase.h"
+#include "src/SVD/JacobiSVD.h"
+#include "src/SVD/BDCSVD.h"
+#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
+#include "../../Eigen/src/SVD/JacobiSVD_MKL.h"
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#include "../../Eigen/src/Eigen2Support/SVD.h"
+#endif
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SVD_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
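For orientation, here is a minimal usage sketch of this unsupported module header; it assumes the unsupported tree is on the include path and only exercises the stock MatrixBase::jacobiSvd() entry point mentioned in the doc comment above:

    #include <unsupported/Eigen/SVD>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
      // With no computation options only the singular values are computed;
      // pass Eigen::ComputeThinU | Eigen::ComputeThinV etc. to also get U and V.
      std::cout << A.jacobiSvd().singularValues() << std::endl;
      return 0;
    }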
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h
new file mode 100644
index 000000000..87ec33a05
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/BDCSVD.h
@@ -0,0 +1,744 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This implementation follows the research report "A Divide-And-Conquer
+// Algorithm for the Bidiagonal SVD" by Ming Gu and Stanley C. Eisenstat.
+// The variable names in the code correspond to the names used in that
+// report.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BDCSVD_H
+#define EIGEN_BDCSVD_H
+
+#define EPSILON 0.0000000000000001 // threshold used by the deflation steps (scaled by the largest diagonal entries)
+
+#define ALGOSWAP 32 // block size below which divide() falls back to a dense JacobiSVD
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class BDCSVD
+ *
+ * \brief class Bidiagonal Divide and Conquer SVD
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * We plan to have an interface very similar to JacobiSVD for this class.
+ * It is meant to speed up the computation of the SVD for large matrices.
+ */
+template<typename _MatrixType>
+class BDCSVD : public SVDBase<_MatrixType>
+{
+
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename SVDBase<_MatrixType>::MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
+ typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
+ typedef Matrix<RealScalar, Dynamic, 1> VectorType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via BDCSVD::compute(const MatrixType&).
+ */
+ BDCSVD()
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa BDCSVD()
+ */
+ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ ~BDCSVD()
+ {
+ }
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ void setSwitchSize(int s)
+ {
+ eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be at least 4");
+ algoswap = s;
+ }
+
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<BDCSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
+ eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() &&
+ "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
+ }
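A hedged usage sketch of the BDCSVD interface declared so far (the class is unsupported and unfinished, so behaviour may still change; the option flags and the least-squares semantics of solve() mirror JacobiSVD):

    #include <unsupported/Eigen/SVD>
    using namespace Eigen;

    void bdcsvd_demo()
    {
      MatrixXd A = MatrixXd::Random(100, 60);
      VectorXd b = VectorXd::Random(100);

      BDCSVD<MatrixXd> svd;
      svd.setSwitchSize(16);                        // blocks smaller than this fall back to JacobiSVD (must be > 3)
      svd.compute(A, ComputeThinU | ComputeThinV);  // thin U and V are enough for solving

      VectorXd x = svd.solve(b);                    // least-squares solution of A x = b
      VectorXd sigma = svd.singularValues();        // singular values
    }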
+
+
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ else
+ {
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixU;
+ }
+
+ }
+
+
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixU;
+ }
+ else
+ {
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ }
+
+private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+ void divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift);
+ void deflation43(Index firstCol, Index shift, Index i, Index size);
+ void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
+ void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
+ void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV);
+
+protected:
+ MatrixXr m_naiveU, m_naiveV;
+ MatrixXr m_computed;
+ Index nRec;
+ int algoswap;
+ bool isTranspose, compU, compV;
+
+}; //end class BDCSVD
+
+
+// Method to allocate and initialize the matrices and attributes
+template<typename MatrixType>
+void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ isTranspose = (cols > rows);
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+ m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
+ if (isTranspose){
+ compU = this->computeU();
+ compV = this->computeV();
+ }
+ else
+ {
+ compV = this->computeU();
+ compU = this->computeV();
+ }
+ if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
+ else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
+
+ if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
+
+
+ //should be changed for a cleaner implementation
+ if (isTranspose){
+ bool aux;
+ if (this->computeU()||this->computeV()){
+ aux = this->m_computeFullU;
+ this->m_computeFullU = this->m_computeFullV;
+ this->m_computeFullV = aux;
+ aux = this->m_computeThinU;
+ this->m_computeThinU = this->m_computeThinV;
+ this->m_computeThinV = aux;
+ }
+ }
+}// end allocate
+
+// Method which computes the BDCSVD for integer matrices (specialization)
+template<>
+SVDBase<Matrix<int, Dynamic, Dynamic> >&
+BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ this->m_nonzeroSingularValues = 0;
+ m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
+ for (int i=0; i<this->m_diagSize; i++) {
+ this->m_singularValues.coeffRef(i) = 0;
+ }
+ if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
+ if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
+ this->m_isInitialized = true;
+ return *this;
+}
+
+
+// Method which computes the BDCSVD
+template<typename MatrixType>
+SVDBase<MatrixType>&
+BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ using std::abs;
+
+ //**** step 1 Bidiagonalization
+ MatrixType copy;
+ if (isTranspose) copy = matrix.adjoint();
+ else copy = matrix;
+
+ internal::UpperBidiagonalization<MatrixX > bid(copy);
+
+ //**** step 2 Divide
+ // this is ugly and has to be redone (care of complex cast)
+ MatrixXr temp;
+ temp = bid.bidiagonal().toDenseMatrix().transpose();
+ m_computed.setZero();
+ for (int i=0; i<this->m_diagSize - 1; i++) {
+ m_computed(i, i) = temp(i, i);
+ m_computed(i + 1, i) = temp(i + 1, i);
+ }
+ m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
+ divide(0, this->m_diagSize - 1, 0, 0, 0);
+
+ //**** step 3 copy
+ for (int i=0; i<this->m_diagSize; i++) {
+ RealScalar a = abs(m_computed.coeff(i, i));
+ this->m_singularValues.coeffRef(i) = a;
+ if (a == 0){
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ else if (i == this->m_diagSize - 1)
+ {
+ this->m_nonzeroSingularValues = i + 1;
+ break;
+ }
+ }
+ copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
+ this->m_isInitialized = true;
+ return *this;
+}// end compute
+
+
+template<typename MatrixType>
+void BDCSVD<MatrixType>::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){
+ if (this->computeU()){
+ MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
+ temp.real() = naiveU;
+ if (this->m_computeThinU){
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ else
+ {
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ }
+ if (this->computeV()){
+ MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
+ temp.real() = naiveV;
+ if (this->m_computeThinV){
+ this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
+ this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
+ this->m_matrixV = householderV * this->m_matrixV ;
+ }
+ else
+ {
+ this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
+ this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixV = householderV * this->m_matrixV;
+ }
+ }
+}
+
+// The divide algorithm works "in place"; we are always working on subsets of the same matrix. The divide method takes as arguments
+// the location of the submatrix we are currently working on.
+
+//@param firstCol : The Index of the first column of the submatrix of m_computed and of m_naiveU;
+//@param lastCol : The Index of the last column of the submatrix of m_computed and of m_naiveU;
+// lastCol + 1 - firstCol is the size of the submatrix.
+//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
+//@param firstColW : Same as firstRowW, but for the columns.
+//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. This is because we want the last column of the U submatrix
+// to become its first column and all the other columns to shift one place to the right. See the reference paper for more details.
+template<typename MatrixType>
+void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift)
+{
+ // requires nbRows = nbCols + 1;
+ using std::pow;
+ using std::sqrt;
+ using std::abs;
+ const Index n = lastCol - firstCol + 1;
+ const Index k = n/2;
+ RealScalar alphaK;
+ RealScalar betaK;
+ RealScalar r0;
+ RealScalar lambda, phi, c0, s0;
+ MatrixXr l, f;
+ // We use the other algorithm which is more efficient for small
+ // matrices.
+ if (n < algoswap){
+ JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
+ else
+ {
+ m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
+ m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
+ }
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
+ for (int i=0; i<n; i++)
+ {
+ m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
+ }
+ return;
+ }
+ // We use the divide and conquer algorithm
+ alphaK = m_computed(firstCol + k, firstCol + k);
+ betaK = m_computed(firstCol + k + 1, firstCol + k);
+ // The divide must be done in this order to get correct results: divide changes the data inside the submatrices,
+ // and the divide of the right submatrix reads one column of the left submatrix. That is why we need to treat the
+ // right submatrix before the left one.
+ divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+ divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+ if (compU)
+ {
+ lambda = m_naiveU(firstCol + k, firstCol + k);
+ phi = m_naiveU(firstCol + k + 1, lastCol + 1);
+ }
+ else
+ {
+ lambda = m_naiveU(1, firstCol + k);
+ phi = m_naiveU(0, lastCol + 1);
+ }
+ r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
+ + abs(betaK * phi) * abs(betaK * phi));
+ if (compU)
+ {
+ l = m_naiveU.row(firstCol + k).segment(firstCol, k);
+ f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
+ }
+ else
+ {
+ l = m_naiveU.row(1).segment(firstCol, k);
+ f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
+ }
+ if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
+ if (r0 == 0)
+ {
+ c0 = 1;
+ s0 = 0;
+ }
+ else
+ {
+ c0 = alphaK * lambda / r0;
+ s0 = betaK * phi / r0;
+ }
+ if (compU)
+ {
+ MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
+ }
+ // we place q1 in the first column, scaled by c0
+ m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
+ // q2 *= c0
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
+ }
+ else
+ {
+ RealScalar q1 = (m_naiveU(0, firstCol + k));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU(0, i + 1) = m_naiveU(0, i);
+ }
+ // we place q1 in the first column, scaled by c0
+ m_naiveU(0, firstCol) = (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
+ // q2 *= c0
+ m_naiveU(1, lastCol + 1) *= c0;
+ m_naiveU.row(1).segment(firstCol + 1, k).setZero();
+ m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
+ }
+ m_computed(firstCol + shift, firstCol + shift) = r0;
+ m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
+ m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
+
+
+ // The line below performs the deflation of the matrix for the third part of the algorithm.
+ // The real third part of the algorithm is a fast SVD of the deflated matrix m_computed, which works thanks to the deflation;
+ // it is not implemented yet, so a JacobiSVD of the deflated block is used instead (see below).
+
+ deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
+
+ // Third part of the algorithm: since the real third part is not implemented yet, we use a JacobiSVD
+ JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
+ else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
+
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
+ for (int i=0; i<n; i++)
+ m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
+ // end of the third part
+
+
+}// end divide
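To summarize the structure of divide() just implemented, here is a schematic, self-contained sketch of its control flow (the index bookkeeping with firstRowW, firstColW and shift is omitted; the 32 is the ALGOSWAP threshold defined above):

    // Schematic recursion over the column range [firstCol, lastCol] of the bidiagonal block.
    void divide_sketch(int firstCol, int lastCol)
    {
      const int n = lastCol - firstCol + 1;
      if (n < 32) {
        // base case: a dense JacobiSVD of the small block
        return;
      }
      const int k = n / 2;
      divide_sketch(k + 1 + firstCol, lastCol);   // right half first: it reads a column of the left half
      divide_sketch(firstCol, k - 1 + firstCol);  // then the left half (with shift increased by one)
      // combine: build r0, c0, s0 from alphaK and betaK, deflate, then (for now) run a
      // JacobiSVD on the combined (n+1) x n block instead of the fast secular-equation solver.
    }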
+
+
+// pages 12-13 of the reference report
+// i >= 1, d_i almost zero and z_i nonzero.
+// We use a rotation, applied to the left of M, to zero out z_i.
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::pow;
+ RealScalar c = m_computed(firstCol + shift, firstCol + shift);
+ RealScalar s = m_computed(i, firstCol + shift);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r == 0){
+ m_computed(i, i)=0;
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstCol + shift, firstCol + shift) = r;
+ m_computed(i, firstCol + shift) = 0;
+ m_computed(i, i) = 0;
+ if (compU){
+ m_naiveU.col(firstCol).segment(firstCol,size) =
+ c * m_naiveU.col(firstCol).segment(firstCol, size) -
+ s * m_naiveU.col(i).segment(firstCol, size) ;
+
+ m_naiveU.col(i).segment(firstCol, size) =
+ (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
+ (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
+ }
+}// end deflation 43
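The rotation used in deflation43 (and in deflation44 below) is a plain 2x2 Givens rotation; as a tiny standalone check of the identity it relies on, with r = sqrt(c^2 + s^2) the rotation [c/r s/r; -s/r c/r] maps (c, s) to (r, 0):

    #include <Eigen/Core>
    #include <cmath>
    #include <iostream>

    int main()
    {
      const double c = 3.0, s = 4.0;
      const double r = std::sqrt(c * c + s * s);   // r = 5
      Eigen::Matrix2d G;
      G << c / r, s / r,
          -s / r, c / r;
      // prints approximately "5 0": the second entry has been zeroed out
      std::cout << (G * Eigen::Vector2d(c, s)).transpose() << std::endl;
      return 0;
    }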
+
+
+// page 13 of the reference report
+// i, j >= 1, i != j and |d_i - d_j| < epsilon * norm2(M)
+// We apply two rotations so that z_j = 0.
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::conj;
+ using std::pow;
+ RealScalar c = m_computed(firstColm, firstColm + j - 1);
+ RealScalar s = m_computed(firstColm, firstColm + i - 1);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r==0){
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstColm + i, firstColm) = r;
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ m_computed(firstColm + j, firstColm) = 0;
+ if (compU){
+ m_naiveU.col(firstColu + i).segment(firstColu, size) =
+ c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
+ s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
+
+ m_naiveU.col(firstColu + j).segment(firstColu, size) =
+ (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
+ (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
+ }
+ if (compV){
+ m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
+ c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
+ s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
+
+ m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
+ (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
+ (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
+ }
+}// end deflation 44
+
+
+
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
+ //condition 4.1
+ RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
+ const Index length = lastCol + 1 - firstCol;
+ if (m_computed(firstCol + shift, firstCol + shift) < EPS){
+ m_computed(firstCol + shift, firstCol + shift) = EPS;
+ }
+ //condition 4.2
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
+ if (std::abs(m_computed(i, firstCol + shift)) < EPS){
+ m_computed(i, firstCol + shift) = 0;
+ }
+ }
+
+ //condition 4.3
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
+ if (m_computed(i, i) < EPS){
+ deflation43(firstCol, shift, i, length);
+ }
+ }
+
+ //condition 4.4
+
+ Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
+ // we store the final position of each row
+ Index *permutation = new Index[length];
+
+ for (Index p =1; p < length; p++) {
+ if (i> firstCol + shift + k){
+ permutation[p] = j;
+ j++;
+ } else if (j> lastCol + shift)
+ {
+ permutation[p] = i;
+ i++;
+ }
+ else
+ {
+ if (m_computed(i, i) < m_computed(j, j)){
+ permutation[p] = j;
+ j++;
+ }
+ else
+ {
+ permutation[p] = i;
+ i++;
+ }
+ }
+ }
+ //we do the permutation
+ RealScalar aux;
+ // we store the current index of each column
+ // and the column of each index
+ Index *realInd = new Index[length];
+ Index *realCol = new Index[length];
+ for (int pos = 0; pos< length; pos++){
+ realCol[pos] = pos + firstCol + shift;
+ realInd[pos] = pos;
+ }
+ const Index Zero = firstCol + shift;
+ VectorType temp;
+ for (int i = 1; i < length - 1; i++){
+ const Index I = i + Zero;
+ const Index realI = realInd[i];
+ const Index j = permutation[length - i] - Zero;
+ const Index J = realCol[j];
+
+ // swap the diagonal entries
+ aux = m_computed(I, I);
+ m_computed(I, I) = m_computed(J, J);
+ m_computed(J, J) = aux;
+
+ // swap the corresponding entries of the first column
+ aux = m_computed(I, Zero);
+ m_computed(I, Zero) = m_computed(J, Zero);
+ m_computed(J, Zero) = aux;
+
+ // change columns
+ if (compU) {
+ temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
+ m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
+ m_naiveU.col(J - shift).segment(firstCol, length + 1);
+ m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
+ }
+ else
+ {
+ temp = m_naiveU.col(I - shift).segment(0, 2);
+ m_naiveU.col(I - shift).segment(0, 2) <<
+ m_naiveU.col(J - shift).segment(0, 2);
+ m_naiveU.col(J - shift).segment(0, 2) << temp;
+ }
+ if (compV) {
+ const Index CWI = I + firstColW - Zero;
+ const Index CWJ = J + firstColW - Zero;
+ temp = m_naiveV.col(CWI).segment(firstRowW, length);
+ m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
+ m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
+ }
+
+ //update real pos
+ realCol[realI] = J;
+ realCol[j] = I;
+ realInd[J - Zero] = realI;
+ realInd[I - Zero] = j;
+ }
+ for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
+ if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
+ deflation44(firstCol ,
+ firstCol + shift,
+ firstRowW,
+ firstColW,
+ i - Zero,
+ i + 1 - Zero,
+ length);
+ }
+ }
+ delete [] permutation;
+ delete [] realInd;
+ delete [] realCol;
+
+}//end deflation
+
+
+namespace internal{
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<BDCSVD<_MatrixType>, Rhs>
+ : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
+{
+ typedef BDCSVD<_MatrixType> BDCSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ return;
+ }
+};
+
+} //end namespace internal
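Spelled out with the public API, the evalTo() above is the usual SVD-based least-squares / pseudo-inverse formula; a hedged standalone equivalent is sketched below (using JacobiSVD, since the bdcSvd() convenience method is still commented out further down):

    #include <Eigen/Dense>
    using namespace Eigen;

    VectorXd svd_least_squares(const MatrixXd& A, const VectorXd& b)
    {
      JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
      const MatrixXd::Index k = svd.nonzeroSingularValues();
      VectorXd invSigma = VectorXd::Zero(svd.singularValues().size());
      invSigma.head(k) = svd.singularValues().head(k).array().inverse();  // zero singular values stay zero
      // x = V * S^{-1} * U^* * b : the minimum-norm least-squares solution of A x = b
      return svd.matrixV() * invSigma.asDiagonal() * svd.matrixU().adjoint() * b;
    }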
+
+ /** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by
+ * the BDC (bidiagonal divide and conquer) algorithm
+ *
+ * \sa class BDCSVD
+ */
+/*
+template<typename Derived>
+BDCSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
+{
+ return BDCSVD<PlainObject>(*this, computationOptions);
+}
+*/
+
+} // end namespace Eigen
+
+#endif // EIGEN_BDCSVD_H
diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt
new file mode 100644
index 000000000..b40baf092
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SVD_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SVD_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 000000000..02fac409e
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,782 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen {
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+ bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+ enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+ b = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+ ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+ (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+ (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+ };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+ bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+ bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+ {
+ return false;
+ }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ m_adjoint.resize(svd.cols(), svd.rows());
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+private:
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD consists of performing a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ {
+ using std::sqrt;
+ Scalar z;
+ JacobiRotation<Scalar> rot;
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+ if(n==0)
+ {
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ else
+ {
+ rot.c() = conj(work_matrix.coeff(p,p)) / n;
+ rot.s() = work_matrix.coeff(q,p) / n;
+ work_matrix.applyOnTheLeft(p,q,rot);
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(work_matrix.coeff(p,q) != Scalar(0))
+ {
+ Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+ }
+ if(work_matrix.coeff(q,q) != Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ }
+ }
+};
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+ JacobiRotation<RealScalar> *j_left,
+ JacobiRotation<RealScalar> *j_right)
+{
+ using std::sqrt;
+ Matrix<RealScalar,2,2> m;
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+ JacobiRotation<RealScalar> rot1;
+ RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+ RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+ if(t == RealScalar(0))
+ {
+ rot1.c() = RealScalar(0);
+ rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ }
+ else
+ {
+ RealScalar u = d / t;
+ rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
+ rot1.s() = rot1.c() * u;
+ }
+ m.applyOnTheLeft(0,1,rot1);
+ j_right->makeJacobi(m,0,1);
+ *j_left = rot1 * j_right->transpose();
+}
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+ *
+ *
+ * \class JacobiSVD
+ *
+ * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * \param QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See discussion of possible values below.
+ *
+ * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * Here's an example demonstrating basic usage:
+ * \include JacobiSVD_basic.cpp
+ * Output: \verbinclude JacobiSVD_basic.out
+ *
+ * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+ * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+ * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+ * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR.
+ * Contrary to other QRs, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ * process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing
+ * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+ * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
+ * if QR preconditioning is needed before applying it anyway.
+ *
+ * \sa MatrixBase::jacobiSvd()
+ */
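For reference, a brief usage sketch of the options described above (standard Eigen usage; shown here because the referenced JacobiSVD_basic.cpp snippet is not part of this patch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
      // Thin U and V are all that is needed for least-squares solving.
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
      Eigen::VectorXd b = Eigen::VectorXd::Random(4);
      Eigen::VectorXd x = svd.solve(b);
      std::cout << "least-squares solution of A x = b: " << x.transpose() << "\n";
      return 0;
    }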
+template<typename _MatrixType, int QRPreconditioner>
+class JacobiSVD : public SVDBase<_MatrixType>
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via JacobiSVD::compute(const MatrixType&).
+ */
+ JacobiSVD()
+ : SVDBase<_MatrixType>::SVDBase()
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa JacobiSVD()
+ */
+ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ compute(matrix, computationOptions);
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<JacobiSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(SVDBase<MatrixType>::computeU() && SVDBase<MatrixType>::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+ }
+
+
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ protected:
+ WorkMatrixType m_workMatrix;
+
+ template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+ friend struct internal::svd_precondition_2x2_block_to_be_real;
+ template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+ friend struct internal::qr_preconditioner_impl;
+
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+
+ if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ {
+ eigen_assert(!(this->m_computeThinU || this->m_computeThinV) &&
+ "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
+ }
+
+ m_workMatrix.resize(this->m_diagSize, this->m_diagSize);
+
+ if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this);
+ if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+SVDBase<MatrixType>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ using std::abs;
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+ // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+ // only worsening the precision of U and V as we accumulate more rotations
+ const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+ // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+ const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+ /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+ if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+ {
+ m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize);
+ if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows);
+ if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize);
+ if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols);
+ if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize);
+ }
+
+ /*** step 2. The main Jacobi SVD iteration. ***/
+
+ bool finished = false;
+ while(!finished)
+ {
+ finished = true;
+
+ // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+ for(Index p = 1; p < this->m_diagSize; ++p)
+ {
+ for(Index q = 0; q < p; ++q)
+ {
+ // if this 2x2 sub-matrix is not diagonal already...
+ // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
+ // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+ using std::max;
+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ {
+ finished = false;
+
+ // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+ internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right);
+ }
+ }
+ }
+ }
+
+ /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
+
+ for(Index i = 0; i < this->m_diagSize; ++i)
+ {
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
+ this->m_singularValues.coeffRef(i) = a;
+ if(SVDBase<MatrixType>::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a;
+ }
+
+ /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+ this->m_nonzeroSingularValues = this->m_diagSize;
+ for(Index i = 0; i < this->m_diagSize; i++)
+ {
+ Index pos;
+ RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos);
+ if(maxRemainingSingularValue == RealScalar(0))
+ {
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ if(pos)
+ {
+ pos += i;
+ std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos));
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i));
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i));
+ }
+ }
+
+ this->m_isInitialized = true;
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+ : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+ typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
+
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ }
+};
+} // end namespace internal
+
+/** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by two-sided
+ * Jacobi transformations.
+ *
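+ * A minimal usage sketch (illustrative only, assuming a dynamic-size float matrix):
+ * \code
+ * MatrixXf m = MatrixXf::Random(4, 3);
+ * JacobiSVD<MatrixXf> svd = m.jacobiSvd(ComputeThinU | ComputeThinV);
+ * VectorXf sv = svd.singularValues();  // sorted in decreasing order
+ * \endcode
+ *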
+ * \sa class JacobiSVD
+ */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+ return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h
new file mode 100644
index 000000000..fd8af3b8c
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/SVDBase.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SVD_H
+#define EIGEN_SVD_H
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class SVDBase
+ *
+ * \brief Base class of the SVD algorithms
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
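+ * A minimal least-squares sketch (illustrative only, shown with the JacobiSVD implementation of this module):
+ * \code
+ * MatrixXd A = MatrixXd::Random(5, 3);
+ * VectorXd b = VectorXd::Random(5);
+ * JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
+ * VectorXd x = svd.solve(b); // least-squares solution of A x = b
+ * \endcode
+ *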
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ * \sa MatrixBase::genericSvd()
+ */
+template<typename _MatrixType>
+class SVDBase
+{
+
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+
+
+
+  /** \brief Method performing the decomposition of the given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+   * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
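+   *
+   * A sketch of typical use (illustrative only, shown here with the JacobiSVD derived class):
+   * \code
+   * MatrixXd A = MatrixXd::Random(4, 4);
+   * JacobiSVD<MatrixXd> svd;
+   * svd.compute(A, 0);                            // compute only the singular values of A
+   * svd.compute(A, ComputeFullU | ComputeFullV);  // recompute, also forming the full U and V
+   * \endcode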
+ */
+ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+  /** \brief Method performing the decomposition of the given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ //virtual SVDBase& compute(const MatrixType& matrix) = 0;
+ SVDBase& compute(const MatrixType& matrix);
+
+ /** \returns the \a U matrix.
+ *
+   * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+ *
+ * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a U to be computed.
+ */
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return m_matrixU;
+ }
+
+ /** \returns the \a V matrix.
+ *
+   * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+   * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+ *
+ * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a V to be computed.
+ */
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return m_matrixV;
+ }
+
+ /** \returns the vector of singular values.
+ *
+   * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+ * returned vector has size \a m. Singular values are always sorted in decreasing order.
+ */
+ const SingularValuesType& singularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_singularValues;
+ }
+
+
+
+ /** \returns the number of singular values that are not exactly 0 */
+ Index nonzeroSingularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_nonzeroSingularValues;
+ }
+
+
+ /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+
+ inline Index rows() const { return m_rows; }
+ inline Index cols() const { return m_cols; }
+
+
+protected:
+ // return true if already allocated
+ bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
+
+ MatrixUType m_matrixU;
+ MatrixVType m_matrixV;
+ SingularValuesType m_singularValues;
+ bool m_isInitialized, m_isAllocated;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
+ unsigned int m_computationOptions;
+ Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+
+
+ /** \brief Default Constructor.
+ *
+ * Default constructor of SVDBase
+ */
+ SVDBase()
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {}
+
+
+};
+
+
+template<typename MatrixType>
+bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ eigen_assert(rows >= 0 && cols >= 0);
+
+ if (m_isAllocated &&
+ rows == m_rows &&
+ cols == m_cols &&
+ computationOptions == m_computationOptions)
+ {
+ return true;
+ }
+
+ m_rows = rows;
+ m_cols = cols;
+ m_isInitialized = false;
+ m_isAllocated = true;
+ m_computationOptions = computationOptions;
+ m_computeFullU = (computationOptions & ComputeFullU) != 0;
+ m_computeThinU = (computationOptions & ComputeThinU) != 0;
+ m_computeFullV = (computationOptions & ComputeFullV) != 0;
+ m_computeThinV = (computationOptions & ComputeThinV) != 0;
+ eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
+ eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
+ eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
+
+ m_diagSize = (std::min)(m_rows, m_cols);
+ m_singularValues.resize(m_diagSize);
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
+
+ return false;
+}
+
+}// end namespace
+
+#endif // EIGEN_SVD_H
diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
new file mode 100644
index 000000000..0bc9a46e6
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
@@ -0,0 +1,29 @@
+TO DO LIST
+
+
+
+(optional optimizations) - do all the allocations in the allocate part
+ - support static matrices
+ - return an error at compilation time when using integer matrices (int, long, std::complex<int>, ...) (see the sketch below)
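+
+ (illustrative sketch for the last item above: a C++03-style compile time check that could live in
+  the allocate method; it only relies on NumTraits<Scalar>::IsInteger, and the typedef name is
+  hypothetical)
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef char ScalarMustNotBeInteger[NumTraits<Scalar>::IsInteger ? -1 : 1];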
+
+to finish the algorithm :
+ - implement the last part of the algorithm as described in the reference paper.
+   You may find more information on that part in this paper
+
+ - replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
+   deflation.
+
+(suggested step by step resolution)
+ 0) comment the call to Jacobi in the last part of the divide method and everything right after
+ until the end of the method. What is commented can be a guideline to steps 3) 4) and 6)
+ 1) solve the secular equation (characteristic equation) on the values that are not null (zi!=0 and di!=0), after the deflation,
+ which should be uncommented in the divide method (a minimal bisection sketch is given at the end of this file)
+ 2) remember the singular values that are already computed (zi=0)
+ 3) assign the singular values found in m_computed at the right places (with the ones found in step 2) )
+ in decreasing order
+ 4) set the firstcol to zero (except the first element) in m_computed
+ 5) compute all the singular vectors when CompV is set to true and only the left vectors when
+ CompV is set to false
+ 6) multiply naiveU and naiveV on the right by the matrices found, only naiveU when CompV is set to
+ false, /!\ if CompU is false naiveU has only 2 rows
+ 7) delete everything commented in step 0)
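+
+(illustrative sketch, not part of this patch) a plain bisection solver for the secular equation of
+step 1; d and z follow the notation of the reference paper, the d_k are assumed nonnegative and
+increasing, all z_k nonzero, and (left, right) is assumed to lie strictly between two consecutive d_k;
+the real implementation should use a faster and more robust root finder (e.g. rational interpolation) :
+
+    #include <vector>
+
+    // f(w) = 1 + sum_k z_k^2 / (d_k^2 - w^2) ; its roots are the new singular values
+    double secularF(const std::vector<double>& d, const std::vector<double>& z, double w)
+    {
+      double f = 1.0;
+      for (int k = 0; k < (int)d.size(); ++k)
+        f += z[k] * z[k] / (d[k] * d[k] - w * w);
+      return f;
+    }
+
+    // root of f in the open interval (left, right): f is increasing there, negative near left
+    // and positive near right, so plain bisection converges to the unique root
+    double solveSecular(const std::vector<double>& d, const std::vector<double>& z,
+                        double left, double right)
+    {
+      double lo = left, hi = right;
+      for (int it = 0; it < 100; ++it)
+      {
+        double mid = 0.5 * (lo + hi);
+        if (secularF(d, z, mid) > 0.0) hi = mid;
+        else                           lo = mid;
+      }
+      return 0.5 * (lo + hi);
+    }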
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
new file mode 100644
index 000000000..6fb6b84a6
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
@@ -0,0 +1,21 @@
+This unsupported package provides a divide and conquer algorithm to compute the SVD.
+
+The implementation follows as closely as possible the following reference paper:
+www.cs.yale.edu/publications/techreports/tr933.pdf
+
+The code documentation uses the same variable names as the reference paper. The code, deflation included, is
+working, but there are a few things that could be optimised, as explained in TODOBdcsvd.txt.
+
+In the code, comments were put at the line where the third step of the algorithm would go, so one could simply add
+a call to a function doing the last part of the algorithm without needing any knowledge of the part we implemented.
+
+In TODOBdcsvd.txt we explain the main difficulty of the last part and suggest a reference paper to help solve it.
+
+The implementation has trouble with fixed-size matrices.
+
+In the current implementation, zero matrices are returned when asked to do an SVD of an int matrix.
+
+
+Paper for the third part:
+http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+
diff --git a/unsupported/bench/bench_svd.cpp b/unsupported/bench/bench_svd.cpp
new file mode 100644
index 000000000..01d8231ae
--- /dev/null
+++ b/unsupported/bench/bench_svd.cpp
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
+
+// Bench to compare the efficiency of SVD algorithms
+
+#include <iostream>
+#include <bench/BenchTimer.h>
+#include <unsupported/Eigen/SVD>
+
+
+using namespace Eigen;
+using namespace std;
+
+// number of computations of each algorithm before the time is printed
+#ifndef REPEAT
+#define REPEAT 10
+#endif
+
+// number of tests of the same type
+#ifndef NUMBER_SAMPLE
+#define NUMBER_SAMPLE 2
+#endif
+
+template<typename MatrixType>
+void bench_svd(const MatrixType& a = MatrixType())
+{
+ MatrixType m = MatrixType::Random(a.rows(), a.cols());
+ BenchTimer timerJacobi;
+ BenchTimer timerBDC;
+ timerJacobi.reset();
+ timerBDC.reset();
+
+ cout << " Only compute Singular Values" <<endl;
+ for (int k=1; k<=NUMBER_SAMPLE; ++k)
+ {
+ timerBDC.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ BDCSVD<MatrixType> bdc_matrix(m);
+ }
+ timerBDC.stop();
+
+ timerJacobi.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ JacobiSVD<MatrixType> jacobi_matrix(m);
+ }
+ timerJacobi.stop();
+
+
+ cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s ";
+ cout << " || " << " BDC : " << timerBDC.value() << "s " <<endl <<endl;
+
+ if (timerBDC.value() >= timerJacobi.value())
+ cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+ else
+ cout << "OK : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+
+ }
+ cout << " =================" <<endl;
+ std::cout<< std::endl;
+ timerJacobi.reset();
+ timerBDC.reset();
+  cout << " Computes rotation matrices" <<endl;
+ for (int k=1; k<=NUMBER_SAMPLE; ++k)
+ {
+ timerBDC.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ BDCSVD<MatrixType> bdc_matrix(m, ComputeFullU|ComputeFullV);
+ }
+ timerBDC.stop();
+
+ timerJacobi.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ JacobiSVD<MatrixType> jacobi_matrix(m, ComputeFullU|ComputeFullV);
+ }
+ timerJacobi.stop();
+
+
+ cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s ";
+ cout << " || " << " BDC : " << timerBDC.value() << "s " <<endl <<endl;
+
+ if (timerBDC.value() >= timerJacobi.value())
+ cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+ else
+ cout << "OK : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+
+ }
+ std::cout<< std::endl;
+}
+
+
+
+int main(int argc, char* argv[])
+{
+ std::cout<< std::endl;
+
+ std::cout<<"On a (Dynamic, Dynamic) (6, 6) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(6, 6));
+
+ std::cout<<"On a (Dynamic, Dynamic) (32, 32) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(32, 32));
+
+ //std::cout<<"On a (Dynamic, Dynamic) (128, 128) Matrix" <<std::endl;
+ //bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(128, 128));
+
+ std::cout<<"On a (Dynamic, Dynamic) (160, 160) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(160, 160));
+
+ std::cout<< "--------------------------------------------------------------------"<< std::endl;
+
+}
diff --git a/unsupported/test/bdcsvd.cpp b/unsupported/test/bdcsvd.cpp
new file mode 100644
index 000000000..115a649b0
--- /dev/null
+++ b/unsupported/test/bdcsvd.cpp
@@ -0,0 +1,213 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
+
+#include "svd_common.h"
+#include <iostream>
+#include <Eigen/LU>
+
+// check that "svd" is a valid SVD of "m"
+template<typename MatrixType>
+void bdcsvd_check_full(const MatrixType& m, const BDCSVD<MatrixType>& svd)
+{
+ svd_check_full< MatrixType, BDCSVD< MatrixType > >(m, svd);
+}
+
+// Compare to a reference value
+template<typename MatrixType>
+void bdcsvd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const BDCSVD<MatrixType>& referenceSvd)
+{
+ svd_compare_to_full< MatrixType, BDCSVD< MatrixType > >(m, computationOptions, referenceSvd);
+} // end bdcsvd_compare_to_full
+
+
+template<typename MatrixType>
+void bdcsvd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ svd_solve< MatrixType, BDCSVD< MatrixType > >(m, computationOptions);
+} // end template bdcsvd_solve
+
+
+// test the computation options
+template<typename MatrixType>
+void bdcsvd_test_all_computation_options(const MatrixType& m)
+{
+ BDCSVD<MatrixType> fullSvd(m, ComputeFullU|ComputeFullV);
+ svd_test_computation_options_1< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
+ svd_test_computation_options_2< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
+} // end bdcsvd_test_all_computation_options
+
+
+// Call the tests with all the computation options
+template<typename MatrixType>
+void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
+{
+ MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+ bdcsvd_test_all_computation_options<MatrixType>(m);
+} // end template bdcsvd
+
+
+// verify assert
+template<typename MatrixType>
+void bdcsvd_verify_assert(const MatrixType& m)
+{
+ svd_verify_assert< MatrixType, BDCSVD< MatrixType > >(m);
+}// end template bdcsvd_verify_assert
+
+
+// test weird values
+template<typename MatrixType>
+void bdcsvd_inf_nan()
+{
+ svd_inf_nan< MatrixType, BDCSVD< MatrixType > >();
+}// end template bdcsvd_inf_nan
+
+
+
+void bdcsvd_preallocate()
+{
+ svd_preallocate< BDCSVD< MatrixXf > >();
+} // end bdcsvd_preallocate
+
+
+// compare the singular values returned by Jacobi and BDC
+template<typename MatrixType>
+void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0)
+{
+  std::cout << "start compare" << std::endl;
+  MatrixType m = MatrixType::Random(a.rows(), a.cols());
+  // pass computationOptions through so that the U/V comparisons below are meaningful
+  BDCSVD<MatrixType> bdc_svd(m, computationOptions);
+  JacobiSVD<MatrixType> jacobi_svd(m, computationOptions);
+ VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues());
+ if(computationOptions & ComputeFullU)
+ VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
+ if(computationOptions & ComputeThinU)
+ VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
+ if(computationOptions & ComputeFullV)
+ VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
+ if(computationOptions & ComputeThinV)
+ VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
+  std::cout << "end compare" << std::endl;
+} // end template compare_bdc_jacobi
+
+
+// call the tests
+void test_bdcsvd()
+{
+  // tests on a dynamic-size (42, 42) matrix of float
+ CALL_SUBTEST_11(( bdcsvd_verify_assert<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42)) ));
+ CALL_SUBTEST_11(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42), 0) ));
+ CALL_SUBTEST_11(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42)) ));
+
+  // tests on a dynamic-size (50, 50) matrix of double
+ CALL_SUBTEST_13(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50,50)) ));
+ CALL_SUBTEST_13(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50,50), 0) ));
+ CALL_SUBTEST_13(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50, 50)) ));
+
+  // tests on a dynamic-size (22, 22) matrix of complex<double>
+ CALL_SUBTEST_14(( bdcsvd_verify_assert<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>,Dynamic,Dynamic>(22,22)) ));
+ CALL_SUBTEST_14(( compare_bdc_jacobi<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>, Dynamic, Dynamic> (22,22), 0) ));
+ CALL_SUBTEST_14(( bdcsvd<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>,Dynamic,Dynamic>(22, 22)) ));
+
+  // tests on a dynamic-size (10, 10) matrix of int (currently disabled)
+ //CALL_SUBTEST_15(( bdcsvd_verify_assert<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10,10)) ));
+ //CALL_SUBTEST_15(( compare_bdc_jacobi<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10,10), 0) ));
+ //CALL_SUBTEST_15(( bdcsvd<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10, 10)) ));
+
+
+  // tests on a dynamic-size (8, 6) matrix of double
+
+ CALL_SUBTEST_16(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8,6)) ));
+ CALL_SUBTEST_16(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8, 6), 0) ));
+ CALL_SUBTEST_16(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8, 6)) ));
+
+
+
+  // tests on a dynamic-size (36, 12) matrix of float
+ CALL_SUBTEST_17(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(36, 12), 0) ));
+ CALL_SUBTEST_17(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(36, 12)) ));
+
+  // tests on a dynamic-size (5, 8) matrix of double
+ CALL_SUBTEST_18(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(5, 8), 0) ));
+ CALL_SUBTEST_18(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(5, 8)) ));
+
+
+  // non-regression tests
+ CALL_SUBTEST_3(( bdcsvd_verify_assert(Matrix3f()) ));
+ CALL_SUBTEST_4(( bdcsvd_verify_assert(Matrix4d()) ));
+ CALL_SUBTEST_7(( bdcsvd_verify_assert(MatrixXf(10,12)) ));
+ CALL_SUBTEST_8(( bdcsvd_verify_assert(MatrixXcd(7,5)) ));
+
+  // SUBTESTS 1 and 2 on specific matrices
+ for(int i = 0; i < g_repeat; i++) {
+ Matrix2cd m;
+ m << 0, 1,
+ 0, 1;
+ CALL_SUBTEST_1(( bdcsvd(m, false) ));
+ m << 1, 0,
+ 1, 0;
+ CALL_SUBTEST_1(( bdcsvd(m, false) ));
+
+ Matrix2d n;
+ n << 0, 0,
+ 0, 0;
+ CALL_SUBTEST_2(( bdcsvd(n, false) ));
+ n << 0, 0,
+ 0, 1;
+ CALL_SUBTEST_2(( bdcsvd(n, false) ));
+
+    // Static-size matrices don't work with BDCSVD yet
+ // bdc algo on a random 3x3 float matrix
+ // CALL_SUBTEST_3(( bdcsvd<Matrix3f>() ));
+ // bdc algo on a random 4x4 double matrix
+ // CALL_SUBTEST_4(( bdcsvd<Matrix4d>() ));
+ // bdc algo on a random 3x5 float matrix
+ // CALL_SUBTEST_5(( bdcsvd<Matrix<float,3,5> >() ));
+
+ int r = internal::random<int>(1, 30),
+ c = internal::random<int>(1, 30);
+ CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(r,c)) ));
+ CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(r,c)) ));
+ (void) r;
+ (void) c;
+
+ // Test on inf/nan matrix
+ CALL_SUBTEST_7( bdcsvd_inf_nan<MatrixXf>() );
+ }
+
+ CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
+ CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
+
+ // Test problem size constructors
+ CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
+
+} // end test_bdcsvd
diff --git a/unsupported/test/jacobisvd.cpp b/unsupported/test/jacobisvd.cpp
new file mode 100644
index 000000000..b4e884eee
--- /dev/null
+++ b/unsupported/test/jacobisvd.cpp
@@ -0,0 +1,198 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "svd_common.h"
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
+{
+ svd_check_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner > >(m, svd);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
+{
+ svd_compare_to_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner> >(m, computationOptions, referenceSvd);
+}
+
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ svd_solve< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, computationOptions);
+}
+
+
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_test_all_computation_options(const MatrixType& m)
+{
+
+ if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
+ return;
+
+ JacobiSVD< MatrixType, QRPreconditioner > fullSvd(m, ComputeFullU|ComputeFullV);
+ svd_test_computation_options_1< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
+
+ if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ return;
+ svd_test_computation_options_2< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
+
+}
+
+template<typename MatrixType>
+void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
+{
+ MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+
+ jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
+}
+
+
+template<typename MatrixType>
+void jacobisvd_verify_assert(const MatrixType& m)
+{
+
+ svd_verify_assert<MatrixType, JacobiSVD< MatrixType > >(m);
+
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ MatrixType a = MatrixType::Zero(rows, cols);
+ a.setZero();
+
+ if (ColsAtCompileTime == Dynamic)
+ {
+ JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
+ }
+}
+
+template<typename MatrixType>
+void jacobisvd_method()
+{
+ enum { Size = MatrixType::RowsAtCompileTime };
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef Matrix<RealScalar, Size, 1> RealVecType;
+ MatrixType m = MatrixType::Identity();
+ VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones());
+ VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
+ VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
+ VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
+}
+
+
+
+template<typename MatrixType>
+void jacobisvd_inf_nan()
+{
+ svd_inf_nan<MatrixType, JacobiSVD< MatrixType > >();
+}
+
+
+// Regression test for bug 286: JacobiSVD loops indefinitely with some
+// matrices containing denormal numbers.
+void jacobisvd_bug286()
+{
+#if defined __INTEL_COMPILER
+// shut up warning #239: floating point underflow
+#pragma warning push
+#pragma warning disable 239
+#endif
+ Matrix2d M;
+ M << -7.90884e-313, -4.94e-324,
+ 0, 5.60844e-313;
+#if defined __INTEL_COMPILER
+#pragma warning pop
+#endif
+ JacobiSVD<Matrix2d> svd;
+ svd.compute(M); // just check we don't loop indefinitely
+}
+
+
+void jacobisvd_preallocate()
+{
+ svd_preallocate< JacobiSVD <MatrixXf> >();
+}
+
+void test_jacobisvd()
+{
+ CALL_SUBTEST_11(( jacobisvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(16, 6)) ));
+
+ CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
+ CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
+ CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
+ CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
+
+ for(int i = 0; i < g_repeat; i++) {
+ Matrix2cd m;
+ m << 0, 1,
+ 0, 1;
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
+ m << 1, 0,
+ 1, 0;
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
+
+ Matrix2d n;
+ n << 0, 0,
+ 0, 0;
+ CALL_SUBTEST_2(( jacobisvd(n, false) ));
+ n << 0, 0,
+ 0, 1;
+ CALL_SUBTEST_2(( jacobisvd(n, false) ));
+
+ CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
+ CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
+ CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
+ CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
+
+ int r = internal::random<int>(1, 30),
+ c = internal::random<int>(1, 30);
+ CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
+ CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
+ (void) r;
+ (void) c;
+
+ // Test on inf/nan matrix
+ CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
+ }
+
+ CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
+ CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
+
+
+ // test matrixbase method
+ CALL_SUBTEST_1(( jacobisvd_method<Matrix2cd>() ));
+ CALL_SUBTEST_3(( jacobisvd_method<Matrix3f>() ));
+
+
+ // Test problem size constructors
+ CALL_SUBTEST_7( JacobiSVD<MatrixXf>(10,10) );
+
+ // Check that preallocation avoids subsequent mallocs
+ CALL_SUBTEST_9( jacobisvd_preallocate() );
+
+ // Regression check for bug 286
+ CALL_SUBTEST_2( jacobisvd_bug286() );
+}
diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h
new file mode 100644
index 000000000..b40c23a2b
--- /dev/null
+++ b/unsupported/test/svd_common.h
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// discard stack allocation as that too bypasses malloc
+#define EIGEN_STACK_ALLOCATION_LIMIT 0
+#define EIGEN_RUNTIME_NO_MALLOC
+
+#include "main.h"
+#include <unsupported/Eigen/SVD>
+#include <Eigen/LU>
+
+
+// check that "svd" is a valid SVD of "m"
+template<typename MatrixType, typename SVD>
+void svd_check_full(const MatrixType& m, const SVD& svd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
+
+
+ MatrixType sigma = MatrixType::Zero(rows, cols);
+ sigma.diagonal() = svd.singularValues().template cast<Scalar>();
+ MatrixUType u = svd.matrixU();
+ MatrixVType v = svd.matrixV();
+ VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
+ VERIFY_IS_UNITARY(u);
+ VERIFY_IS_UNITARY(v);
+} // end svd_check_full
+
+
+
+// Compare to a reference value
+template<typename MatrixType, typename SVD>
+void svd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const SVD& referenceSvd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ Index diagSize = (std::min)(rows, cols);
+
+ SVD svd(m, computationOptions);
+
+ VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
+ if(computationOptions & ComputeFullU)
+ VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
+ if(computationOptions & ComputeThinU)
+ VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
+ if(computationOptions & ComputeFullV)
+ VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
+ if(computationOptions & ComputeThinV)
+ VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
+} // end svd_compare_to_full
+
+
+
+template<typename MatrixType, typename SVD>
+void svd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
+ typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
+
+ RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
+ SVD svd(m, computationOptions);
+ SolutionType x = svd.solve(rhs);
+ // evaluate normal equation which works also for least-squares solutions
+ VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
+} // end svd_solve
+
+
+// test the computation options
+// split into 2 functions because the JacobiSVD tests can return before reaching the second one
+template<typename MatrixType, typename SVD>
+void svd_test_computation_options_1(const MatrixType& m, const SVD& fullSvd)
+{
+ svd_check_full< MatrixType, SVD >(m, fullSvd);
+ svd_solve< MatrixType, SVD >(m, ComputeFullU | ComputeFullV);
+}
+
+
+template<typename MatrixType, typename SVD>
+void svd_test_computation_options_2(const MatrixType& m, const SVD& fullSvd)
+{
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, 0, fullSvd);
+
+ if (MatrixType::ColsAtCompileTime == Dynamic) {
+ // thin U/V are only available with dynamic number of columns
+
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU|ComputeThinV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeFullV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU , fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeThinV, fullSvd);
+ svd_solve<MatrixType, SVD>(m, ComputeFullU | ComputeThinV);
+ svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeFullV);
+ svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeThinV);
+
+ typedef typename MatrixType::Index Index;
+ Index diagSize = (std::min)(m.rows(), m.cols());
+ SVD svd(m, ComputeThinU | ComputeThinV);
+ VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
+ }
+}
+
+template<typename MatrixType, typename SVD>
+void svd_verify_assert(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
+ RhsType rhs(rows);
+ SVD svd;
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.singularValues())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ MatrixType a = MatrixType::Zero(rows, cols);
+ a.setZero();
+ svd.compute(a, 0);
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ svd.singularValues();
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+
+ if (ColsAtCompileTime == Dynamic)
+ {
+ svd.compute(a, ComputeThinU);
+ svd.matrixU();
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ svd.compute(a, ComputeThinV);
+ svd.matrixV();
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ }
+ else
+ {
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
+ }
+}
+
+// work around stupid msvc error when constructing at compile time an expression that involves
+// a division by zero, even if the numeric type is floating point
+template<typename Scalar>
+EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
+
+// workaround aggressive optimization in ICC
+template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
+
+
+template<typename MatrixType, typename SVD>
+void svd_inf_nan()
+{
+ // all this function does is verify we don't iterate infinitely on nan/inf values
+
+ SVD svd;
+ typedef typename MatrixType::Scalar Scalar;
+ Scalar some_inf = Scalar(1) / zero<Scalar>();
+ VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
+ svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
+
+ Scalar some_nan = zero<Scalar> () / zero<Scalar> ();
+ VERIFY(some_nan != some_nan);
+ svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
+
+ MatrixType m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+}
+
+
+template<typename SVD>
+void svd_preallocate()
+{
+ Vector3f v(3.f, 2.f, 1.f);
+ MatrixXf m = v.asDiagonal();
+
+ internal::set_is_malloc_allowed(false);
+ VERIFY_RAISES_ASSERT(VectorXf v(10);)
+ SVD svd;
+ internal::set_is_malloc_allowed(true);
+ svd.compute(m);
+ VERIFY_IS_APPROX(svd.singularValues(), v);
+
+ SVD svd2(3,3);
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+ VERIFY_IS_APPROX(svd2.singularValues(), v);
+ VERIFY_RAISES_ASSERT(svd2.matrixU());
+ VERIFY_RAISES_ASSERT(svd2.matrixV());
+ svd2.compute(m, ComputeFullU | ComputeFullV);
+ VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
+ VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+
+ SVD svd3(3,3,ComputeFullU|ComputeFullV);
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+ VERIFY_IS_APPROX(svd2.singularValues(), v);
+ VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
+ VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m, ComputeFullU|ComputeFullV);
+ internal::set_is_malloc_allowed(true);
+}
+
+
+
+
+