Diffstat (limited to 'third_party/eigen3/Eigen/src/SVD')
-rw-r--r--  third_party/eigen3/Eigen/src/SVD/JacobiSVD.h                | 960
-rw-r--r--  third_party/eigen3/Eigen/src/SVD/JacobiSVD_MKL.h            |  92
-rw-r--r--  third_party/eigen3/Eigen/src/SVD/UpperBidiagonalization.h   | 396
3 files changed, 1448 insertions, 0 deletions
diff --git a/third_party/eigen3/Eigen/src/SVD/JacobiSVD.h b/third_party/eigen3/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 0000000000..d17d3a667d
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,960 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen {
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+ bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
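+
+/*** In equations (an illustrative note, ignoring the column pivoting used by the pivoting preconditioners):
+ *** for a tall m-by-n matrix A with m > n, a QR decomposition A = Q * R yields a square n-by-n
+ *** triangular factor R. Computing the square SVD R = U' * S * V^* then gives
+ ***
+ ***    A = (Q * U') * S * V^*
+ ***
+ *** so only the SVD of the square factor R remains for the Jacobi iterations below.
+ ***/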
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+ enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+ b = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+ ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+ (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+ (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+ };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+ bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+ bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+ {
+ return false;
+ }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ m_adjoint.resize(svd.cols(), svd.rows());
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+private:
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD consists of performing a series of 2x2 SVD subproblems
+ ***/
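+
+/*** In equations (an illustrative summary): for each index pair (p,q), a left rotation J_left and a
+ *** right rotation J_right are chosen so that
+ ***
+ ***    (left rotation) * [ a_pp  a_pq ; a_qp  a_qq ] * (right rotation)   is diagonal,
+ ***
+ *** and the two rotations are accumulated into U and V respectively. Sweeps over all index pairs are
+ *** repeated until every off-diagonal entry of the work matrix falls below a small threshold.
+ ***/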
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ {
+ using std::sqrt;
+ Scalar z;
+ JacobiRotation<Scalar> rot;
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+ if(n==0)
+ {
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ if(work_matrix.coeff(q,q)!=Scalar(0))
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ else
+ z = Scalar(0);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ else
+ {
+ rot.c() = conj(work_matrix.coeff(p,p)) / n;
+ rot.s() = work_matrix.coeff(q,p) / n;
+ work_matrix.applyOnTheLeft(p,q,rot);
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(work_matrix.coeff(p,q) != Scalar(0))
+ {
+ Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+ }
+ if(work_matrix.coeff(q,q) != Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ }
+ }
+};
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+ JacobiRotation<RealScalar> *j_left,
+ JacobiRotation<RealScalar> *j_right)
+{
+ using std::sqrt;
+ using std::abs;
+ Matrix<RealScalar,2,2> m;
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+ JacobiRotation<RealScalar> rot1;
+ RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+ RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+ if(t == RealScalar(0))
+ {
+ rot1.c() = RealScalar(0);
+ rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ }
+ else
+ {
+ RealScalar t2d2 = numext::hypot(t,d);
+ rot1.c() = abs(t)/t2d2;
+ rot1.s() = d/t2d2;
+ if(t<RealScalar(0))
+ rot1.s() = -rot1.s();
+ }
+ m.applyOnTheLeft(0,1,rot1);
+ j_right->makeJacobi(m,0,1);
+ *j_left = rot1 * j_right->transpose();
+}
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+ *
+ *
+ * \class JacobiSVD
+ *
+ * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * \param QRPreconditioner this optional parameter allows one to specify the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See discussion of possible values below.
+ *
+ * An SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * Here's an example demonstrating basic usage:
+ * \include JacobiSVD_basic.cpp
+ * Output: \verbinclude JacobiSVD_basic.out
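+ *
+ * As an additional minimal sketch (assuming a dynamically-sized MatrixXf; the sizes and values are arbitrary),
+ * a thin SVD can be requested and queried as follows:
+ * \code
+ * MatrixXf A = MatrixXf::Random(6, 3);
+ * JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
+ * std::cout << "singular values: " << svd.singularValues().transpose() << std::endl;
+ * std::cout << "thin U is " << svd.matrixU().rows() << "x" << svd.matrixU().cols() << std::endl; // 6x3
+ * std::cout << "thin V is " << svd.matrixV().rows() << "x" << svd.matrixV().cols() << std::endl; // 3x3
+ * \endcode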
+ *
+ * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+ * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+ * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+ * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are (a usage sketch follows this list):
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+ *     Contrary to the other QR preconditioners, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ *     is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ *     process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner disables the QR preconditioner entirely. This is useful if you know that you will only be computing
+ *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+ *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD always checks
+ *     whether QR preconditioning is needed before applying it anyway.
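+ *
+ * As a minimal sketch (the matrix type, sizes and computation options are arbitrary), the preconditioner is
+ * selected through the second template parameter:
+ * \code
+ * MatrixXd A = MatrixXd::Random(4, 7);
+ * JacobiSVD<MatrixXd, FullPivHouseholderQRPreconditioner> svd(A, ComputeFullU | ComputeFullV);
+ * \endcode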
+ *
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via JacobiSVD::compute(const MatrixType&).
+ */
+ JacobiSVD()
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_usePrescribedThreshold(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1), m_diagSize(0)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa JacobiSVD()
+ */
+ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_usePrescribedThreshold(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_usePrescribedThreshold(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ JacobiSVD& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, m_computationOptions);
+ }
+
+ /** \returns the \a U matrix.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+ *
+ * The first \a m columns of \a U are the left singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a U to be computed.
+ */
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
+ return m_matrixU;
+ }
+
+ /** \returns the \a V matrix.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV.
+ *
+ * The first \a m columns of \a V are the right singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a V to be computed.
+ */
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
+ return m_matrixV;
+ }
+
+ /** \returns the vector of singular values.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+ * returned vector has size \a m. Singular values are always sorted in decreasing order.
+ */
+ const SingularValuesType& singularValues() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ return m_singularValues;
+ }
+
+ /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
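+ *
+ * A minimal least-squares sketch (the sizes and values are arbitrary):
+ * \code
+ * MatrixXf A = MatrixXf::Random(8, 3);
+ * VectorXf b = VectorXf::Random(8);
+ * JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);
+ * VectorXf x = svd.solve(b); // minimizes the Euclidean norm of A*x - b
+ * \endcode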
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<JacobiSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the number of singular values that are not exactly 0 */
+ Index nonzeroSingularValues() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ return m_nonzeroSingularValues;
+ }
+
+ /** \returns the rank of the matrix of which \c *this is the SVD.
+ *
+ * \note This method has to determine which singular values should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ using std::abs;
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ if(m_singularValues.size()==0) return 0;
+ RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
+ Index i = m_nonzeroSingularValues-1;
+ while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+ return i+1;
+ }
+
+ /** Allows one to prescribe a threshold to be used by certain methods, such as rank() and solve(),
+ * which need to determine when singular values are to be considered nonzero.
+ * This is not used for the SVD decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold().
+ * The default is the diagonal size times \c NumTraits<Scalar>::epsilon().
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+ * A singular value will be considered nonzero if it satisfies
+ * \f$ \vert singular value \vert > threshold \times \vert max singular value \vert \f$.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
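+ *
+ * An illustrative sketch (assuming an already computed decomposition \c svd of a MatrixXf; the threshold value is arbitrary):
+ * \code
+ * svd.setThreshold(1e-4f);
+ * MatrixXf::Index r = svd.rank(); // number of singular values considered nonzero under this threshold
+ * \endcode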
+ */
+ JacobiSVD& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+ /** Allows one to return to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code svd.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ JacobiSVD& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ }
+
+ inline Index rows() const { return m_rows; }
+ inline Index cols() const { return m_cols; }
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ protected:
+ MatrixUType m_matrixU;
+ MatrixVType m_matrixV;
+ SingularValuesType m_singularValues;
+ WorkMatrixType m_workMatrix;
+ bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
+ unsigned int m_computationOptions;
+ Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+ RealScalar m_prescribedThreshold;
+
+ template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+ friend struct internal::svd_precondition_2x2_block_to_be_real;
+ template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+ friend struct internal::qr_preconditioner_impl;
+
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ eigen_assert(rows >= 0 && cols >= 0);
+
+ if (m_isAllocated &&
+ rows == m_rows &&
+ cols == m_cols &&
+ computationOptions == m_computationOptions)
+ {
+ return;
+ }
+
+ m_rows = rows;
+ m_cols = cols;
+ m_isInitialized = false;
+ m_isAllocated = true;
+ m_computationOptions = computationOptions;
+ m_computeFullU = (computationOptions & ComputeFullU) != 0;
+ m_computeThinU = (computationOptions & ComputeThinU) != 0;
+ m_computeFullV = (computationOptions & ComputeFullV) != 0;
+ m_computeThinV = (computationOptions & ComputeThinV) != 0;
+ eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
+ eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
+ eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
+ if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ {
+ eigen_assert(!(m_computeThinU || m_computeThinV) &&
+ "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
+ }
+ m_diagSize = (std::min)(m_rows, m_cols);
+ m_singularValues.resize(m_diagSize);
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
+ m_workMatrix.resize(m_diagSize, m_diagSize);
+
+ if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
+ if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+JacobiSVD<MatrixType, QRPreconditioner>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ using std::abs;
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+ // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+ // only worsening the precision of U and V as we accumulate more rotations
+ const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+ // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+ const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+ /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+ if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+ {
+ m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
+ if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
+ if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
+ if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
+ if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
+ }
+
+ // Scaling factor to reduce over/under-flows
+ RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff();
+ if(scale==RealScalar(0)) scale = RealScalar(1);
+ m_workMatrix /= scale;
+
+ /*** step 2. The main Jacobi SVD iteration. ***/
+
+ bool finished = false;
+ while(!finished)
+ {
+ finished = true;
+
+ // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+ for(Index p = 1; p < m_diagSize; ++p)
+ {
+ for(Index q = 0; q < p; ++q)
+ {
+ // if this 2x2 sub-matrix is not diagonal already...
+ // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
+ // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+ RealScalar threshold = numext::maxi(considerAsZero, precision * numext::maxi(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if(numext::maxi(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ {
+ finished = false;
+
+ // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+ internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+ }
+ }
+ }
+ }
+
+ /*** step 3. The work matrix is now diagonal, so make it positive so that its diagonal entries are the singular values ***/
+
+ for(Index i = 0; i < m_diagSize; ++i)
+ {
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
+ m_singularValues.coeffRef(i) = a;
+ if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+ }
+
+ m_singularValues *= scale;
+
+ /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+ m_nonzeroSingularValues = m_diagSize;
+ for(Index i = 0; i < m_diagSize; i++)
+ {
+ Index pos;
+ RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
+ if(maxRemainingSingularValue == RealScalar(0))
+ {
+ m_nonzeroSingularValues = i;
+ break;
+ }
+ if(pos)
+ {
+ pos += i;
+ std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
+ if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+ if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
+ }
+ }
+
+ m_isInitialized = true;
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+ : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+ typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+
+ Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
+ Index rank = dec().rank();
+
+ tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs();
+ tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp;
+ dst = dec().matrixV().leftCols(rank) * tmp;
+ }
+};
+} // end namespace internal
+
+#ifndef __CUDACC__
+/** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by two-sided
+ * Jacobi transformations.
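+ *
+ * A minimal sketch (the matrix type, values and computation options are arbitrary):
+ * \code
+ * Matrix3f m = Matrix3f::Random();
+ * JacobiSVD<Matrix3f> svd = m.jacobiSvd(ComputeFullU | ComputeFullV);
+ * \endcode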
+ *
+ * \sa class JacobiSVD
+ */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+ return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+#endif // __CUDACC__
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/third_party/eigen3/Eigen/src/SVD/JacobiSVD_MKL.h b/third_party/eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
new file mode 100644
index 0000000000..decda75405
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Singular Value Decomposition - SVD.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_JACOBISVD_MKL_H
+#define EIGEN_JACOBISVD_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
+template<> inline \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
+{ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
+ typedef MatrixType::Scalar Scalar; \
+ typedef MatrixType::RealScalar RealScalar; \
+ allocate(matrix.rows(), matrix.cols(), computationOptions); \
+\
+ /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
+ m_nonzeroSingularValues = m_diagSize; \
+\
+ lapack_int lda = matrix.outerStride(), ldu, ldvt; \
+ lapack_int matrix_order = MKLCOLROW; \
+ char jobu, jobvt; \
+ MKLTYPE *u, *vt, dummy; \
+ jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
+ jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
+ if (computeU()) { \
+ ldu = m_matrixU.outerStride(); \
+ u = (MKLTYPE*)m_matrixU.data(); \
+ } else { ldu=1; u=&dummy; }\
+ MatrixType localV; \
+ ldvt = (m_computeFullV) ? m_cols : (m_computeThinV) ? m_diagSize : 1; \
+ if (computeV()) { \
+ localV.resize(ldvt, m_cols); \
+ vt = (MKLTYPE*)localV.data(); \
+ } else { ldvt=1; vt=&dummy; }\
+ Matrix<MKLRTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
+ MatrixType m_temp; m_temp = matrix; \
+ LAPACKE_##MKLPREFIX##gesvd( matrix_order, jobu, jobvt, m_rows, m_cols, (MKLTYPE*)m_temp.data(), lda, (MKLRTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
+ if (computeV()) m_matrixV = localV.adjoint(); \
+ /* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
+ m_isInitialized = true; \
+ return *this; \
+}
+
+EIGEN_MKL_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8, float , c, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8, float , c, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_MKL_H
diff --git a/third_party/eigen3/Eigen/src/SVD/UpperBidiagonalization.h b/third_party/eigen3/Eigen/src/SVD/UpperBidiagonalization.h
new file mode 100644
index 0000000000..40067682c9
--- /dev/null
+++ b/third_party/eigen3/Eigen/src/SVD/UpperBidiagonalization.h
@@ -0,0 +1,396 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BIDIAGONALIZATION_H
+#define EIGEN_BIDIAGONALIZATION_H
+
+namespace Eigen {
+
+namespace internal {
+// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API.
+// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
+
+template<typename _MatrixType> class UpperBidiagonalization
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
+ typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
+ typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
+ typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
+ typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
+ typedef HouseholderSequence<
+ const MatrixType,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
+ > HouseholderUSequenceType;
+ typedef HouseholderSequence<
+ const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
+ Diagonal<const MatrixType,1>,
+ OnTheRight
+ > HouseholderVSequenceType;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via Bidiagonalization::compute(const MatrixType&).
+ */
+ UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
+
+ UpperBidiagonalization(const MatrixType& matrix)
+ : m_householder(matrix.rows(), matrix.cols()),
+ m_bidiagonal(matrix.cols(), matrix.cols()),
+ m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+ UpperBidiagonalization& compute(const MatrixType& matrix);
+ UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
+
+ const MatrixType& householder() const { return m_householder; }
+ const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
+
+ const HouseholderUSequenceType householderU() const
+ {
+ eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+ return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
+ }
+
+ const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
+ {
+ eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+ return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
+ .setLength(m_householder.cols()-1)
+ .setShift(1);
+ }
+
+ protected:
+ MatrixType m_householder;
+ BidiagonalType m_bidiagonal;
+ bool m_isInitialized;
+};
+
+// Standard upper bidiagonalization without fancy optimizations
+// This version should be faster for small matrix sizes
+template<typename MatrixType>
+void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
+ typename MatrixType::RealScalar *diagonal,
+ typename MatrixType::RealScalar *upper_diagonal,
+ typename MatrixType::Scalar* tempData = 0)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+
+ Index rows = mat.rows();
+ Index cols = mat.cols();
+
+ typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
+ TempType tempVector;
+ if(tempData==0)
+ {
+ tempVector.resize(rows);
+ tempData = tempVector.data();
+ }
+
+ for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
+ {
+ Index remainingRows = rows - k;
+ Index remainingCols = cols - k - 1;
+
+ // construct left householder transform in-place in A
+ mat.col(k).tail(remainingRows)
+ .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
+ // apply householder transform to remaining part of A on the left
+ mat.bottomRightCorner(remainingRows, remainingCols)
+ .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
+
+ if(k == cols-1) break;
+
+ // construct right householder transform in-place in mat
+ mat.row(k).tail(remainingCols)
+ .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
+ // apply householder transform to remaining part of mat on the left
+ mat.bottomRightCorner(remainingRows-1, remainingCols)
+ .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
+ }
+}
+
+/** \internal
+ * Helper routine for the block reduction to upper bidiagonal form.
+ *
+ * Let's partition the matrix A:
+ *
+ * | A00 A01 |
+ * A = | |
+ * | A10 A11 |
+ *
+ * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
+ * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
+ * is updated using matrix-matrix products:
+ *   A11 -= V * Y^T + X * U^T
+ * where V and U contain the left and right Householder vectors. V and U are stored in A10 and A01,
+ * respectively, and the update matrices X and Y are computed during the reduction.
+ *
+ */
+template<typename MatrixType>
+void upperbidiagonalization_blocked_helper(MatrixType& A,
+ typename MatrixType::RealScalar *diagonal,
+ typename MatrixType::RealScalar *upper_diagonal,
+ typename MatrixType::Index bs,
+ Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic> > X,
+ Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic> > Y)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Ref<Matrix<Scalar, Dynamic, 1> > SubColumnType;
+ typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, InnerStride<> > SubRowType;
+ typedef Ref<Matrix<Scalar, Dynamic, Dynamic> > SubMatType;
+
+ Index brows = A.rows();
+ Index bcols = A.cols();
+
+ Scalar tau_u, tau_u_prev(0), tau_v;
+
+ for(Index k = 0; k < bs; ++k)
+ {
+ Index remainingRows = brows - k;
+ Index remainingCols = bcols - k - 1;
+
+ SubMatType X_k1( X.block(k,0, remainingRows,k) );
+ SubMatType V_k1( A.block(k,0, remainingRows,k) );
+
+ // 1 - update the k-th column of A
+ SubColumnType v_k = A.col(k).tail(remainingRows);
+ v_k -= V_k1 * Y.row(k).head(k).adjoint();
+ if(k) v_k -= X_k1 * A.col(k).head(k);
+
+ // 2 - construct left Householder transform in-place
+ v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
+
+ if(k+1<bcols)
+ {
+ SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
+ SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
+
+ // this eases the application of Householder transformations
+ // A(k,k) will store tau_v later
+ A(k,k) = Scalar(1);
+
+ // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
+ {
+ SubColumnType y_k( Y.col(k).tail(remainingCols) );
+
+ // let's use the beginning of column k of Y as a temporary vector
+ SubColumnType tmp( Y.col(k).head(k) );
+ y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
+ tmp.noalias() = V_k1.adjoint() * v_k;
+ y_k.noalias() -= Y_k.leftCols(k) * tmp;
+ tmp.noalias() = X_k1.adjoint() * v_k;
+ y_k.noalias() -= U_k1.adjoint() * tmp;
+ y_k *= numext::conj(tau_v);
+ }
+
+ // 4 - update k-th row of A (it will become u_k)
+ SubRowType u_k( A.row(k).tail(remainingCols) );
+ u_k = u_k.conjugate();
+ {
+ u_k -= Y_k * A.row(k).head(k+1).adjoint();
+ if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
+ }
+
+ // 5 - construct right Householder transform in-place
+ u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
+
+ // this eases the application of Householder transformations
+ // A(k,k+1) will store tau_u later
+ A(k,k+1) = Scalar(1);
+
+ // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
+ {
+ SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
+
+ // let's use the beginning of column k of X as temporary vectors
+ // note that tmp0 and tmp1 overlap
+ SubColumnType tmp0 ( X.col(k).head(k) ),
+ tmp1 ( X.col(k).head(k+1) );
+
+ x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
+ tmp0.noalias() = U_k1 * u_k.transpose();
+ x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
+ tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
+ x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
+ x_k *= numext::conj(tau_u);
+ tau_u = numext::conj(tau_u);
+ u_k = u_k.conjugate();
+ }
+
+ if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
+ tau_u_prev = tau_u;
+ }
+ else
+ A.coeffRef(k-1,k) = tau_u_prev;
+
+ A.coeffRef(k,k) = tau_v;
+ }
+
+ if(bs<bcols)
+ A.coeffRef(bs-1,bs) = tau_u_prev;
+
+ // update A22
+ if(bcols>bs && brows>bs)
+ {
+ SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
+ SubMatType A10( A.block(bs,0, brows-bs,bs) );
+ SubMatType A01( A.block(0,bs, bs,bcols-bs) );
+ Scalar tmp = A01(bs-1,0);
+ A01(bs-1,0) = 1;
+ A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
+ A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
+ A01(bs-1,0) = tmp;
+ }
+}
+
+/** \internal
+ *
+ * Implementation of a block-bidiagonal reduction.
+ * It is based on the following paper:
+ * The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
+ * by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
+ * section 3.3
+ */
+template<typename MatrixType, typename BidiagType>
+void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
+ typename MatrixType::Index maxBlockSize=32,
+ typename MatrixType::Scalar* /*tempData*/ = 0)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+
+ Index rows = A.rows();
+ Index cols = A.cols();
+ Index size = (std::min)(rows, cols);
+
+ Matrix<Scalar,MatrixType::RowsAtCompileTime,Dynamic,ColMajor,MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
+ Matrix<Scalar,MatrixType::ColsAtCompileTime,Dynamic,ColMajor,MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
+ Index blockSize = (std::min)(maxBlockSize,size);
+
+ Index k = 0;
+ for(k = 0; k < size; k += blockSize)
+ {
+ Index bs = (std::min)(size-k,blockSize); // actual size of the block
+ Index brows = rows - k; // rows of the block
+ Index bcols = cols - k; // columns of the block
+
+ // partition the matrix A:
+ //
+ // | A00 A01 A02 |
+ // | |
+ // A = | A10 A11 A12 |
+ // | |
+ // | A20 A21 A22 |
+ //
+ // where A11 is a bs x bs diagonal block,
+ // and let:
+ // | A11 A12 |
+ // B = | |
+ // | A21 A22 |
+
+ BlockType B = A.block(k,k,brows,bcols);
+
+ // This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
+ // The algorithm then continues on the updated A22.
+ //
+ // However, if B is too small, or A22 empty, then let's use an unblocked strategy
+ if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
+ {
+ upperbidiagonalization_inplace_unblocked(B,
+ &(bidiagonal.template diagonal<0>().coeffRef(k)),
+ &(bidiagonal.template diagonal<1>().coeffRef(k)),
+ X.data()
+ );
+ break; // We're done
+ }
+ else
+ {
+ upperbidiagonalization_blocked_helper<BlockType>( B,
+ &(bidiagonal.template diagonal<0>().coeffRef(k)),
+ &(bidiagonal.template diagonal<1>().coeffRef(k)),
+ bs,
+ X.topLeftCorner(brows,bs),
+ Y.topLeftCorner(bcols,bs)
+ );
+ }
+ }
+}
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+
+ eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+
+ m_householder = matrix;
+
+ ColVectorType temp(rows);
+
+ upperbidiagonalization_inplace_unblocked(m_householder,
+ &(m_bidiagonal.template diagonal<0>().coeffRef(0)),
+ &(m_bidiagonal.template diagonal<1>().coeffRef(0)),
+ temp.data());
+
+ m_isInitialized = true;
+ return *this;
+}
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+
+ eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+
+ m_householder = matrix;
+ upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
+
+ m_isInitialized = true;
+ return *this;
+}
+
+#if 0
+/** \return the Householder QR decomposition of \c *this.
+ *
+ * \sa class Bidiagonalization
+ */
+template<typename Derived>
+const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bidiagonalization() const
+{
+ return UpperBidiagonalization<PlainObject>(eval());
+}
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BIDIAGONALIZATION_H