author    Benoit Jacob <jacob.benoit.1@gmail.com>   2010-10-08 10:42:40 -0400
committer Benoit Jacob <jacob.benoit.1@gmail.com>   2010-10-08 10:42:40 -0400
commit    8ba8d90063ab6b297b1ba912f806eaa71cbdf003 (patch)
tree      75f215f5b1659294bcf829ae0665c43b11b04fa0
parent    6fad2eb97be8fa187b332657a104347dd4d3da9a (diff)
add option to compute thin U/V.
By default nothing is computed. You have to ask explicitly for thin/full U/V if you want them.
-rw-r--r--  Eigen/src/Core/util/Constants.h |   8
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h       | 141
-rw-r--r--  test/jacobisvd.cpp              | 105
3 files changed, 186 insertions, 68 deletions
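For orientation, here is a minimal usage sketch of the option introduced by this commit. It is not part of the patch: the 5x3 matrix A and its contents are made up, and a non-default preconditioner is chosen because the default FullPivHouseholderQR preconditioner cannot produce thin unitaries (see the JacobiSVD.h changes below).

  #include <Eigen/Dense>
  using namespace Eigen;

  int main()
  {
    MatrixXf A = MatrixXf::Random(5, 3);
    // By default nothing is computed; ask explicitly for thin U and thin V.
    JacobiSVD<MatrixXf, ColPivHouseholderQRPreconditioner> svd(A, ComputeThinU | ComputeThinV);
    // matrixU() is 5x3, singularValues() has 3 entries, matrixV() is 3x3.
    MatrixXf reconstructed = svd.matrixU()
                           * svd.singularValues().asDiagonal()
                           * svd.matrixV().adjoint();
    // The thin factors already reconstruct A up to numerical error.
    return (reconstructed - A).norm() <= 1e-4f * A.norm() ? 0 : 1;
  }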
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index aacde6465..60da5c76a 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -242,10 +242,12 @@ enum AccessorLevels {
enum DecompositionOptions {
Pivoting = 0x01, // LDLT,
NoPivoting = 0x02, // LDLT,
- ComputeU = 0x10, // SVD,
- ComputeV = 0x20, // SVD,
+ ComputeFullU = 0x04, // SVD,
+ ComputeThinU = 0x08, // SVD,
+ ComputeFullV = 0x10, // SVD,
+ ComputeThinV = 0x20, // SVD,
EigenvaluesOnly = 0x40, // all eigen solvers
- ComputeEigenvectors = 0x80, // all eigen solvers
+ ComputeEigenvectors = 0x80, // all eigen solvers
EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
Ax_lBx = 0x100,
ABx_lx = 0x200,
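These SVD options form a bit-field: combine them with bitwise OR and test them with bitwise AND. A small sketch under that assumption (the variable names are made up):

  #include <Eigen/Core>
  using namespace Eigen;

  int main()
  {
    unsigned int computationOptions = ComputeThinU | ComputeFullV;  // 0x08 | 0x10 == 0x18
    bool wantsThinU = (computationOptions & ComputeThinU) != 0;     // true
    bool wantsFullU = (computationOptions & ComputeFullU) != 0;     // false
    // Requesting both ComputeFullU and ComputeThinU at once is rejected by an
    // assertion in JacobiSVD::compute() (see the JacobiSVD.h hunk below).
    return (wantsThinU && !wantsFullU) ? 0 : 1;
  }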
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 60b4c6353..bce7719bf 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -81,10 +81,12 @@ struct ei_qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner,
{
if(matrix.rows() > matrix.cols())
{
+ ei_assert(!svd.m_computeThinU && "JacobiSVD: can't compute a thin U with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
FullPivHouseholderQR<MatrixType> qr(matrix);
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeU) svd.m_matrixU = qr.matrixQ();
- if(svd.m_computeV) svd.m_matrixV = qr.colsPermutation();
+ if(svd.m_computeFullU) svd.m_matrixU = qr.matrixQ();
+ if(svd.computeV()) svd.m_matrixV = qr.colsPermutation();
return true;
}
return false;
@@ -98,13 +100,15 @@ struct ei_qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner,
{
if(matrix.cols() > matrix.rows())
{
+ ei_assert(!svd.m_computeThinV && "JacobiSVD: can't compute a thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime,
MatrixType::Options, MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeV) svd.m_matrixV = qr.matrixQ();
- if(svd.m_computeU) svd.m_matrixU = qr.colsPermutation();
+ if(svd.m_computeFullV) svd.m_matrixV = qr.matrixQ();
+ if(svd.computeU()) svd.m_matrixU = qr.colsPermutation();
return true;
}
else return false;
@@ -120,8 +124,12 @@ struct ei_qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner,
{
ColPivHouseholderQR<MatrixType> qr(matrix);
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeU) svd.m_matrixU = qr.householderQ();
- if(svd.m_computeV) svd.m_matrixV = qr.colsPermutation();
+ if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ();
+ else if(svd.m_computeThinU) {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixU);
+ }
+ if(svd.computeV()) svd.m_matrixV = qr.colsPermutation();
return true;
}
return false;
@@ -140,8 +148,12 @@ struct ei_qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner,
TransposeTypeWithSameStorageOrder;
ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeV) svd.m_matrixV = qr.householderQ();
- if(svd.m_computeU) svd.m_matrixU = qr.colsPermutation();
+ if(svd.m_computeFullV) svd.m_matrixV = qr.householderQ();
+ else if(svd.m_computeThinV) {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixV);
+ }
+ if(svd.computeU()) svd.m_matrixU = qr.colsPermutation();
return true;
}
else return false;
@@ -157,8 +169,12 @@ struct ei_qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, Precon
{
HouseholderQR<MatrixType> qr(matrix);
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeU) svd.m_matrixU = qr.householderQ();
- if(svd.m_computeV) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ();
+ else if(svd.m_computeThinU) {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixU);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
return true;
}
return false;
@@ -177,8 +193,12 @@ struct ei_qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, Precon
TransposeTypeWithSameStorageOrder;
HouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeV) svd.m_matrixV = qr.householderQ();
- if(svd.m_computeU) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ if(svd.m_computeFullV) svd.m_matrixV = qr.householderQ();
+ else if(svd.m_computeThinV) {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixV);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
return true;
}
else return false;
@@ -195,13 +215,21 @@ struct ei_qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, Precon
* \brief Jacobi SVD decomposition of a square matrix
*
* \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- * \param QRPreconditioner the type of QR decomposition that will be used internally for the R-SVD step
- * for non-square matrices. The default, FullPivHouseholderQR, is safest but slow.
- * Consider using ColPivHouseholderQR instead of greater speed while still being
- * quite safe, or even HouseholderQR to get closer to the speed and unsafety of
- * bidiagonalizing SVD implementations. Finally, if you don't need to handle non-square matrices,
- * you don't need any QR decomposition; you can then pass the dummy type NoQRDecomposition,
- * which will result in smaller executable size and shorter compilation times.
+ * \param QRPreconditioner this optional parameter lets you specify the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See the discussion of possible values below.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li FullPivHouseholderQRPreconditioner (the default) is the safest and slowest. It uses full-pivoting QR.
+ * We make it the default so that JacobiSVD is guaranteed to be entirely, uncompromisingly safe by default.
+ * Unlike the other QR preconditioners, it does not allow computing thin unitaries.
+ * \li ColPivHouseholderQRPreconditioner is faster, and in practice still very safe, although theoretically not as safe as the default
+ * full-pivoting preconditioner. It uses column-pivoting QR.
+ * \li HouseholderQRPreconditioner is even faster, but less safe and less accurate than the pivoting variants. It uses non-pivoting QR.
+ * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ * is inherently non-pivoting).
+ * \li NoQRPreconditioner lets you skip the QR preconditioner entirely. This is useful if you know that you will only be computing
+ * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+ * faster compilation and smaller executable code.
*
* \sa MatrixBase::jacobiSvd()
*/
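To make the choices documented above concrete, here is a short sketch of how each preconditioner would be selected (not part of the patch; the matrices and their sizes are made up):

  #include <Eigen/Dense>
  using namespace Eigen;

  int main()
  {
    MatrixXf A = MatrixXf::Random(6, 4);
    Matrix3f B = Matrix3f::Random();

    // Default: full-pivoting preconditioner, maximally safe, full unitaries only.
    JacobiSVD<MatrixXf> svdSafe(A, ComputeFullU | ComputeFullV);

    // Column-pivoting preconditioner: faster, still very safe in practice, and allows thin unitaries.
    JacobiSVD<MatrixXf, ColPivHouseholderQRPreconditioner> svdThin(A, ComputeThinU | ComputeThinV);

    // Square matrices only: no QR preconditioner, smaller code and faster compilation.
    JacobiSVD<Matrix3f, NoQRPreconditioner> svdSquare(B, ComputeFullU | ComputeFullV);

    return 0;
  }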
@@ -256,6 +284,21 @@ template<typename MatrixType, int QRPreconditioner> class JacobiSVD
m_workMatrix(rows, cols),
m_isInitialized(false) {}
+ /** \brief Constructor performing the decomposition of the given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are ComputeFullU, ComputeThinU,
+ * ComputeFullV, ComputeThinV.
+ *
+ * Thin unitaries are not available with the default FullPivHouseholderQRPreconditioner; see the class documentation for details.
+ * If you want thin unitaries, use another preconditioner, for example:
+ * \code
+ * JacobiSVD<MatrixXf, ColPivHouseholderQRPreconditioner> svd(matrix, ComputeThinU);
+ * \endcode
+ *
+ * Thin unitaries are also only available if your matrix type has a Dynamic number of columns (for example MatrixXf).
+ */
JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_matrixU(matrix.rows(), matrix.rows()),
m_matrixV(matrix.cols(), matrix.cols()),
@@ -269,11 +312,27 @@ template<typename MatrixType, int QRPreconditioner> class JacobiSVD
compute(matrix, computationOptions);
}
+ /** \brief Method performing the decomposition of the given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are ComputeFullU, ComputeThinU,
+ * ComputeFullV, ComputeThinV.
+ *
+ * Thin unitaries are not available with the default FullPivHouseholderQRPreconditioner; see the class documentation for details.
+ * If you want thin unitaries, use another preconditioner, for example:
+ * \code
+ * JacobiSVD<MatrixXf, ColPivHouseholderQRPreconditioner> svd(matrix, ComputeThinU);
+ * \endcode
+ *
+ * Thin unitaries are also only available if your matrix type has a Dynamic number of columns (for example MatrixXf).
+ */
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions = 0);
const MatrixUType& matrixU() const
{
ei_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ ei_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
return m_matrixU;
}
@@ -286,15 +345,21 @@ template<typename MatrixType, int QRPreconditioner> class JacobiSVD
const MatrixVType& matrixV() const
{
ei_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ ei_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
return m_matrixV;
}
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
protected:
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
WorkMatrixType m_workMatrix;
- bool m_isInitialized, m_computeU, m_computeV;
+ bool m_isInitialized;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
template<typename _MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct ei_svd_precondition_2x2_block_to_be_real;
@@ -326,28 +391,28 @@ struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, tr
{
z = ei_abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
- if(svd.m_computeU) svd.m_matrixU.col(p) *= ei_conj(z);
+ if(svd.computeU()) svd.m_matrixU.col(p) *= ei_conj(z);
z = ei_abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
- if(svd.m_computeU) svd.m_matrixU.col(q) *= ei_conj(z);
+ if(svd.computeU()) svd.m_matrixU.col(q) *= ei_conj(z);
}
else
{
rot.c() = ei_conj(work_matrix.coeff(p,p)) / n;
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
- if(svd.m_computeU) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
if(work_matrix.coeff(p,q) != Scalar(0))
{
Scalar z = ei_abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
- if(svd.m_computeV) svd.m_matrixV.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
if(work_matrix.coeff(q,q) != Scalar(0))
{
z = ei_abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
- if(svd.m_computeU) svd.m_matrixU.col(q) *= ei_conj(z);
+ if(svd.computeU()) svd.m_matrixU.col(q) *= ei_conj(z);
}
}
}
@@ -384,8 +449,14 @@ template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
- m_computeU = computationOptions & ComputeU;
- m_computeV = computationOptions & ComputeV;
+ m_computeFullU = computationOptions & ComputeFullU;
+ m_computeThinU = computationOptions & ComputeThinU;
+ m_computeFullV = computationOptions & ComputeFullV;
+ m_computeThinV = computationOptions & ComputeThinV;
+ ei_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
+ ei_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
+ ei_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
Index rows = matrix.rows();
Index cols = matrix.cols();
Index diagSize = std::min(rows, cols);
@@ -396,8 +467,10 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
&& !ei_qr_preconditioner_impl<MatrixType, QRPreconditioner, PreconditionIfMoreRowsThanCols>::run(*this, matrix))
{
m_workMatrix = matrix.block(0,0,diagSize,diagSize);
- if(m_computeU) m_matrixU.setIdentity(rows,rows);
- if(m_computeV) m_matrixV.setIdentity(cols,cols);
+ if(m_computeFullU) m_matrixU.setIdentity(rows,rows);
+ if(m_computeThinU) m_matrixU.setIdentity(rows,diagSize);
+ if(m_computeFullV) m_matrixV.setIdentity(cols,cols);
+ if(m_computeThinV) m_matrixV.setIdentity(diagSize,cols);
}
bool finished = false;
@@ -418,10 +491,10 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
ei_real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
m_workMatrix.applyOnTheLeft(p,q,j_left);
- if(m_computeU) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+ if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
m_workMatrix.applyOnTheRight(p,q,j_right);
- if(m_computeV) m_matrixV.applyOnTheRight(p,q,j_right);
+ if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
}
}
}
@@ -431,7 +504,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
{
RealScalar a = ei_abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = a;
- if(m_computeU && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+ if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
for(Index i = 0; i < diagSize; i++)
@@ -442,8 +515,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
{
pos += i;
std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
- if(m_computeU) m_matrixU.col(pos).swap(m_matrixU.col(i));
- if(m_computeV) m_matrixV.col(pos).swap(m_matrixV.col(i));
+ if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+ if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp
index 56b91c983..32b25f9d2 100644
--- a/test/jacobisvd.cpp
+++ b/test/jacobisvd.cpp
@@ -28,7 +28,7 @@
#include <Eigen/LU>
template<typename MatrixType, int QRPreconditioner>
-void svd_with_qr_preconditioner(const MatrixType& m = MatrixType(), bool pickrandom = true)
+void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
{
typedef typename MatrixType::Index Index;
Index rows = m.rows();
@@ -46,33 +46,76 @@ void svd_with_qr_preconditioner(const MatrixType& m = MatrixType(), bool pickran
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef Matrix<Scalar, ColsAtCompileTime, 1> InputVectorType;
- MatrixType a;
- if(pickrandom) a = MatrixType::Random(rows,cols);
- else a = m;
-
- JacobiSVD<MatrixType, QRPreconditioner> svd(a, ComputeU|ComputeV);
MatrixType sigma = MatrixType::Zero(rows,cols);
sigma.diagonal() = svd.singularValues().template cast<Scalar>();
MatrixUType u = svd.matrixU();
MatrixVType v = svd.matrixV();
- //std::cout << "a\n" << a << std::endl;
- //std::cout << "b\n" << u * sigma * v.adjoint() << std::endl;
-
- VERIFY_IS_APPROX(a, u * sigma * v.adjoint());
+ VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
VERIFY_IS_UNITARY(u);
VERIFY_IS_UNITARY(v);
}
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ Index diagSize = std::min(rows, cols);
+
+ JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions);
+
+ VERIFY_IS_EQUAL(svd.singularValues(), referenceSvd.singularValues());
+ if(computationOptions & ComputeFullU)
+ VERIFY_IS_EQUAL(svd.matrixU(), referenceSvd.matrixU());
+ if(computationOptions & ComputeThinU)
+ VERIFY_IS_EQUAL(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
+ if(computationOptions & ComputeFullV)
+ VERIFY_IS_EQUAL(svd.matrixV(), referenceSvd.matrixV());
+ if(computationOptions & ComputeThinV)
+ VERIFY_IS_EQUAL(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
+}
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_test_all_computation_options(const MatrixType& m)
+{
+ if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
+ return;
+ JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV);
+
+ jacobisvd_check_full(m, fullSvd);
+
+ if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ return;
+
+ jacobisvd_compare_to_full(m, ComputeFullU, fullSvd);
+ jacobisvd_compare_to_full(m, ComputeFullV, fullSvd);
+ jacobisvd_compare_to_full(m, 0, fullSvd);
+
+ if (MatrixType::ColsAtCompileTime == Dynamic) {
+ // thin U/V are only available with dynamic number of columns
+ jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd);
+ jacobisvd_compare_to_full(m, ComputeThinV, fullSvd);
+ jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd);
+ jacobisvd_compare_to_full(m, ComputeThinU , fullSvd);
+ jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd);
+ }
+}
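A hypothetical companion check, not in this patch, illustrating what the thin factors provide: they already suffice to reconstruct the input, i.e. m is approximately U_thin * S * V_thin^*. It reuses the test macros from this file and the ColPivHouseholderQR preconditioner, since the default preconditioner cannot compute thin unitaries:

  void jacobisvd_check_thin_reconstruction(const MatrixXf& m)
  {
    JacobiSVD<MatrixXf, ColPivHouseholderQRPreconditioner> svd(m, ComputeThinU | ComputeThinV);
    // matrixU() is rows x diagSize and matrixV() is cols x diagSize, with diagSize = min(rows, cols).
    MatrixXf reconstructed = svd.matrixU()
                           * svd.singularValues().asDiagonal()
                           * svd.matrixV().adjoint();
    VERIFY_IS_APPROX(m, reconstructed);
  }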
+
template<typename MatrixType>
-void svd(const MatrixType& m = MatrixType(), bool pickrandom = true)
+void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
{
- svd_with_qr_preconditioner<MatrixType, FullPivHouseholderQRPreconditioner>(m, pickrandom);
- svd_with_qr_preconditioner<MatrixType, ColPivHouseholderQRPreconditioner>(m, pickrandom);
- svd_with_qr_preconditioner<MatrixType, HouseholderQRPreconditioner>(m, pickrandom);
+ MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+ jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
}
-template<typename MatrixType> void svd_verify_assert()
+template<typename MatrixType> void jacobisvd_verify_assert()
{
MatrixType tmp;
@@ -93,29 +136,29 @@ void test_jacobisvd()
Matrix2cd m;
m << 0, 1,
0, 1;
- CALL_SUBTEST_1(( svd(m, false) ));
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
m << 1, 0,
1, 0;
- CALL_SUBTEST_1(( svd(m, false) ));
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
Matrix2d n;
n << 1, 1,
1, -1;
- CALL_SUBTEST_2(( svd(n, false) ));
- CALL_SUBTEST_3(( svd<Matrix3f>() ));
- CALL_SUBTEST_4(( svd<Matrix4d>() ));
- CALL_SUBTEST_5(( svd<Matrix<float,3,5> >() ));
- CALL_SUBTEST_6(( svd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
-
- CALL_SUBTEST_7(( svd<MatrixXf>(MatrixXf(50,50)) ));
- CALL_SUBTEST_8(( svd<MatrixXcd>(MatrixXcd(14,7)) ));
+ CALL_SUBTEST_2(( jacobisvd(n, false) ));
+ CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
+ CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
+ CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
+ CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
+
+ CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(50,50)) ));
+ CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(14,7)) ));
}
- CALL_SUBTEST_9(( svd<MatrixXf>(MatrixXf(300,200)) ));
- CALL_SUBTEST_10(( svd<MatrixXcd>(MatrixXcd(100,150)) ));
+ CALL_SUBTEST_9(( jacobisvd<MatrixXf>(MatrixXf(300,200)) ));
+ CALL_SUBTEST_10(( jacobisvd<MatrixXcd>(MatrixXcd(100,150)) ));
- CALL_SUBTEST_3(( svd_verify_assert<Matrix3f>() ));
- CALL_SUBTEST_3(( svd_verify_assert<Matrix3d>() ));
- CALL_SUBTEST_9(( svd_verify_assert<MatrixXf>() ));
- CALL_SUBTEST_11(( svd_verify_assert<MatrixXd>() ));
+ CALL_SUBTEST_3(( jacobisvd_verify_assert<Matrix3f>() ));
+ CALL_SUBTEST_3(( jacobisvd_verify_assert<Matrix3d>() ));
+ CALL_SUBTEST_9(( jacobisvd_verify_assert<MatrixXf>() ));
+ CALL_SUBTEST_11(( jacobisvd_verify_assert<MatrixXd>() ));
// Test problem size constructors
CALL_SUBTEST_12( JacobiSVD<MatrixXf>(10, 20) );