Diffstat (limited to 'test')
-rw-r--r--  test/CMakeLists.txt              |  41
-rw-r--r--  test/array.cpp                   |  25
-rw-r--r--  test/block.cpp                   |   8
-rw-r--r--  test/evaluators.cpp              | 142
-rw-r--r--  test/geo_homogeneous.cpp         |   7
-rw-r--r--  test/geo_orthomethods.cpp        |   9
-rw-r--r--  test/inverse.cpp                 |   9
-rw-r--r--  test/jacobisvd.cpp               | 375
-rw-r--r--  test/linearstructure.cpp         |  40
-rw-r--r--  test/main.h                      |   9
-rw-r--r--  test/mixingtypes.cpp             |   9
-rw-r--r--  test/nesting_ops.cpp             |   2
-rw-r--r--  test/product.h                   |   8
-rw-r--r--  test/product_mmtr.cpp            |   3
-rw-r--r--  test/product_notemporary.cpp     |   3
-rw-r--r--  test/qr_fullpivoting.cpp         |   6
-rw-r--r--  test/sparse_basic.cpp            |   9
-rw-r--r--  test/sparse_product.cpp          |  22
-rw-r--r--  test/sparse_vector.cpp           |   1
-rw-r--r--  test/stable_norm.cpp             |  69
-rw-r--r--  test/svd_common.h                | 454
-rw-r--r--  test/upperbidiagonalization.cpp  |   2
-rw-r--r--  test/vectorization_logic.cpp     |  49
-rw-r--r--  test/vectorwiseop.cpp            |   4
24 files changed, 880 insertions, 426 deletions
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 47aefddb8..530e9e4e1 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -139,17 +139,12 @@ endif(TEST_LIB)
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Official")
add_custom_target(BuildOfficial)
-option(EIGEN_TEST_EVALUATORS "Enable work in progress evaluators" OFF)
-if(EIGEN_TEST_EVALUATORS)
- add_definitions("-DEIGEN_TEST_EVALUATORS=1")
- add_definitions("-DEIGEN_ENABLE_EVALUATORS=1")
-endif(EIGEN_TEST_EVALUATORS)
-
ei_add_test(meta)
ei_add_test(sizeof)
ei_add_test(dynalloc)
ei_add_test(nomalloc)
ei_add_test(first_aligned)
+ei_add_test(nullary)
ei_add_test(mixingtypes)
ei_add_test(packetmath)
ei_add_test(unalignedassert)
@@ -165,6 +160,9 @@ ei_add_test(redux)
ei_add_test(visitor)
ei_add_test(block)
ei_add_test(corners)
+ei_add_test(swap)
+ei_add_test(resize)
+ei_add_test(conservative_resize)
ei_add_test(product_small)
ei_add_test(product_large)
ei_add_test(product_extra)
@@ -193,6 +191,7 @@ ei_add_test(product_trsolve)
ei_add_test(product_mmtr)
ei_add_test(product_notemporary)
ei_add_test(stable_norm)
+ei_add_test(permutationmatrices)
ei_add_test(bandmatrix)
ei_add_test(cholesky)
ei_add_test(lu)
@@ -212,30 +211,30 @@ ei_add_test(real_qz)
ei_add_test(eigensolver_generalized_real)
ei_add_test(jacobi)
ei_add_test(jacobisvd)
+ei_add_test(householder)
ei_add_test(geo_orthomethods)
-ei_add_test(geo_homogeneous)
ei_add_test(geo_quaternion)
-ei_add_test(geo_transformations)
ei_add_test(geo_eulerangles)
-ei_add_test(geo_hyperplane)
ei_add_test(geo_parametrizedline)
ei_add_test(geo_alignedbox)
+ei_add_test(geo_hyperplane)
+ei_add_test(geo_transformations)
+ei_add_test(geo_homogeneous)
ei_add_test(stdvector)
ei_add_test(stdvector_overload)
ei_add_test(stdlist)
ei_add_test(stddeque)
-ei_add_test(resize)
-ei_add_test(sparse_vector)
ei_add_test(sparse_basic)
+ei_add_test(sparse_vector)
ei_add_test(sparse_product)
ei_add_test(sparse_solvers)
-ei_add_test(umeyama)
-ei_add_test(householder)
-ei_add_test(swap)
-ei_add_test(conservative_resize)
-ei_add_test(permutationmatrices)
ei_add_test(sparse_permutations)
-ei_add_test(nullary)
+ei_add_test(simplicial_cholesky)
+ei_add_test(conjugate_gradient)
+ei_add_test(bicgstab)
+ei_add_test(sparselu)
+ei_add_test(sparseqr)
+ei_add_test(umeyama)
ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(zerosized)
ei_add_test(dontalign)
@@ -249,13 +248,7 @@ ei_add_test(special_numbers)
ei_add_test(rvalue_types)
ei_add_test(dense_storage)
-ei_add_test(simplicial_cholesky)
-ei_add_test(conjugate_gradient)
-ei_add_test(bicgstab)
-ei_add_test(sparselu)
-ei_add_test(sparseqr)
-
-# ei_add_test(denseLM)
+# # ei_add_test(denseLM)
if(QT4_FOUND)
ei_add_test(qtvector "" "${QT_QTCORE_LIBRARY}")
diff --git a/test/array.cpp b/test/array.cpp
index 010fead2d..ac9be097d 100644
--- a/test/array.cpp
+++ b/test/array.cpp
@@ -81,6 +81,31 @@ template<typename ArrayType> void array(const ArrayType& m)
VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1);
m3 = m1;
VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1);
+
+ // Conversion from scalar
+ VERIFY_IS_APPROX((m3 = s1), ArrayType::Constant(rows,cols,s1));
+ VERIFY_IS_APPROX((m3 = 1), ArrayType::Constant(rows,cols,1));
+ VERIFY_IS_APPROX((m3.topLeftCorner(rows,cols) = 1), ArrayType::Constant(rows,cols,1));
+ typedef Array<Scalar,
+ ArrayType::RowsAtCompileTime==Dynamic?2:ArrayType::RowsAtCompileTime,
+ ArrayType::ColsAtCompileTime==Dynamic?2:ArrayType::ColsAtCompileTime,
+ ArrayType::Options> FixedArrayType;
+ FixedArrayType f1(s1);
+ VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1));
+ FixedArrayType f2(numext::real(s1));
+ VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1)));
+ FixedArrayType f3((int)100*numext::real(s1));
+ VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1)));
+ f1.setRandom();
+ FixedArrayType f4(f1.data());
+ VERIFY_IS_APPROX(f4, f1);
+
+ // Check possible conflicts with 1D ctor
+ typedef Array<Scalar, Dynamic, 1> OneDArrayType;
+ OneDArrayType o1(rows);
+ VERIFY(o1.size()==rows);
+ OneDArrayType o4((int)rows);
+ VERIFY(o4.size()==rows);
}
template<typename ArrayType> void comparisons(const ArrayType& m)
diff --git a/test/block.cpp b/test/block.cpp
index 269acd28e..3b77b704a 100644
--- a/test/block.cpp
+++ b/test/block.cpp
@@ -130,6 +130,14 @@ template<typename MatrixType> void block(const MatrixType& m)
VERIFY(numext::real(ones.col(c1).dot(ones.col(c2))) == RealScalar(rows));
VERIFY(numext::real(ones.row(r1).dot(ones.row(r2))) == RealScalar(cols));
+
+  // check that linear accessors work on blocks
+ m1 = m1_copy;
+ if((MatrixType::Flags&RowMajorBit)==0)
+ VERIFY_IS_EQUAL(m1.leftCols(c1).coeff(r1+c1*rows), m1(r1,c1));
+ else
+ VERIFY_IS_EQUAL(m1.topRows(r1).coeff(c1+r1*cols), m1(r1,c1));
+
// now test some block-inside-of-block.
diff --git a/test/evaluators.cpp b/test/evaluators.cpp
index e3922c1be..f41968da8 100644
--- a/test/evaluators.cpp
+++ b/test/evaluators.cpp
@@ -1,7 +1,78 @@
-#define EIGEN_ENABLE_EVALUATORS
+
#include "main.h"
-using internal::copy_using_evaluator;
+namespace Eigen {
+
+ template<typename DstXprType, typename SrcXprType>
+ EIGEN_STRONG_INLINE
+ DstXprType& copy_using_evaluator(const EigenBase<DstXprType> &dst, const SrcXprType &src)
+ {
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ return dst.const_cast_derived();
+ }
+
+ template<typename DstXprType, template <typename> class StorageBase, typename SrcXprType>
+ EIGEN_STRONG_INLINE
+ const DstXprType& copy_using_evaluator(const NoAlias<DstXprType, StorageBase>& dst, const SrcXprType &src)
+ {
+ call_assignment(dst, src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ return dst.expression();
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ EIGEN_STRONG_INLINE
+ DstXprType& copy_using_evaluator(const PlainObjectBase<DstXprType> &dst, const SrcXprType &src)
+ {
+ #ifdef EIGEN_NO_AUTOMATIC_RESIZING
+ eigen_assert((dst.size()==0 || (IsVectorAtCompileTime ? (dst.size() == src.size())
+ : (dst.rows() == src.rows() && dst.cols() == src.cols())))
+ && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+ #else
+ dst.const_cast_derived().resizeLike(src.derived());
+ #endif
+
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::assign_op<typename DstXprType::Scalar>());
+ return dst.const_cast_derived();
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ void add_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::add_assign_op<Scalar>());
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ void subtract_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ call_assignment(const_cast<DstXprType&>(dst), src.derived(), internal::sub_assign_op<Scalar>());
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ void multiply_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::mul_assign_op<Scalar>());
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ void divide_assign_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ call_assignment(dst.const_cast_derived(), src.derived(), internal::div_assign_op<Scalar>());
+ }
+
+ template<typename DstXprType, typename SrcXprType>
+ void swap_using_evaluator(const DstXprType& dst, const SrcXprType& src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ call_assignment(dst.const_cast_derived(), src.const_cast_derived(), internal::swap_assign_op<Scalar>());
+ }
+
+}
+
+
using namespace std;
#define VERIFY_IS_APPROX_EVALUATOR(DEST,EXPR) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (EXPR).eval());
@@ -72,8 +143,19 @@ void test_evaluators()
c = a*a;
copy_using_evaluator(a, prod(a,a));
VERIFY_IS_APPROX(a,c);
+
+ // check compound assignment of products
+ d = c;
+ add_assign_using_evaluator(c.noalias(), prod(a,b));
+ d.noalias() += a*b;
+ VERIFY_IS_APPROX(c, d);
+
+ d = c;
+ subtract_assign_using_evaluator(c.noalias(), prod(a,b));
+ d.noalias() -= a*b;
+ VERIFY_IS_APPROX(c, d);
}
-
+
{
// test product with all possible sizes
int s = internal::random<int>(1,100);
@@ -124,7 +206,7 @@ void test_evaluators()
// this does not work because Random is eval-before-nested:
// copy_using_evaluator(w, Vector2d::Random().transpose());
-
+
// test CwiseUnaryOp
VERIFY_IS_APPROX_EVALUATOR(v2, 3 * v);
VERIFY_IS_APPROX_EVALUATOR(w, (3 * v).transpose());
@@ -327,4 +409,56 @@ void test_evaluators()
arr_ref.row(1) /= (arr_ref.row(2) + 1);
VERIFY_IS_APPROX(arr, arr_ref);
}
+
+ {
+ // test triangular shapes
+ MatrixXd A = MatrixXd::Random(6,6), B(6,6), C(6,6), D(6,6);
+ A.setRandom();B.setRandom();
+ VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView<Upper>(), MatrixXd(A.triangularView<Upper>()));
+
+ A.setRandom();B.setRandom();
+ VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView<UnitLower>(), MatrixXd(A.triangularView<UnitLower>()));
+
+ A.setRandom();B.setRandom();
+ VERIFY_IS_APPROX_EVALUATOR2(B, A.triangularView<UnitUpper>(), MatrixXd(A.triangularView<UnitUpper>()));
+
+ A.setRandom();B.setRandom();
+ C = B; C.triangularView<Upper>() = A;
+ copy_using_evaluator(B.triangularView<Upper>(), A);
+ VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView<Upper>(), A)");
+
+ A.setRandom();B.setRandom();
+ C = B; C.triangularView<Lower>() = A.triangularView<Lower>();
+ copy_using_evaluator(B.triangularView<Lower>(), A.triangularView<Lower>());
+ VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView<Lower>(), A.triangularView<Lower>())");
+
+
+ A.setRandom();B.setRandom();
+ C = B; C.triangularView<Lower>() = A.triangularView<Upper>().transpose();
+ copy_using_evaluator(B.triangularView<Lower>(), A.triangularView<Upper>().transpose());
+ VERIFY(B.isApprox(C) && "copy_using_evaluator(B.triangularView<Lower>(), A.triangularView<Lower>().transpose())");
+
+
+ A.setRandom();B.setRandom(); C = B; D = A;
+ C.triangularView<Upper>().swap(D.triangularView<Upper>());
+ swap_using_evaluator(B.triangularView<Upper>(), A.triangularView<Upper>());
+ VERIFY(B.isApprox(C) && "swap_using_evaluator(B.triangularView<Upper>(), A.triangularView<Upper>())");
+
+
+ VERIFY_IS_APPROX_EVALUATOR2(B, prod(A.triangularView<Upper>(),A), MatrixXd(A.triangularView<Upper>()*A));
+
+ VERIFY_IS_APPROX_EVALUATOR2(B, prod(A.selfadjointView<Upper>(),A), MatrixXd(A.selfadjointView<Upper>()*A));
+
+ }
+
+ {
+ // test diagonal shapes
+ VectorXd d = VectorXd::Random(6);
+ MatrixXd A = MatrixXd::Random(6,6), B(6,6);
+ A.setRandom();B.setRandom();
+
+ VERIFY_IS_APPROX_EVALUATOR2(B, lazyprod(d.asDiagonal(),A), MatrixXd(d.asDiagonal()*A));
+ VERIFY_IS_APPROX_EVALUATOR2(B, lazyprod(A,d.asDiagonal()), MatrixXd(A*d.asDiagonal()));
+
+ }
}
diff --git a/test/geo_homogeneous.cpp b/test/geo_homogeneous.cpp
index c91bde819..2f9d18c0f 100644
--- a/test/geo_homogeneous.cpp
+++ b/test/geo_homogeneous.cpp
@@ -38,6 +38,10 @@ template<typename Scalar,int Size> void homogeneous(void)
hv0 << v0, 1;
VERIFY_IS_APPROX(v0.homogeneous(), hv0);
VERIFY_IS_APPROX(v0, hv0.hnormalized());
+
+ VERIFY_IS_APPROX(v0.homogeneous().sum(), hv0.sum());
+ VERIFY_IS_APPROX(v0.homogeneous().minCoeff(), hv0.minCoeff());
+ VERIFY_IS_APPROX(v0.homogeneous().maxCoeff(), hv0.maxCoeff());
hm0 << m0, ones.transpose();
VERIFY_IS_APPROX(m0.colwise().homogeneous(), hm0);
@@ -57,7 +61,6 @@ template<typename Scalar,int Size> void homogeneous(void)
VERIFY_IS_APPROX((v0.transpose().rowwise().homogeneous().eval()) * t2,
v0.transpose().rowwise().homogeneous() * t2);
- m0.transpose().rowwise().homogeneous().eval();
VERIFY_IS_APPROX((m0.transpose().rowwise().homogeneous().eval()) * t2,
m0.transpose().rowwise().homogeneous() * t2);
@@ -82,7 +85,7 @@ template<typename Scalar,int Size> void homogeneous(void)
VERIFY_IS_APPROX(aff * pts.colwise().homogeneous(), (aff * pts1).colwise().hnormalized());
VERIFY_IS_APPROX(caff * pts.colwise().homogeneous(), (caff * pts1).colwise().hnormalized());
VERIFY_IS_APPROX(proj * pts.colwise().homogeneous(), (proj * pts1));
-
+
VERIFY_IS_APPROX((aff * pts1).colwise().hnormalized(), aff * pts);
VERIFY_IS_APPROX((caff * pts1).colwise().hnormalized(), caff * pts);
diff --git a/test/geo_orthomethods.cpp b/test/geo_orthomethods.cpp
index c836dae40..7f8beb205 100644
--- a/test/geo_orthomethods.cpp
+++ b/test/geo_orthomethods.cpp
@@ -33,6 +33,7 @@ template<typename Scalar> void orthomethods_3()
VERIFY_IS_MUCH_SMALLER_THAN(v1.dot(v1.cross(v2)), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(v1.cross(v2).dot(v2), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(v2.dot(v1.cross(v2)), Scalar(1));
+ VERIFY_IS_MUCH_SMALLER_THAN(v1.cross(Vector3::Random()).dot(v1), Scalar(1));
Matrix3 mat3;
mat3 << v0.normalized(),
(v0.cross(v1)).normalized(),
@@ -47,6 +48,13 @@ template<typename Scalar> void orthomethods_3()
int i = internal::random<int>(0,2);
mcross = mat3.colwise().cross(vec3);
VERIFY_IS_APPROX(mcross.col(i), mat3.col(i).cross(vec3));
+
+ VERIFY_IS_MUCH_SMALLER_THAN((mat3.adjoint() * mat3.colwise().cross(vec3)).diagonal().cwiseAbs().sum(), Scalar(1));
+ VERIFY_IS_MUCH_SMALLER_THAN((mat3.adjoint() * mat3.colwise().cross(Vector3::Random())).diagonal().cwiseAbs().sum(), Scalar(1));
+
+ VERIFY_IS_MUCH_SMALLER_THAN((vec3.adjoint() * mat3.colwise().cross(vec3)).cwiseAbs().sum(), Scalar(1));
+ VERIFY_IS_MUCH_SMALLER_THAN((vec3.adjoint() * Matrix3::Random().colwise().cross(vec3)).cwiseAbs().sum(), Scalar(1));
+
mcross = mat3.rowwise().cross(vec3);
VERIFY_IS_APPROX(mcross.row(i), mat3.row(i).cross(vec3));
@@ -57,6 +65,7 @@ template<typename Scalar> void orthomethods_3()
v40.w() = v41.w() = v42.w() = 0;
v42.template head<3>() = v40.template head<3>().cross(v41.template head<3>());
VERIFY_IS_APPROX(v40.cross3(v41), v42);
+ VERIFY_IS_MUCH_SMALLER_THAN(v40.cross3(Vector4::Random()).dot(v40), Scalar(1));
// check mixed product
typedef Matrix<RealScalar, 3, 1> RealVector3;
diff --git a/test/inverse.cpp b/test/inverse.cpp
index 8187b088d..1e7b20958 100644
--- a/test/inverse.cpp
+++ b/test/inverse.cpp
@@ -68,6 +68,15 @@ template<typename MatrixType> void inverse(const MatrixType& m)
VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1));
m3.computeInverseWithCheck(m4, invertible);
VERIFY( rows==1 ? invertible : !invertible );
+
+ // check with submatrices
+ {
+ Matrix<Scalar, MatrixType::RowsAtCompileTime+1, MatrixType::RowsAtCompileTime+1, MatrixType::Options> m3;
+ m3.setRandom();
+ m3.topLeftCorner(rows,rows) = m1;
+ m2 = m3.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>().inverse();
+ VERIFY_IS_APPROX( (m3.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>()), m2.inverse() );
+ }
#endif
// check in-place inversion
diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp
index 36721b496..bfcadce95 100644
--- a/test/jacobisvd.cpp
+++ b/test/jacobisvd.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -14,273 +14,47 @@
#include "main.h"
#include <Eigen/SVD>
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef typename MatrixType::Scalar Scalar;
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
-
- MatrixType sigma = MatrixType::Zero(rows,cols);
- sigma.diagonal() = svd.singularValues().template cast<Scalar>();
- MatrixUType u = svd.matrixU();
- MatrixVType v = svd.matrixV();
-
- VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
- VERIFY_IS_UNITARY(u);
- VERIFY_IS_UNITARY(v);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
- Index diagSize = (std::min)(rows, cols);
-
- JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions);
-
- VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
- if(computationOptions & ComputeFullU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
- if(computationOptions & ComputeThinU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
- if(computationOptions & ComputeFullV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
- if(computationOptions & ComputeThinV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
-}
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
- typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
-
- RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
- JacobiSVD<MatrixType, QRPreconditioner> svd(m, computationOptions);
-
- if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8);
- else if(internal::is_same<RealScalar,float>::value) svd.setThreshold(1e-4);
-
- SolutionType x = svd.solve(rhs);
-
- RealScalar residual = (m*x-rhs).norm();
- // Check that there is no significantly better solution in the neighborhood of x
- if(!test_isMuchSmallerThan(residual,rhs.norm()))
- {
- // If the residual is very small, then we have an exact solution, so we are already good.
- for(int k=0;k<x.rows();++k)
- {
- SolutionType y(x);
- y.row(k).array() += 2*NumTraits<RealScalar>::epsilon();
- RealScalar residual_y = (m*y-rhs).norm();
- VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
-
- y.row(k) = x.row(k).array() - 2*NumTraits<RealScalar>::epsilon();
- residual_y = (m*y-rhs).norm();
- VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
- }
- }
-
- // evaluate normal equation which works also for least-squares solutions
- if(internal::is_same<RealScalar,double>::value)
- {
- // This test is not stable with single precision.
- // This is probably because squaring m signicantly affects the precision.
- VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
- }
-
- // check minimal norm solutions
- {
- // generate a full-rank m x n problem with m<n
- enum {
- RankAtCompileTime2 = ColsAtCompileTime==Dynamic ? Dynamic : (ColsAtCompileTime)/2+1,
- RowsAtCompileTime3 = ColsAtCompileTime==Dynamic ? Dynamic : ColsAtCompileTime+1
- };
- typedef Matrix<Scalar, RankAtCompileTime2, ColsAtCompileTime> MatrixType2;
- typedef Matrix<Scalar, RankAtCompileTime2, 1> RhsType2;
- typedef Matrix<Scalar, ColsAtCompileTime, RankAtCompileTime2> MatrixType2T;
- Index rank = RankAtCompileTime2==Dynamic ? internal::random<Index>(1,cols) : Index(RankAtCompileTime2);
- MatrixType2 m2(rank,cols);
- int guard = 0;
- do {
- m2.setRandom();
- } while(m2.jacobiSvd().setThreshold(test_precision<Scalar>()).rank()!=rank && (++guard)<10);
- VERIFY(guard<10);
- RhsType2 rhs2 = RhsType2::Random(rank);
- // use QR to find a reference minimal norm solution
- HouseholderQR<MatrixType2T> qr(m2.adjoint());
- Matrix<Scalar,Dynamic,1> tmp = qr.matrixQR().topLeftCorner(rank,rank).template triangularView<Upper>().adjoint().solve(rhs2);
- tmp.conservativeResize(cols);
- tmp.tail(cols-rank).setZero();
- SolutionType x21 = qr.householderQ() * tmp;
- // now check with SVD
- JacobiSVD<MatrixType2, ColPivHouseholderQRPreconditioner> svd2(m2, computationOptions);
- SolutionType x22 = svd2.solve(rhs2);
- VERIFY_IS_APPROX(m2*x21, rhs2);
- VERIFY_IS_APPROX(m2*x22, rhs2);
- VERIFY_IS_APPROX(x21, x22);
-
- // Now check with a rank deficient matrix
- typedef Matrix<Scalar, RowsAtCompileTime3, ColsAtCompileTime> MatrixType3;
- typedef Matrix<Scalar, RowsAtCompileTime3, 1> RhsType3;
- Index rows3 = RowsAtCompileTime3==Dynamic ? internal::random<Index>(rank+1,2*cols) : Index(RowsAtCompileTime3);
- Matrix<Scalar,RowsAtCompileTime3,Dynamic> C = Matrix<Scalar,RowsAtCompileTime3,Dynamic>::Random(rows3,rank);
- MatrixType3 m3 = C * m2;
- RhsType3 rhs3 = C * rhs2;
- JacobiSVD<MatrixType3, ColPivHouseholderQRPreconditioner> svd3(m3, computationOptions);
- SolutionType x3 = svd3.solve(rhs3);
- VERIFY_IS_APPROX(m3*x3, rhs3);
- VERIFY_IS_APPROX(m3*x21, rhs3);
- VERIFY_IS_APPROX(m2*x3, rhs2);
-
- VERIFY_IS_APPROX(x21, x3);
- }
-}
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_test_all_computation_options(const MatrixType& m)
-{
- if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
- return;
- JacobiSVD<MatrixType, QRPreconditioner> fullSvd(m, ComputeFullU|ComputeFullV);
- CALL_SUBTEST(( jacobisvd_check_full(m, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeFullV) ));
-
- #if defined __INTEL_COMPILER
- // remark #111: statement is unreachable
- #pragma warning disable 111
- #endif
- if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
- return;
-
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullV, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, 0, fullSvd) ));
-
- if (MatrixType::ColsAtCompileTime == Dynamic) {
- // thin U/V are only available with dynamic number of columns
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinV, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU , fullSvd) ));
- CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) ));
- CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeFullU | ComputeThinV) ));
- CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeFullV) ));
- CALL_SUBTEST(( jacobisvd_solve<MatrixType, QRPreconditioner>(m, ComputeThinU | ComputeThinV) ));
-
- // test reconstruction
- typedef typename MatrixType::Index Index;
- Index diagSize = (std::min)(m.rows(), m.cols());
- JacobiSVD<MatrixType, QRPreconditioner> svd(m, ComputeThinU | ComputeThinV);
- VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
- }
-}
+#define SVD_DEFAULT(M) JacobiSVD<M>
+#define SVD_FOR_MIN_NORM(M) JacobiSVD<M,ColPivHouseholderQRPreconditioner>
+#include "svd_common.h"
+// Check all variants of JacobiSVD
template<typename MatrixType>
void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
{
MatrixType m = a;
if(pickrandom)
- {
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename MatrixType::Index Index;
- Index diagSize = (std::min)(a.rows(), a.cols());
- RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
- s = internal::random<RealScalar>(1,s);
- Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(diagSize);
- for(Index k=0; k<diagSize; ++k)
- d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
- m = Matrix<Scalar,Dynamic,Dynamic>::Random(a.rows(),diagSize) * d.asDiagonal() * Matrix<Scalar,Dynamic,Dynamic>::Random(diagSize,a.cols());
- // cancel some coeffs
- Index n = internal::random<Index>(0,m.size()-1);
- for(Index i=0; i<n; ++i)
- m(internal::random<Index>(0,m.rows()-1), internal::random<Index>(0,m.cols()-1)) = Scalar(0);
- }
+ svd_fill_random(m);
- CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m) ));
- CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m) ));
- CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m) ));
- CALL_SUBTEST(( jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m) ));
+ CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> >(m, true) )); // check full only
+ CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner> >(m, false) ));
+ CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, HouseholderQRPreconditioner> >(m, false) ));
+ if(m.rows()==m.cols())
+ CALL_SUBTEST(( svd_test_all_computation_options<JacobiSVD<MatrixType, NoQRPreconditioner> >(m, false) ));
}
template<typename MatrixType> void jacobisvd_verify_assert(const MatrixType& m)
{
- typedef typename MatrixType::Scalar Scalar;
+ svd_verify_assert<JacobiSVD<MatrixType> >(m);
typedef typename MatrixType::Index Index;
Index rows = m.rows();
Index cols = m.cols();
enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime
};
- typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
-
- RhsType rhs(rows);
-
- JacobiSVD<MatrixType> svd;
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.singularValues())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
MatrixType a = MatrixType::Zero(rows, cols);
a.setZero();
- svd.compute(a, 0);
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- svd.singularValues();
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
if (ColsAtCompileTime == Dynamic)
{
- svd.compute(a, ComputeThinU);
- svd.matrixU();
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
-
- svd.compute(a, ComputeThinV);
- svd.matrixV();
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
-
JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
}
- else
- {
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
- }
}
template<typename MatrixType>
@@ -296,128 +70,17 @@ void jacobisvd_method()
VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
}
-// work around stupid msvc error when constructing at compile time an expression that involves
-// a division by zero, even if the numeric type has floating point
-template<typename Scalar>
-EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
-
-// workaround aggressive optimization in ICC
-template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
-
-template<typename MatrixType>
-void jacobisvd_inf_nan()
-{
- // all this function does is verify we don't iterate infinitely on nan/inf values
-
- JacobiSVD<MatrixType> svd;
- typedef typename MatrixType::Scalar Scalar;
- Scalar some_inf = Scalar(1) / zero<Scalar>();
- VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
- svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
-
- Scalar some_nan = zero<Scalar>() / zero<Scalar>();
- VERIFY(some_nan != some_nan);
- svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
-
- MatrixType m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
- svd.compute(m, ComputeFullU | ComputeFullV);
-
- m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
- svd.compute(m, ComputeFullU | ComputeFullV);
-}
-
-// Regression test for bug 286: JacobiSVD loops indefinitely with some
-// matrices containing denormal numbers.
-void jacobisvd_underoverflow()
-{
-#if defined __INTEL_COMPILER
-// shut up warning #239: floating point underflow
-#pragma warning push
-#pragma warning disable 239
-#endif
- Matrix2d M;
- M << -7.90884e-313, -4.94e-324,
- 0, 5.60844e-313;
-#if defined __INTEL_COMPILER
-#pragma warning pop
-#endif
- JacobiSVD<Matrix2d> svd;
- svd.compute(M); // just check we don't loop indefinitely
-
- // Check for overflow:
- Matrix3d M3;
- M3 << 4.4331978442502944e+307, -5.8585363752028680e+307, 6.4527017443412964e+307,
- 3.7841695601406358e+307, 2.4331702789740617e+306, -3.5235707140272905e+307,
- -8.7190887618028355e+307, -7.3453213709232193e+307, -2.4367363684472105e+307;
-
- JacobiSVD<Matrix3d> svd3;
- svd3.compute(M3); // just check we don't loop indefinitely
-}
-
-void jacobisvd_preallocate()
-{
- Vector3f v(3.f, 2.f, 1.f);
- MatrixXf m = v.asDiagonal();
-
- internal::set_is_malloc_allowed(false);
- VERIFY_RAISES_ASSERT(VectorXf tmp(10);)
- JacobiSVD<MatrixXf> svd;
- internal::set_is_malloc_allowed(true);
- svd.compute(m);
- VERIFY_IS_APPROX(svd.singularValues(), v);
-
- JacobiSVD<MatrixXf> svd2(3,3);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_RAISES_ASSERT(svd2.matrixU());
- VERIFY_RAISES_ASSERT(svd2.matrixV());
- svd2.compute(m, ComputeFullU | ComputeFullV);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
-
- JacobiSVD<MatrixXf> svd3(3,3,ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m, ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(true);
-}
-
void test_jacobisvd()
{
CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
+
+ svd_all_trivial_2x2(jacobisvd<Matrix2cd>);
+ svd_all_trivial_2x2(jacobisvd<Matrix2d>);
for(int i = 0; i < g_repeat; i++) {
- Matrix2cd m;
- m << 0, 1,
- 0, 1;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
- m << 1, 0,
- 1, 0;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
-
- Matrix2d n;
- n << 0, 0,
- 0, 0;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
- n << 0, 0,
- 0, 1;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
-
CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
@@ -436,7 +99,8 @@ void test_jacobisvd()
(void) c;
// Test on inf/nan matrix
- CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
+ CALL_SUBTEST_7( (svd_inf_nan<JacobiSVD<MatrixXf>, MatrixXf>()) );
+ CALL_SUBTEST_10( (svd_inf_nan<JacobiSVD<MatrixXd>, MatrixXd>()) );
}
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
@@ -450,8 +114,7 @@ void test_jacobisvd()
CALL_SUBTEST_7( JacobiSVD<MatrixXf>(10,10) );
// Check that preallocation avoids subsequent mallocs
- CALL_SUBTEST_9( jacobisvd_preallocate() );
+ CALL_SUBTEST_9( svd_preallocate() );
- // Regression check for bug 286
- CALL_SUBTEST_2( jacobisvd_underoverflow() );
+ CALL_SUBTEST_2( svd_underoverflow() );
}
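
Note on the new shared header: svd_common.h is parameterized only through the SVD_DEFAULT and SVD_FOR_MIN_NORM macros it checks at its top, so the wiring above is all a test needs in order to reuse the common checks. A minimal sketch restating that wiring (the function name and the 7x5 size are illustrative; the other names come from this patch, and main.h is assumed to bring the Eigen namespace into scope as in the existing tests):

// Sketch of a test translation unit reusing the shared SVD checks.
#include "main.h"
#include <Eigen/SVD>

// Tell svd_common.h which decompositions to exercise.
#define SVD_DEFAULT(M)      JacobiSVD<M>
#define SVD_FOR_MIN_NORM(M) JacobiSVD<M, ColPivHouseholderQRPreconditioner>
#include "svd_common.h"

void check_random_matrix()
{
  MatrixXf m(7, 5);
  svd_fill_random(m);   // ill-conditioned random fill from svd_common.h
  svd_test_all_computation_options< JacobiSVD<MatrixXf> >(m, /*full_only=*/false);
}
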
diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp
index 618984d5c..87dfa1b6b 100644
--- a/test/linearstructure.cpp
+++ b/test/linearstructure.cpp
@@ -2,11 +2,15 @@
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+static bool g_called;
+#define EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN { g_called = true; }
+
#include "main.h"
template<typename MatrixType> void linearStructure(const MatrixType& m)
@@ -68,6 +72,24 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
VERIFY_IS_APPROX(m1.block(0,0,rows,cols) * s1, m1 * s1);
}
+// Make sure that complex * real and real * complex are properly optimized
+template<typename MatrixType> void real_complex(DenseIndex rows = MatrixType::RowsAtCompileTime, DenseIndex cols = MatrixType::ColsAtCompileTime)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+ RealScalar s = internal::random<RealScalar>();
+ MatrixType m1 = MatrixType::Random(rows, cols);
+
+ g_called = false;
+ VERIFY_IS_APPROX(s*m1, Scalar(s)*m1);
+ VERIFY(g_called && "real * matrix<complex> not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(m1*s, m1*Scalar(s));
+ VERIFY(g_called && "matrix<complex> * real not properly optimized");
+}
+
void test_linearstructure()
{
for(int i = 0; i < g_repeat; i++) {
@@ -80,5 +102,23 @@ void test_linearstructure()
CALL_SUBTEST_7( linearStructure(MatrixXi (internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( linearStructure(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
CALL_SUBTEST_9( linearStructure(ArrayXXf (internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+
+ CALL_SUBTEST_10( real_complex<Matrix4cd>() );
+ CALL_SUBTEST_10( real_complex<MatrixXcf>(10,10) );
+ }
+
+#ifdef EIGEN_TEST_PART_4
+ {
+ // make sure that /=scalar and /scalar do not overflow
+    // rationale: 1.0/4.94e-320 overflows, but m/4.94e-320 should not
+ Matrix4d m2, m3;
+ m3 = m2 = Matrix4d::Random()*1e-20;
+ m2 = m2 / 4.9e-320;
+ VERIFY_IS_APPROX(m2.cwiseQuotient(m2), Matrix4d::Ones());
+ m3 /= 4.9e-320;
+ VERIFY_IS_APPROX(m3.cwiseQuotient(m3), Matrix4d::Ones());
+
+
}
+#endif
}
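
Note on the real_complex check: it relies on defining EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN before including main.h, so that Eigen expands the macro inside the code path under test (presumably the functor implementing the mixed real/complex scalar multiple) and the global flag records whether that path actually ran. A stripped-down, self-contained illustration of the flag pattern itself, with a hypothetical scale() standing in for the optimized functor:

#include <cassert>

static bool g_called = false;   // flipped only by the "specialized" path

// Stand-in for the optimized real*complex functor; in the test above the
// plugin macro injects `g_called = true;` into the real Eigen code path.
double scale(double x, double s)
{
  g_called = true;              // where the plugin expansion would land
  return x * s;
}

int main()
{
  g_called = false;
  double y = scale(2.0, 3.0);
  assert(g_called && "specialized scaling path was not taken");
  return (y == 6.0) ? 0 : 1;
}
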
diff --git a/test/main.h b/test/main.h
index 7667eaa18..371c7e602 100644
--- a/test/main.h
+++ b/test/main.h
@@ -94,6 +94,9 @@ namespace Eigen
static bool g_has_set_repeat, g_has_set_seed;
}
+#define TRACK std::cerr << __FILE__ << " " << __LINE__ << std::endl
+// #define TRACK while()
+
#define EI_PP_MAKE_STRING2(S) #S
#define EI_PP_MAKE_STRING(S) EI_PP_MAKE_STRING2(S)
@@ -311,13 +314,7 @@ inline bool test_isApproxOrLessThan(const long double& a, const long double& b)
template<typename Type1, typename Type2>
inline bool test_isApprox(const Type1& a, const Type2& b)
{
-#ifdef EIGEN_TEST_EVALUATORS
- typename internal::eval<Type1>::type a_eval(a);
- typename internal::eval<Type2>::type b_eval(b);
- return a_eval.isApprox(b_eval, test_precision<typename Type1::Scalar>());
-#else
return a.isApprox(b, test_precision<typename Type1::Scalar>());
-#endif
}
// The idea behind this function is to compare the two scalars a and b where
diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp
index 1e0e2d4c1..048f7255a 100644
--- a/test/mixingtypes.cpp
+++ b/test/mixingtypes.cpp
@@ -53,10 +53,11 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
mf+mf;
VERIFY_RAISES_ASSERT(mf+md);
VERIFY_RAISES_ASSERT(mf+mcf);
- VERIFY_RAISES_ASSERT(vf=vd);
- VERIFY_RAISES_ASSERT(vf+=vd);
- VERIFY_RAISES_ASSERT(mcd=md);
-
+ // the following do not even compile since the introduction of evaluators
+// VERIFY_RAISES_ASSERT(vf=vd);
+// VERIFY_RAISES_ASSERT(vf+=vd);
+// VERIFY_RAISES_ASSERT(mcd=md);
+
// check scalar products
VERIFY_IS_APPROX(vcf * sf , vcf * complex<float>(sf));
VERIFY_IS_APPROX(sd * vcd, complex<double>(sd) * vcd);
diff --git a/test/nesting_ops.cpp b/test/nesting_ops.cpp
index 1e8523283..6e772c70f 100644
--- a/test/nesting_ops.cpp
+++ b/test/nesting_ops.cpp
@@ -11,7 +11,7 @@
template <typename MatrixType> void run_nesting_ops(const MatrixType& _m)
{
- typename MatrixType::Nested m(_m);
+ typename internal::nested_eval<MatrixType,2>::type m(_m);
// Make really sure that we are in debug mode!
VERIFY_RAISES_ASSERT(eigen_assert(false));
diff --git a/test/product.h b/test/product.h
index 856b234ac..0b3abe402 100644
--- a/test/product.h
+++ b/test/product.h
@@ -139,4 +139,12 @@ template<typename MatrixType> void product(const MatrixType& m)
// inner product
Scalar x = square2.row(c) * square2.col(c2);
VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum());
+
+ // outer product
+ VERIFY_IS_APPROX(m1.col(c) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.row(r).transpose() * m1.col(c).transpose(), m1.block(r,0,1,cols).transpose() * m1.block(0,c,rows,1).transpose());
+ VERIFY_IS_APPROX(m1.block(0,c,rows,1) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.col(c) * m1.block(r,0,1,cols), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.leftCols(1) * m1.row(r), m1.block(0,0,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.col(c) * m1.topRows(1), m1.block(0,c,rows,1) * m1.block(0,0,1,cols));
}
diff --git a/test/product_mmtr.cpp b/test/product_mmtr.cpp
index 7d6746800..92e6b668f 100644
--- a/test/product_mmtr.cpp
+++ b/test/product_mmtr.cpp
@@ -13,7 +13,8 @@
ref2 = ref1 = DEST; \
DEST.template triangularView<TRI>() OP; \
ref1 OP; \
- ref2.template triangularView<TRI>() = ref1; \
+ ref2.template triangularView<TRI>() \
+ = ref1.template triangularView<TRI>(); \
VERIFY_IS_APPROX(DEST,ref2); \
}
diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp
index 3a9df618b..805cc8939 100644
--- a/test/product_notemporary.cpp
+++ b/test/product_notemporary.cpp
@@ -113,8 +113,7 @@ template<typename MatrixType> void product_notemporary(const MatrixType& m)
VERIFY_EVALUATION_COUNT( Scalar tmp = 0; tmp += Scalar(RealScalar(1)) / (m3.transpose() * m3).diagonal().array().abs().sum(), 0 );
// Zero temporaries for ... CoeffBasedProductMode
- // - does not work with GCC because of the <..>, we'ld need variadic macros ...
- //VERIFY_EVALUATION_COUNT( m3.col(0).head<5>() * m3.col(0).transpose() + m3.col(0).head<5>() * m3.col(0).transpose(), 0 );
+ VERIFY_EVALUATION_COUNT( m3.col(0).template head<5>() * m3.col(0).transpose() + m3.col(0).template head<5>() * m3.col(0).transpose(), 0 );
// Check matrix * vectors
VERIFY_EVALUATION_COUNT( cvres.noalias() = m1 * cv1, 0 );
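
Note on the re-enabled VERIFY_EVALUATION_COUNT line above: the fix is the added `template` keyword. Inside product_notemporary() the type of m3.col(0) depends on the template parameter, so its member template head<5> must be explicitly disambiguated, otherwise the compiler parses '<' as an ordinary comparison. A minimal standalone sketch of that rule (sum_head5 and demo are hypothetical helpers, not part of the test):

#include <Eigen/Core>

// Core C++ rule, not Eigen-specific: within a template, a member template
// of a dependent expression must be disambiguated with `template`.
template<typename MatrixType>
typename MatrixType::Scalar sum_head5(const MatrixType& m)
{
  // return m.col(0).head<5>().sum();          // ill-formed: '<' parses as operator<
  return m.col(0).template head<5>().sum();    // OK: head is announced as a template
}

int demo()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Ones(6, 6);
  return sum_head5(m) == 5.0 ? 0 : 1;
}
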
diff --git a/test/qr_fullpivoting.cpp b/test/qr_fullpivoting.cpp
index 511f2473f..601773404 100644
--- a/test/qr_fullpivoting.cpp
+++ b/test/qr_fullpivoting.cpp
@@ -40,7 +40,11 @@ template<typename MatrixType> void qr()
MatrixType c = qr.matrixQ() * r * qr.colsPermutation().inverse();
VERIFY_IS_APPROX(m1, c);
-
+
+ // stress the ReturnByValue mechanism
+ MatrixType tmp;
+ VERIFY_IS_APPROX(tmp.noalias() = qr.matrixQ() * r, (qr.matrixQ() * r).eval());
+
MatrixType m2 = MatrixType::Random(cols,cols2);
MatrixType m3 = m1*m2;
m2 = MatrixType::Random(cols,cols2);
diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp
index 4c9b9111e..c86534bad 100644
--- a/test/sparse_basic.cpp
+++ b/test/sparse_basic.cpp
@@ -201,9 +201,9 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY(m3.innerVector(j0).nonZeros() == m3.transpose().innerVector(j0).nonZeros());
- //m2.innerVector(j0) = 2*m2.innerVector(j1);
- //refMat2.col(j0) = 2*refMat2.col(j1);
- //VERIFY_IS_APPROX(m2, refMat2);
+// m2.innerVector(j0) = 2*m2.innerVector(j1);
+// refMat2.col(j0) = 2*refMat2.col(j1);
+// VERIFY_IS_APPROX(m2, refMat2);
}
// test innerVectors()
@@ -239,7 +239,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m2, refMat2);
}
-
+
// test basic computations
{
DenseMatrix refM1 = DenseMatrix::Zero(rows, rows);
@@ -255,6 +255,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
initSparse<Scalar>(density, refM3, m3);
initSparse<Scalar>(density, refM4, m4);
+ VERIFY_IS_APPROX(m1*s1, refM1*s1);
VERIFY_IS_APPROX(m1+m2, refM1+refM2);
VERIFY_IS_APPROX(m1+m2+m3, refM1+refM2+refM3);
VERIFY_IS_APPROX(m3.cwiseProduct(m1+m2), refM3.cwiseProduct(refM1+refM2));
diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp
index 0f52164c8..fa9be5440 100644
--- a/test/sparse_product.cpp
+++ b/test/sparse_product.cpp
@@ -19,7 +19,7 @@ template<typename SparseMatrixType> void sparse_product()
typedef typename SparseMatrixType::Scalar Scalar;
enum { Flags = SparseMatrixType::Flags };
- double density = (std::max)(8./(rows*cols), 0.1);
+ double density = (std::max)(8./(rows*cols), 0.2);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
typedef Matrix<Scalar,1,Dynamic> RowDenseVector;
@@ -77,17 +77,27 @@ template<typename SparseMatrixType> void sparse_product()
m4 = m2; refMat4 = refMat2;
VERIFY_IS_APPROX(m4=m4*m3, refMat4=refMat4*refMat3);
- // sparse * dense
+ // sparse * dense matrix
VERIFY_IS_APPROX(dm4=m2*refMat3, refMat4=refMat2*refMat3);
VERIFY_IS_APPROX(dm4=m2*refMat3t.transpose(), refMat4=refMat2*refMat3t.transpose());
VERIFY_IS_APPROX(dm4=m2t.transpose()*refMat3, refMat4=refMat2t.transpose()*refMat3);
VERIFY_IS_APPROX(dm4=m2t.transpose()*refMat3t.transpose(), refMat4=refMat2t.transpose()*refMat3t.transpose());
+ VERIFY_IS_APPROX(dm4=m2*refMat3, refMat4=refMat2*refMat3);
+ VERIFY_IS_APPROX(dm4=dm4+m2*refMat3, refMat4=refMat4+refMat2*refMat3);
VERIFY_IS_APPROX(dm4=m2*(refMat3+refMat3), refMat4=refMat2*(refMat3+refMat3));
VERIFY_IS_APPROX(dm4=m2t.transpose()*(refMat3+refMat5)*0.5, refMat4=refMat2t.transpose()*(refMat3+refMat5)*0.5);
+
+ // sparse * dense vector
+ VERIFY_IS_APPROX(dm4.col(0)=m2*refMat3.col(0), refMat4.col(0)=refMat2*refMat3.col(0));
+ VERIFY_IS_APPROX(dm4.col(0)=m2*refMat3t.transpose().col(0), refMat4.col(0)=refMat2*refMat3t.transpose().col(0));
+ VERIFY_IS_APPROX(dm4.col(0)=m2t.transpose()*refMat3.col(0), refMat4.col(0)=refMat2t.transpose()*refMat3.col(0));
+ VERIFY_IS_APPROX(dm4.col(0)=m2t.transpose()*refMat3t.transpose().col(0), refMat4.col(0)=refMat2t.transpose()*refMat3t.transpose().col(0));
// dense * sparse
VERIFY_IS_APPROX(dm4=refMat2*m3, refMat4=refMat2*refMat3);
+ VERIFY_IS_APPROX(dm4=dm4+refMat2*m3, refMat4=refMat4+refMat2*refMat3);
+ VERIFY_IS_APPROX(dm4+=refMat2*m3, refMat4+=refMat2*refMat3);
VERIFY_IS_APPROX(dm4=refMat2*m3t.transpose(), refMat4=refMat2*refMat3t.transpose());
VERIFY_IS_APPROX(dm4=refMat2t.transpose()*m3, refMat4=refMat2t.transpose()*refMat3);
VERIFY_IS_APPROX(dm4=refMat2t.transpose()*m3t.transpose(), refMat4=refMat2t.transpose()*refMat3t.transpose());
@@ -99,7 +109,7 @@ template<typename SparseMatrixType> void sparse_product()
Index c1 = internal::random<Index>(0,cols-1);
Index r1 = internal::random<Index>(0,depth-1);
DenseMatrix dm5 = DenseMatrix::Random(depth, cols);
-
+
VERIFY_IS_APPROX( m4=m2.col(c)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
VERIFY_IS_EQUAL(m4.nonZeros(), (refMat4.array()!=0).count());
VERIFY_IS_APPROX( m4=m2.middleCols(c,1)*dm5.col(c1).transpose(), refMat4=refMat2.col(c)*dm5.col(c1).transpose());
@@ -143,11 +153,11 @@ template<typename SparseMatrixType> void sparse_product()
RowSpVector rv0(depth), rv1;
RowDenseVector drv0(depth), drv1(rv1);
initSparse(2*density,drv0, rv0);
-
- VERIFY_IS_APPROX(cv1=rv0*m3, dcv1=drv0*refMat3);
+
+ VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0);
VERIFY_IS_APPROX(rv1=rv0*m3, drv1=drv0*refMat3);
- VERIFY_IS_APPROX(cv1=m3*cv0, dcv1=refMat3*dcv0);
VERIFY_IS_APPROX(cv1=m3t.adjoint()*cv0, dcv1=refMat3t.adjoint()*dcv0);
+ VERIFY_IS_APPROX(cv1=rv0*m3, dcv1=drv0*refMat3);
VERIFY_IS_APPROX(rv1=m3*cv0, drv1=refMat3*dcv0);
}
diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp
index 0c9476803..5eea9edfd 100644
--- a/test/sparse_vector.cpp
+++ b/test/sparse_vector.cpp
@@ -71,6 +71,7 @@ template<typename Scalar,typename Index> void sparse_vector(int rows, int cols)
VERIFY_IS_APPROX(v1.dot(v2), refV1.dot(refV2));
VERIFY_IS_APPROX(v1.dot(refV2), refV1.dot(refV2));
+ VERIFY_IS_APPROX(m1*v2, refM1*refV2);
VERIFY_IS_APPROX(v1.dot(m1*v2), refV1.dot(refM1*refV2));
int i = internal::random<int>(0,rows-1);
VERIFY_IS_APPROX(v1.dot(m1.col(i)), refV1.dot(refM1.col(i)));
diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp
index 549f91fbf..6cd65c64a 100644
--- a/test/stable_norm.cpp
+++ b/test/stable_norm.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -14,6 +14,21 @@ template<typename T> bool isNotNaN(const T& x)
return x==x;
}
+template<typename T> bool isNaN(const T& x)
+{
+ return x!=x;
+}
+
+template<typename T> bool isInf(const T& x)
+{
+ return x > NumTraits<T>::highest();
+}
+
+template<typename T> bool isMinusInf(const T& x)
+{
+ return x < NumTraits<T>::lowest();
+}
+
// workaround aggressive optimization in ICC
template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
@@ -106,6 +121,58 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
VERIFY_IS_APPROX(vrand.rowwise().stableNorm(), vrand.rowwise().norm());
VERIFY_IS_APPROX(vrand.rowwise().blueNorm(), vrand.rowwise().norm());
VERIFY_IS_APPROX(vrand.rowwise().hypotNorm(), vrand.rowwise().norm());
+
+ // test NaN, +inf, -inf
+ MatrixType v;
+ Index i = internal::random<Index>(0,rows-1);
+ Index j = internal::random<Index>(0,cols-1);
+
+ // NaN
+ {
+ v = vrand;
+ v(i,j) = std::numeric_limits<RealScalar>::quiet_NaN();
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isNaN(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isNaN(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isNaN(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isNaN(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isNaN(v.hypotNorm()));
+ }
+
+ // +inf
+ {
+ v = vrand;
+ v(i,j) = std::numeric_limits<RealScalar>::infinity();
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isInf(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isInf(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isInf(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isInf(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isInf(v.hypotNorm()));
+ }
+
+ // -inf
+ {
+ v = vrand;
+ v(i,j) = -std::numeric_limits<RealScalar>::infinity();
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isInf(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isInf(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isInf(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isInf(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isInf(v.hypotNorm()));
+ }
+
+ // mix
+ {
+ Index i2 = internal::random<Index>(0,rows-1);
+ Index j2 = internal::random<Index>(0,cols-1);
+ v = vrand;
+ v(i,j) = -std::numeric_limits<RealScalar>::infinity();
+ v(i2,j2) = std::numeric_limits<RealScalar>::quiet_NaN();
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isNaN(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isNaN(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isNaN(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isNaN(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isNaN(v.hypotNorm()));
+ }
}
void test_stable_norm()
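
Note on the new isInf/isMinusInf helpers: they work by pure comparison, since +inf is the only value strictly greater than the largest finite value and -inf the only one below the lowest, while NaN makes both comparisons false (which is why the NaN cases above are caught by isNaN instead). A tiny standalone check of that reasoning with std::numeric_limits, which NumTraits forwards to for builtin scalar types:

#include <cassert>
#include <limits>

int main()
{
  typedef std::numeric_limits<double> lim;
  double inf = lim::infinity();
  double nan = lim::quiet_NaN();

  assert(  inf > lim::max()  );                            // +inf exceeds every finite value
  assert( -inf < -lim::max() );                            // -inf is below the lowest finite value
  assert( !(nan > lim::max()) && !(nan < -lim::max()) );   // NaN compares false either way
  return 0;
}
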
diff --git a/test/svd_common.h b/test/svd_common.h
new file mode 100644
index 000000000..4631939e5
--- /dev/null
+++ b/test/svd_common.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef SVD_DEFAULT
+#error a macro SVD_DEFAULT(MatrixType) must be defined prior to including svd_common.h
+#endif
+
+#ifndef SVD_FOR_MIN_NORM
+#error a macro SVD_FOR_MIN_NORM(MatrixType) must be defined prior to including svd_common.h
+#endif
+
+// Check that the matrix m is properly reconstructed and that the U and V factors are unitary
+// The SVD must have already been computed.
+template<typename SvdType, typename MatrixType>
+void svd_check_full(const MatrixType& m, const SvdType& svd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
+
+ MatrixType sigma = MatrixType::Zero(rows,cols);
+ sigma.diagonal() = svd.singularValues().template cast<Scalar>();
+ MatrixUType u = svd.matrixU();
+ MatrixVType v = svd.matrixV();
+
+ VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
+ VERIFY_IS_UNITARY(u);
+ VERIFY_IS_UNITARY(v);
+}
+
+// Compare partial SVD defined by computationOptions to a full SVD referenceSvd
+template<typename SvdType, typename MatrixType>
+void svd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const SvdType& referenceSvd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ Index diagSize = (std::min)(rows, cols);
+
+ SvdType svd(m, computationOptions);
+
+ VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
+ if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
+ if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
+ if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
+ if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
+}
+
+// Check that the SVD-based solve() yields a least-squares solution
+template<typename SvdType, typename MatrixType>
+void svd_least_square(const MatrixType& m, unsigned int computationOptions)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
+ typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
+
+ RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
+ SvdType svd(m, computationOptions);
+
+ if(internal::is_same<RealScalar,double>::value) svd.setThreshold(1e-8);
+ else if(internal::is_same<RealScalar,float>::value) svd.setThreshold(1e-4);
+
+ SolutionType x = svd.solve(rhs);
+
+ RealScalar residual = (m*x-rhs).norm();
+ // Check that there is no significantly better solution in the neighborhood of x
+ if(!test_isMuchSmallerThan(residual,rhs.norm()))
+ {
+ // If the residual is very small, then we have an exact solution, so we are already good.
+ for(int k=0;k<x.rows();++k)
+ {
+ SolutionType y(x);
+ y.row(k).array() += 2*NumTraits<RealScalar>::epsilon();
+ RealScalar residual_y = (m*y-rhs).norm();
+ VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
+
+ y.row(k) = x.row(k).array() - 2*NumTraits<RealScalar>::epsilon();
+ residual_y = (m*y-rhs).norm();
+ VERIFY( test_isApprox(residual_y,residual) || residual < residual_y );
+ }
+ }
+
+ // evaluate normal equation which works also for least-squares solutions
+ if(internal::is_same<RealScalar,double>::value)
+ {
+ // This test is not stable with single precision.
+    // This is probably because squaring m significantly affects the precision.
+ VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
+ }
+}
+
+// Check minimal norm solutions; the input matrix m is only used to recover the problem size
+template<typename MatrixType>
+void svd_min_norm(const MatrixType& m, unsigned int computationOptions)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index cols = m.cols();
+
+ enum {
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
+
+ // generate a full-rank m x n problem with m<n
+ enum {
+ RankAtCompileTime2 = ColsAtCompileTime==Dynamic ? Dynamic : (ColsAtCompileTime)/2+1,
+ RowsAtCompileTime3 = ColsAtCompileTime==Dynamic ? Dynamic : ColsAtCompileTime+1
+ };
+ typedef Matrix<Scalar, RankAtCompileTime2, ColsAtCompileTime> MatrixType2;
+ typedef Matrix<Scalar, RankAtCompileTime2, 1> RhsType2;
+ typedef Matrix<Scalar, ColsAtCompileTime, RankAtCompileTime2> MatrixType2T;
+ Index rank = RankAtCompileTime2==Dynamic ? internal::random<Index>(1,cols) : Index(RankAtCompileTime2);
+ MatrixType2 m2(rank,cols);
+ int guard = 0;
+ do {
+ m2.setRandom();
+ } while(SVD_FOR_MIN_NORM(MatrixType2)(m2).setThreshold(test_precision<Scalar>()).rank()!=rank && (++guard)<10);
+ VERIFY(guard<10);
+ RhsType2 rhs2 = RhsType2::Random(rank);
+ // use QR to find a reference minimal norm solution
+ HouseholderQR<MatrixType2T> qr(m2.adjoint());
+ Matrix<Scalar,Dynamic,1> tmp = qr.matrixQR().topLeftCorner(rank,rank).template triangularView<Upper>().adjoint().solve(rhs2);
+ tmp.conservativeResize(cols);
+ tmp.tail(cols-rank).setZero();
+ SolutionType x21 = qr.householderQ() * tmp;
+ // now check with SVD
+ SVD_FOR_MIN_NORM(MatrixType2) svd2(m2, computationOptions);
+ SolutionType x22 = svd2.solve(rhs2);
+ VERIFY_IS_APPROX(m2*x21, rhs2);
+ VERIFY_IS_APPROX(m2*x22, rhs2);
+ VERIFY_IS_APPROX(x21, x22);
+
+ // Now check with a rank deficient matrix
+ typedef Matrix<Scalar, RowsAtCompileTime3, ColsAtCompileTime> MatrixType3;
+ typedef Matrix<Scalar, RowsAtCompileTime3, 1> RhsType3;
+ Index rows3 = RowsAtCompileTime3==Dynamic ? internal::random<Index>(rank+1,2*cols) : Index(RowsAtCompileTime3);
+ Matrix<Scalar,RowsAtCompileTime3,Dynamic> C = Matrix<Scalar,RowsAtCompileTime3,Dynamic>::Random(rows3,rank);
+ MatrixType3 m3 = C * m2;
+ RhsType3 rhs3 = C * rhs2;
+ SVD_FOR_MIN_NORM(MatrixType3) svd3(m3, computationOptions);
+ SolutionType x3 = svd3.solve(rhs3);
+ VERIFY_IS_APPROX(m3*x3, rhs3);
+ VERIFY_IS_APPROX(m3*x21, rhs3);
+ VERIFY_IS_APPROX(m2*x3, rhs2);
+
+ VERIFY_IS_APPROX(x21, x3);
+}
+
+// Check full, compare_to_full, least_square, and min_norm for all possible compute-options
+template<typename SvdType, typename MatrixType>
+void svd_test_all_computation_options(const MatrixType& m, bool full_only)
+{
+// if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
+// return;
+ SvdType fullSvd(m, ComputeFullU|ComputeFullV);
+ CALL_SUBTEST(( svd_check_full(m, fullSvd) ));
+ CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeFullV) ));
+ CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeFullV) ));
+
+ #if defined __INTEL_COMPILER
+ // remark #111: statement is unreachable
+ #pragma warning disable 111
+ #endif
+ if(full_only)
+ return;
+
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU, fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullV, fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, 0, fullSvd) ));
+
+ if (MatrixType::ColsAtCompileTime == Dynamic) {
+    // thin U/V are only available with a dynamic number of columns
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinV, fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU , fullSvd) ));
+ CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) ));
+
+ CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeFullU | ComputeThinV) ));
+ CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeThinU | ComputeFullV) ));
+ CALL_SUBTEST(( svd_least_square<SvdType>(m, ComputeThinU | ComputeThinV) ));
+
+ CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeThinV) ));
+ CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeFullV) ));
+ CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeThinV) ));
+
+ // test reconstruction
+ typedef typename MatrixType::Index Index;
+ Index diagSize = (std::min)(m.rows(), m.cols());
+ SvdType svd(m, ComputeThinU | ComputeThinV);
+ VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
+ }
+}
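+
+// A small sketch of what the Thin/Full options mean dimension-wise; the helper is
+// illustrative only. For an r x c matrix with d = min(r,c), ComputeFull* yields
+// U: r x r and V: c x c, while ComputeThin* yields U: r x d and V: c x d (and thin
+// unitaries require a dynamic number of columns, as noted above).
+inline void svd_thin_vs_full_sketch()
+{
+  MatrixXd m = MatrixXd::Random(7,4);
+  SVD_DEFAULT(MatrixXd) thin(m, ComputeThinU|ComputeThinV);
+  SVD_DEFAULT(MatrixXd) full(m, ComputeFullU|ComputeFullV);
+  VERIFY(thin.matrixU().rows()==7 && thin.matrixU().cols()==4);
+  VERIFY(full.matrixU().rows()==7 && full.matrixU().cols()==7);
+  VERIFY(thin.matrixV().rows()==4 && thin.matrixV().cols()==4);   // d == c here, so thin V == full V
+  VERIFY(full.matrixV().rows()==4 && full.matrixV().cols()==4);
+}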
+
+template<typename MatrixType>
+void svd_fill_random(MatrixType &m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ Index diagSize = (std::min)(m.rows(), m.cols());
+ RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
+ s = internal::random<RealScalar>(1,s);
+ Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(diagSize);
+ for(Index k=0; k<diagSize; ++k)
+ d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
+ m = Matrix<Scalar,Dynamic,Dynamic>::Random(m.rows(),diagSize) * d.asDiagonal() * Matrix<Scalar,Dynamic,Dynamic>::Random(diagSize,m.cols());
+  // zero out a few coefficients at random
+ Index n = internal::random<Index>(0,m.size()-1);
+ for(Index i=0; i<n; ++i)
+ m(internal::random<Index>(0,m.rows()-1), internal::random<Index>(0,m.cols()-1)) = Scalar(0);
+}
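+
+// Usage sketch for svd_fill_random (illustrative only): it produces matrices whose
+// singular values span many orders of magnitude, and a backward-stable SVD should
+// still reconstruct such inputs to the default test precision.
+inline void svd_fill_random_usage_sketch()
+{
+  MatrixXd m(20,15);
+  svd_fill_random(m);
+  SVD_DEFAULT(MatrixXd) svd(m, ComputeThinU|ComputeThinV);
+  VERIFY_IS_APPROX(m, svd.matrixU() * svd.singularValues().asDiagonal() * svd.matrixV().adjoint());
+}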
+
+
+// work around an msvc error when constructing, at compile time, an expression that involves
+// a division by zero, even though the numeric type supports floating-point infinities
+template<typename Scalar>
+EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
+
+// work around aggressive optimization in ICC
+template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
+
+// all this function does is verify that we don't iterate indefinitely on NaN/Inf values
+template<typename SvdType, typename MatrixType>
+void svd_inf_nan()
+{
+ SvdType svd;
+ typedef typename MatrixType::Scalar Scalar;
+ Scalar some_inf = Scalar(1) / zero<Scalar>();
+ VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
+ svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
+
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+ VERIFY(nan != nan);
+ svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV);
+
+ MatrixType m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = nan;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ // regression test for bug 791
+ m.resize(3,3);
+ m << 0, 2*NumTraits<Scalar>::epsilon(), 0.5,
+ 0, -0.5, 0,
+ nan, 0, 0;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ m.resize(4,4);
+ m << 1, 0, 0, 0,
+ 0, 3, 1, 2e-308,
+ 1, 0, 1, nan,
+ 0, nan, nan, 0;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+}
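+
+// A minimal illustration of why the zero() and sub() helpers above exist, assuming
+// IEEE floating point (the helper name below is made up): the division and the
+// subtraction must happen at run time so that the compiler cannot fold or reject
+// them; 1/0 then yields +inf, and inf-inf yields NaN, the only value that compares
+// unequal to itself.
+inline void svd_nonfinite_values_sketch()
+{
+  double inf = 1.0 / zero<double>();   // runtime division, hidden from constant folding
+  double nan = sub(inf, inf);          // runtime subtraction, hidden from ICC's optimizer
+  VERIFY( inf > (std::numeric_limits<double>::max)() );
+  VERIFY( nan != nan );
+}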
+
+// Regression test for bug 286: JacobiSVD loops indefinitely with some
+// matrices containing denormal numbers.
+void svd_underoverflow()
+{
+#if defined __INTEL_COMPILER
+// shut up warning #239: floating point underflow
+#pragma warning push
+#pragma warning disable 239
+#endif
+ Matrix2d M;
+ M << -7.90884e-313, -4.94e-324,
+ 0, 5.60844e-313;
+ SVD_DEFAULT(Matrix2d) svd;
+ svd.compute(M,ComputeFullU|ComputeFullV);
+ svd_check_full(M,svd);
+
+ // Check all 2x2 matrices made with the following coefficients:
+ VectorXd value_set(9);
+ value_set << 0, 1, -1, 5.60844e-313, -5.60844e-313, 4.94e-324, -4.94e-324, -4.94e-223, 4.94e-223;
+ Array4i id(0,0,0,0);
+ int k = 0;
+ do
+ {
+ M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3));
+ svd.compute(M,ComputeFullU|ComputeFullV);
+ svd_check_full(M,svd);
+
+ id(k)++;
+ if(id(k)>=value_set.size())
+ {
+ while(k<3 && id(k)>=value_set.size()) id(++k)++;
+ id.head(k).setZero();
+ k=0;
+ }
+
+ } while((id<int(value_set.size())).all());
+
+#if defined __INTEL_COMPILER
+#pragma warning pop
+#endif
+
+ // Check for overflow:
+ Matrix3d M3;
+ M3 << 4.4331978442502944e+307, -5.8585363752028680e+307, 6.4527017443412964e+307,
+ 3.7841695601406358e+307, 2.4331702789740617e+306, -3.5235707140272905e+307,
+ -8.7190887618028355e+307, -7.3453213709232193e+307, -2.4367363684472105e+307;
+
+ SVD_DEFAULT(Matrix3d) svd3;
+ svd3.compute(M3,ComputeFullU|ComputeFullV); // just check we don't loop indefinitely
+ svd_check_full(M3,svd3);
+}
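+
+// A small sketch (illustrative only) of why the constants above are stressful,
+// assuming IEEE-754 doubles: values such as 4.94e-324 lie below the smallest
+// normalized double, so the exhaustive 2x2 loop above mixes denormals, zeros and
+// normal values to make sure no combination hangs or aborts.
+inline void svd_denormal_constants_sketch()
+{
+  const double denorm = 4.94e-324;   // roughly std::numeric_limits<double>::denorm_min()
+  VERIFY( denorm > 0.0 );
+  VERIFY( denorm < (std::numeric_limits<double>::min)() );   // below the normalized range
+}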
+
+// void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
+
+template<typename MatrixType>
+void svd_all_trivial_2x2( void (*cb)(const MatrixType&,bool) )
+{
+ MatrixType M;
+ VectorXd value_set(3);
+ value_set << 0, 1, -1;
+ Array4i id(0,0,0,0);
+ int k = 0;
+ do
+ {
+ M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3));
+
+ cb(M,false);
+
+ id(k)++;
+ if(id(k)>=value_set.size())
+ {
+ while(k<3 && id(k)>=value_set.size()) id(++k)++;
+ id.head(k).setZero();
+ k=0;
+ }
+
+ } while((id<int(value_set.size())).all());
+}
+
+void svd_preallocate()
+{
+ Vector3f v(3.f, 2.f, 1.f);
+ MatrixXf m = v.asDiagonal();
+
+ internal::set_is_malloc_allowed(false);
+ VERIFY_RAISES_ASSERT(VectorXf tmp(10);)
+ SVD_DEFAULT(MatrixXf) svd;
+ internal::set_is_malloc_allowed(true);
+ svd.compute(m);
+ VERIFY_IS_APPROX(svd.singularValues(), v);
+
+ SVD_DEFAULT(MatrixXf) svd2(3,3);
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+ VERIFY_IS_APPROX(svd2.singularValues(), v);
+ VERIFY_RAISES_ASSERT(svd2.matrixU());
+ VERIFY_RAISES_ASSERT(svd2.matrixV());
+ svd2.compute(m, ComputeFullU | ComputeFullV);
+ VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
+ VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+
+ SVD_DEFAULT(MatrixXf) svd3(3,3,ComputeFullU|ComputeFullV);
+  internal::set_is_malloc_allowed(false);
+  svd3.compute(m, ComputeFullU|ComputeFullV);
+  internal::set_is_malloc_allowed(true);
+  VERIFY_IS_APPROX(svd3.singularValues(), v);
+  VERIFY_IS_APPROX(svd3.matrixU(), Matrix3f::Identity());
+  VERIFY_IS_APPROX(svd3.matrixV(), Matrix3f::Identity());
+  internal::set_is_malloc_allowed(false);
+  svd3.compute(m, ComputeFullU|ComputeFullV);
+ internal::set_is_malloc_allowed(true);
+}
+
+template<typename SvdType,typename MatrixType>
+void svd_verify_assert(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
+ RhsType rhs(rows);
+ SvdType svd;
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.singularValues())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ MatrixType a = MatrixType::Zero(rows, cols);
+ a.setZero();
+ svd.compute(a, 0);
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ svd.singularValues();
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+
+ if (ColsAtCompileTime == Dynamic)
+ {
+ svd.compute(a, ComputeThinU);
+ svd.matrixU();
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ svd.compute(a, ComputeThinV);
+ svd.matrixV();
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ }
+ else
+ {
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
+ }
+}
+
+#undef SVD_DEFAULT
+#undef SVD_FOR_MIN_NORM
diff --git a/test/upperbidiagonalization.cpp b/test/upperbidiagonalization.cpp
index d15bf588b..847b34b55 100644
--- a/test/upperbidiagonalization.cpp
+++ b/test/upperbidiagonalization.cpp
@@ -35,7 +35,7 @@ void test_upperbidiagonalization()
CALL_SUBTEST_1( upperbidiag(MatrixXf(3,3)) );
CALL_SUBTEST_2( upperbidiag(MatrixXd(17,12)) );
CALL_SUBTEST_3( upperbidiag(MatrixXcf(20,20)) );
- CALL_SUBTEST_4( upperbidiag(MatrixXcd(16,15)) );
+ CALL_SUBTEST_4( upperbidiag(Matrix<std::complex<double>,Dynamic,Dynamic,RowMajor>(16,15)) );
CALL_SUBTEST_5( upperbidiag(Matrix<float,6,4>()) );
CALL_SUBTEST_6( upperbidiag(Matrix<float,5,5>()) );
CALL_SUBTEST_7( upperbidiag(Matrix<double,4,3>()) );
diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp
index b069f0771..2f839cf51 100644
--- a/test/vectorization_logic.cpp
+++ b/test/vectorization_logic.cpp
@@ -27,19 +27,37 @@ std::string demangle_unrolling(int t)
if(t==CompleteUnrolling) return "CompleteUnrolling";
return "?";
}
+std::string demangle_flags(int f)
+{
+ std::string res;
+ if(f&RowMajorBit) res += " | RowMajor";
+ if(f&PacketAccessBit) res += " | Packet";
+ if(f&LinearAccessBit) res += " | Linear";
+ if(f&LvalueBit) res += " | Lvalue";
+ if(f&DirectAccessBit) res += " | Direct";
+ if(f&AlignedBit) res += " | Aligned";
+ if(f&NestByRefBit) res += " | NestByRef";
+ if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";
+
+ return res;
+}
template<typename Dst, typename Src>
bool test_assign(const Dst&, const Src&, int traversal, int unrolling)
{
- internal::assign_traits<Dst,Src>::debug();
- bool res = internal::assign_traits<Dst,Src>::Traversal==traversal
- && internal::assign_traits<Dst,Src>::Unrolling==unrolling;
+ typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar> > traits;
+ bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
+ std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl;
+ std::cerr << " " << demangle_flags(internal::evaluator<Src>::Flags) << std::endl;
+ std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl;
+ std::cerr << " " << demangle_flags(internal::evaluator<Dst>::Flags) << std::endl;
+ traits::debug();
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
- << " got " << demangle_traversal(internal::assign_traits<Dst,Src>::Traversal) << "\n";
+ << " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
- << " got " << demangle_unrolling(internal::assign_traits<Dst,Src>::Unrolling) << "\n";
+ << " got " << demangle_unrolling(traits::Unrolling) << "\n";
}
return res;
}
@@ -47,15 +65,19 @@ bool test_assign(const Dst&, const Src&, int traversal, int unrolling)
template<typename Dst, typename Src>
bool test_assign(int traversal, int unrolling)
{
- internal::assign_traits<Dst,Src>::debug();
- bool res = internal::assign_traits<Dst,Src>::Traversal==traversal
- && internal::assign_traits<Dst,Src>::Unrolling==unrolling;
+ typedef internal::copy_using_evaluator_traits<internal::evaluator<Dst>,internal::evaluator<Src>, internal::assign_op<typename Dst::Scalar> > traits;
+ bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
+ std::cerr << "Src: " << demangle_flags(Src::Flags) << std::endl;
+ std::cerr << " " << demangle_flags(internal::evaluator<Src>::Flags) << std::endl;
+ std::cerr << "Dst: " << demangle_flags(Dst::Flags) << std::endl;
+ std::cerr << " " << demangle_flags(internal::evaluator<Dst>::Flags) << std::endl;
+ traits::debug();
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
- << " got " << demangle_traversal(internal::assign_traits<Dst,Src>::Traversal) << "\n";
+ << " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
- << " got " << demangle_unrolling(internal::assign_traits<Dst,Src>::Unrolling) << "\n";
+ << " got " << demangle_unrolling(traits::Unrolling) << "\n";
}
return res;
}
@@ -63,10 +85,15 @@ bool test_assign(int traversal, int unrolling)
template<typename Xpr>
bool test_redux(const Xpr&, int traversal, int unrolling)
{
- typedef internal::redux_traits<internal::scalar_sum_op<typename Xpr::Scalar>,Xpr> traits;
+ typedef internal::redux_traits<internal::scalar_sum_op<typename Xpr::Scalar>,internal::redux_evaluator<Xpr> > traits;
+
bool res = traits::Traversal==traversal && traits::Unrolling==unrolling;
if(!res)
{
+ std::cerr << demangle_flags(Xpr::Flags) << std::endl;
+ std::cerr << demangle_flags(internal::evaluator<Xpr>::Flags) << std::endl;
+ traits::debug();
+
std::cerr << " Expected Traversal == " << demangle_traversal(traversal)
<< " got " << demangle_traversal(traits::Traversal) << "\n";
std::cerr << " Expected Unrolling == " << demangle_unrolling(unrolling)
diff --git a/test/vectorwiseop.cpp b/test/vectorwiseop.cpp
index 6cd1acdda..1631d54c4 100644
--- a/test/vectorwiseop.cpp
+++ b/test/vectorwiseop.cpp
@@ -104,8 +104,8 @@ template<typename ArrayType> void vectorwiseop_array(const ArrayType& m)
m2 = m1;
// yes, there might be an aliasing issue there but ".rowwise() /="
- // is suppposed to evaluate " m2.colwise().sum()" into to temporary to avoid
- // evaluating the reducions multiple times
+ // is supposed to evaluate " m2.colwise().sum()" into a temporary to avoid
+ // evaluating the reduction multiple times
if(ArrayType::RowsAtCompileTime>2 || ArrayType::RowsAtCompileTime==Dynamic)
{
m2.rowwise() /= m2.colwise().sum();