author     Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-09-15 09:18:16 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-09-15 09:18:16 -0700
commit     10a79ca3a396f040c2324a5078c7e666bc904bed (patch)
tree       7fb700dc5686d7b0f2687b71132bc3daa595eb9f
parent     efdff157493826bbcc023a85e08596fd58d7997a (diff)
parent     9452eb38f812194a676edc1b9eb9d08b7bc0f297 (diff)
Merged latest updates from the Eigen trunk.
-rw-r--r--  Eigen/Core | 2
-rw-r--r--  Eigen/SVD | 1
-rw-r--r--  Eigen/src/Core/ArrayWrapper.h | 10
-rw-r--r--  Eigen/src/Core/GeneralProduct.h | 4
-rw-r--r--  Eigen/src/Core/MathFunctions.h | 24
-rw-r--r--  Eigen/src/Core/MatrixBase.h | 1
-rw-r--r--  Eigen/src/Core/Redux.h | 2
-rw-r--r--  Eigen/src/Core/SelfCwiseBinaryOp.h | 10
-rw-r--r--  Eigen/src/Core/StableNorm.h | 22
-rw-r--r--  Eigen/src/Core/arch/AVX/PacketMath.h | 4
-rw-r--r--  Eigen/src/Core/arch/NEON/PacketMath.h | 4
-rw-r--r--  Eigen/src/Core/functors/BinaryFunctors.h | 14
-rw-r--r--  Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h | 2
-rw-r--r--  Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h | 4
-rw-r--r--  Eigen/src/Core/util/MKL_support.h | 32
-rw-r--r--  Eigen/src/Core/util/Macros.h | 9
-rw-r--r--  Eigen/src/Core/util/Memory.h | 2
-rw-r--r--  Eigen/src/Core/util/XprHelper.h | 10
-rw-r--r--  Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 18
-rw-r--r--  Eigen/src/Geometry/Homogeneous.h | 4
-rw-r--r--  Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 1
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h | 202
-rw-r--r--  Eigen/src/SVD/SVDBase.h (renamed from unsupported/Eigen/src/SVD/SVDBase.h) | 139
-rw-r--r--  Eigen/src/SVD/UpperBidiagonalization.h | 30
-rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | 99
-rw-r--r--  Eigen/src/SparseCore/SparseDenseProduct.h | 9
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h | 3
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h | 14
-rw-r--r--  Eigen/src/SparseQR/SparseQR.h | 19
-rw-r--r--  Eigen/src/plugins/ArrayCwiseUnaryOps.h | 18
-rw-r--r--  bench/bench_norm.cpp | 33
-rw-r--r--  cmake/FindEigen3.cmake | 9
-rw-r--r--  doc/CMakeLists.txt | 1
-rw-r--r--  doc/examples/CMakeLists.txt | 4
-rw-r--r--  doc/snippets/CMakeLists.txt | 6
-rw-r--r--  doc/snippets/DirectionWise_hnormalized.cpp | 7
-rw-r--r--  doc/snippets/MatrixBase_hnormalized.cpp | 6
-rw-r--r--  doc/snippets/MatrixBase_homogeneous.cpp | 6
-rw-r--r--  doc/snippets/VectorwiseOp_homogeneous.cpp | 7
-rw-r--r--  doc/special_examples/CMakeLists.txt | 16
-rw-r--r--  test/eigensolver_selfadjoint.cpp | 43
-rw-r--r--  test/jacobisvd.cpp | 52
-rw-r--r--  test/linearstructure.cpp | 26
-rw-r--r--  test/main.h | 31
-rw-r--r--  test/product.h | 8
-rw-r--r--  test/sparseqr.cpp | 2
-rw-r--r--  test/stable_norm.cpp | 69
-rw-r--r--  test/upperbidiagonalization.cpp | 2
-rw-r--r--  unsupported/Eigen/BDCSVD | 26
-rw-r--r--  unsupported/Eigen/MatrixFunctions | 21
-rw-r--r--  unsupported/Eigen/SVD | 35
-rw-r--r--  unsupported/Eigen/src/BDCSVD/BDCSVD.h (renamed from unsupported/Eigen/src/SVD/BDCSVD.h) | 48
-rw-r--r--  unsupported/Eigen/src/BDCSVD/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt (renamed from unsupported/Eigen/src/SVD/TODOBdcsvd.txt) | 0
-rw-r--r--  unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt (renamed from unsupported/Eigen/src/SVD/doneInBDCSVD.txt) | 0
-rw-r--r--  unsupported/Eigen/src/CMakeLists.txt | 1
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 156
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h | 8
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h | 19
-rw-r--r--  unsupported/Eigen/src/SVD/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/SVD/JacobiSVD.h | 782
-rw-r--r--  unsupported/doc/examples/CMakeLists.txt | 4
-rw-r--r--  unsupported/doc/snippets/CMakeLists.txt | 4
-rw-r--r--  unsupported/test/NonLinearOptimization.cpp | 22
-rw-r--r--  unsupported/test/levenberg_marquardt.cpp | 44
-rw-r--r--  unsupported/test/svd_common.h | 2
66 files changed, 882 insertions, 1343 deletions
diff --git a/Eigen/Core b/Eigen/Core
index 9a73fe37b..776b7faf3 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -42,7 +42,7 @@
#define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
#endif
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS)
#define EIGEN_EXCEPTIONS
#endif
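With the added !defined(EIGEN_EXCEPTIONS), the guard skips its own definition when the macro is already set, so a project can define EIGEN_EXCEPTIONS explicitly before including Eigen without risking a conflicting redefinition. A minimal sketch of that usage (assuming Eigen is on the include path):

#define EIGEN_EXCEPTIONS   // honoured by the guard patched above
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;
  std::cout << m.sum() << std::endl;   // 10
  return 0;
}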
diff --git a/Eigen/SVD b/Eigen/SVD
index 5eee46df5..c3d24286c 100644
--- a/Eigen/SVD
+++ b/Eigen/SVD
@@ -21,6 +21,7 @@
*/
#include "src/misc/Solve.h"
+#include "src/SVD/SVDBase.h"
#include "src/SVD/JacobiSVD.h"
#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
#include "src/SVD/JacobiSVD_MKL.h"
diff --git a/Eigen/src/Core/ArrayWrapper.h b/Eigen/src/Core/ArrayWrapper.h
index 4bb648024..28d7b7bd5 100644
--- a/Eigen/src/Core/ArrayWrapper.h
+++ b/Eigen/src/Core/ArrayWrapper.h
@@ -29,6 +29,11 @@ struct traits<ArrayWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef ArrayXpr XprKind;
+ // Let's remove NestByRefBit
+ enum {
+ Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+ Flags = Flags0 & ~NestByRefBit
+ };
};
}
@@ -166,6 +171,11 @@ struct traits<MatrixWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef MatrixXpr XprKind;
+ // Let's remove NestByRefBit
+ enum {
+ Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+ Flags = Flags0 & ~NestByRefBit
+ };
};
}
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
index 624b8b6e8..7179eb124 100644
--- a/Eigen/src/Core/GeneralProduct.h
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -231,7 +231,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dest.cols();
for (Index j=0; j<cols; ++j)
- func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
+ func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
}
// Row major
@@ -242,7 +242,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dest.rows();
for (Index i=0; i<rows; ++i)
- func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
+ func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
}
template<typename Lhs, typename Rhs>
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 20fc2be74..e9fed2e52 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -12,6 +12,15 @@
namespace Eigen {
+// On WINCE, std::abs is defined for int only, so let's define our own overloads:
+// This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too.
+#if defined(_WIN32_WCE) && defined(_MSC_VER) && _MSC_VER<=1500
+long abs(long x) { return (labs(x)); }
+double abs(double x) { return (fabs(x)); }
+float abs(float x) { return (fabsf(x)); }
+long double abs(long double x) { return (fabsl(x)); }
+#endif
+
namespace internal {
/** \internal \struct global_math_functions_filtering_base
@@ -308,10 +317,17 @@ struct hypot_impl
using std::sqrt;
RealScalar _x = abs(x);
RealScalar _y = abs(y);
- RealScalar p = (max)(_x, _y);
- if(p==RealScalar(0)) return 0;
- RealScalar q = (min)(_x, _y);
- RealScalar qp = q/p;
+ Scalar p, qp;
+ if(_x>_y)
+ {
+ p = _x;
+ qp = _y / p;
+ }
+ else
+ {
+ p = _y;
+ qp = _x / p;
+ }
return p * sqrt(RealScalar(1) + qp*qp);
}
};
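The rewritten hypot_impl divides the smaller magnitude by the larger one, so the value that gets squared never exceeds 1. A standalone sketch of the same idea (not the Eigen internals verbatim; an explicit zero check is added for clarity):

#include <cmath>
#include <cstdio>

// Overflow-safe hypot: scale by the larger magnitude before squaring.
double safe_hypot(double x, double y)
{
  double ax = std::abs(x), ay = std::abs(y);
  double p = ax > ay ? ax : ay;    // larger magnitude
  double q = ax > ay ? ay : ax;    // smaller magnitude
  if(p == 0.0) return 0.0;         // both inputs are zero
  double qp = q / p;               // |qp| <= 1, cannot overflow
  return p * std::sqrt(1.0 + qp * qp);
}

int main()
{
  // 5e200; a naive sqrt(x*x + y*y) would overflow to inf.
  std::printf("%g\n", safe_hypot(3e200, 4e200));
  return 0;
}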
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index f5987d194..3cb5e04fd 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -81,6 +81,7 @@ template<typename Derived> class MatrixBase
using Base::operator-=;
using Base::operator*=;
using Base::operator/=;
+ using Base::operator*;
typedef typename Base::CoeffReturnType CoeffReturnType;
typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index 5b82c9a65..c626946ba 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -207,7 +207,7 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
const Index packetSize = packet_traits<Scalar>::size;
const Index alignedStart = internal::first_aligned(mat);
enum {
- alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit)
+ alignment = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) || bool(Derived::Flags & AlignedBit)
? Aligned : Unaligned
};
const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h
index 65864adf8..8abdca4a5 100644
--- a/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -209,15 +209,9 @@ inline Derived& ArrayBase<Derived>::operator-=(const Scalar& other)
template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
- typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
- internal::scalar_quotient_op<Scalar>,
- internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
- SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
- Scalar actual_other;
- if(NumTraits<Scalar>::IsInteger) actual_other = other;
- else actual_other = Scalar(1)/other;
- tmp = PlainObject::Constant(rows(),cols(), actual_other);
+ SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+ tmp = PlainObject::Constant(rows(),cols(), other);
return derived();
}
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index 525620bad..64d43e1b1 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -20,7 +20,7 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
using std::max;
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
- if (maxCoeff>scale)
+ if(maxCoeff>scale)
{
ssq = ssq * numext::abs2(scale/maxCoeff);
Scalar tmp = Scalar(1)/maxCoeff;
@@ -29,12 +29,21 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
invScale = NumTraits<Scalar>::highest();
scale = Scalar(1)/invScale;
}
+ else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF
+ {
+ invScale = Scalar(1);
+ scale = maxCoeff;
+ }
else
{
scale = maxCoeff;
invScale = tmp;
}
}
+ else if(maxCoeff!=maxCoeff) // we got a NaN
+ {
+ scale = maxCoeff;
+ }
// TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
@@ -55,7 +64,7 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
using std::abs;
const Derived& vec(_vec.derived());
static bool initialized = false;
- static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+ static RealScalar b1, b2, s1m, s2m, rbig, relerr;
if(!initialized)
{
int ibeta, it, iemin, iemax, iexp;
@@ -84,7 +93,6 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
iexp = - ((iemax+it)/2);
s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
- overfl = rbig*s2m; // overflow boundary for abig
eps = RealScalar(pow(double(ibeta), 1-it));
relerr = sqrt(eps); // tolerance for neglecting asml
initialized = true;
@@ -101,13 +109,13 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
else if(ax < b1) asml += numext::abs2(ax*s1m);
else amed += numext::abs2(ax);
}
+ if(amed!=amed)
+ return amed; // we got a NaN
if(abig > RealScalar(0))
{
abig = sqrt(abig);
- if(abig > overfl)
- {
- return rbig;
- }
+ if(abig > rbig) // overflow, or *this contains INF values
+ return abig; // return INF
if(amed > RealScalar(0))
{
abig = abig/s2m;
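The intent of these changes is that stableNorm() and blueNorm() propagate NaN and return INF for overflowing input, instead of silently clamping to the largest finite value. A small usage sketch (assuming Eigen is available):

#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::Constant(4, 1e155);
  // norm() squares the entries and overflows; the scaled algorithms do not.
  std::cout << v.norm() << " vs " << v.stableNorm() << " vs " << v.blueNorm() << std::endl;

  v(1) = std::numeric_limits<double>::quiet_NaN();
  // With the changes above, the NaN is propagated rather than dropped.
  std::cout << v.stableNorm() << " " << v.blueNorm() << std::endl;
  return 0;
}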
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index 170302f7f..1591458a7 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -143,7 +143,7 @@ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f&
// so let's enforce it to generate a vfmadd231ps instruction since the most common use case is to accumulate
// the result of the product.
Packet8f res = c;
- asm("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+ __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
return res;
#else
return _mm256_fmadd_ps(a,b,c);
@@ -153,7 +153,7 @@ template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d&
#if defined(__clang__) || defined(__GNUC__)
// see above
Packet4d res = c;
- asm("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+ __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
return res;
#else
return _mm256_fmadd_pd(a,b,c);
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index 380b76ae9..0504c095c 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -52,12 +52,12 @@ typedef uint32x4_t Packet4ui;
// arm64 does have the pld instruction. If available, let's trust the __builtin_prefetch built-in function
// which available on LLVM and GCC (at least)
-#if (defined(__has_builtin) && __has_builtin(__builtin_prefetch)) || defined(__GNUC__)
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif !defined(__aarch64__)
- #define EIGEN_ARM_PREFETCH(ADDR) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+ #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index ba094f5d1..157d075a7 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -167,9 +167,17 @@ template<typename Scalar> struct scalar_hypot_op {
EIGEN_USING_STD_MATH(max);
EIGEN_USING_STD_MATH(min);
using std::sqrt;
- Scalar p = (max)(_x, _y);
- Scalar q = (min)(_x, _y);
- Scalar qp = q/p;
+ Scalar p, qp;
+ if(_x>_y)
+ {
+ p = _x;
+ qp = _y / p;
+ }
+ else
+ {
+ p = _y;
+ qp = _x / p;
+ }
return p * sqrt(Scalar(1) + qp*qp);
}
};
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h b/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
index 060af328e..b6ae729b2 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
@@ -53,6 +53,8 @@ template< \
int RhsStorageOrder, bool ConjugateRhs> \
struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \
{ \
+typedef gebp_traits<EIGTYPE,EIGTYPE> Traits; \
+\
static void run(Index rows, Index cols, Index depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
index ba41a1c99..4cc56a42f 100644
--- a/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (rows != depth) { \
\
- int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+ int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
@@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (cols != depth) { \
\
- int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+ int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
diff --git a/Eigen/src/Core/util/MKL_support.h b/Eigen/src/Core/util/MKL_support.h
index 8acca9c8c..1ef3b61db 100644
--- a/Eigen/src/Core/util/MKL_support.h
+++ b/Eigen/src/Core/util/MKL_support.h
@@ -76,6 +76,38 @@
#include <mkl_lapacke.h>
#define EIGEN_MKL_VML_THRESHOLD 128
+/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
+/* MKL_BLAS, etc are not defined in 11.2 */
+#ifdef MKL_DOMAIN_ALL
+#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
+#else
+#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
+#endif
+
+#ifdef MKL_DOMAIN_BLAS
+#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
+#else
+#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
+#endif
+
+#ifdef MKL_DOMAIN_FFT
+#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
+#else
+#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
+#endif
+
+#ifdef MKL_DOMAIN_VML
+#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
+#else
+#define EIGEN_MKL_DOMAIN_VML MKL_VML
+#endif
+
+#ifdef MKL_DOMAIN_PARDISO
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
+#else
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
+#endif
+
namespace Eigen {
typedef std::complex<double> dcomplex;
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index c80aa5129..8fdd7d898 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -107,6 +107,13 @@
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
+// Cross compiler wrapper around LLVM's __has_builtin
+#ifdef __has_builtin
+# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define EIGEN_HAS_BUILTIN(x) 0
+#endif
+
// A Clang feature extension to determine compiler features.
// We use it to determine 'cxx_rvalue_references'
#ifndef __has_feature
@@ -277,7 +284,7 @@ namespace Eigen {
#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
- #define EIGEN_ASM_COMMENT(X) asm("#" X)
+ #define EIGEN_ASM_COMMENT(X) __asm__("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
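The same wrapper pattern, sketched outside of Eigen (the MY_* macro names below are illustrative, not part of any library): on compilers that do not define __has_builtin, the wrapper expands to 0 so it can safely appear inside an #if expression.

#include <cstdio>

#ifdef __has_builtin
#  define MY_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define MY_HAS_BUILTIN(x) 0
#endif

#if MY_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
#  define MY_PREFETCH(addr) __builtin_prefetch(addr)
#else
#  define MY_PREFETCH(addr) ((void)0)   // graceful no-op elsewhere
#endif

int main()
{
  int data[64] = {0};
  MY_PREFETCH(&data[32]);
  std::printf("%d\n", data[0]);
  return 0;
}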
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 810ee786b..30133ba67 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -64,7 +64,7 @@
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
#include <unistd.h>
- #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#endif
#endif
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 1b3e122e1..7c77b2263 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -383,13 +383,21 @@ struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCo
const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar) const
{
+#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
+ EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
+#endif
return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
(*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
}
inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar, const Derived& matrix)
- { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
+ {
+#ifdef EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
+ EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN
+#endif
+ return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar);
+ }
};
template<typename XprType, typename CastType> struct cast_return_type
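A hypothetical use of the new EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN hook: its body is spliced into the mixed-scalar operator* shown above, so a client could, for instance, count how often that code path is taken. The counter name below is made up, and which overload a given product resolves to depends on the scalar types involved.

// Define the plugin before including any Eigen header.
static long g_mixed_scalar_products = 0;
#define EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN ++g_mixed_scalar_products;

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXcd m = Eigen::MatrixXcd::Identity(2, 2);
  Eigen::MatrixXcd r = 2.0 * m;   // real scalar times complex matrix
  std::cout << r(0,0) << ", hook hit " << g_mixed_scalar_products << " time(s)\n";
  return 0;
}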
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index fc8ecaa6f..a6bbdac6b 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -605,7 +605,6 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
if(computeEigenvectors)
{
Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
- safeNorm2 *= safeNorm2;
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.setIdentity();
@@ -619,7 +618,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
int k = d0 > d1 ? 2 : 0;
- d0 = d0 > d1 ? d1 : d0;
+ d0 = d0 > d1 ? d0 : d1;
tmp.diagonal().array () -= eivals(k);
VectorType cross;
@@ -627,19 +626,25 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(k) = cross / sqrt(n);
+ }
else
{
// the input matrix and/or the eigenvaues probably contains some inf/NaN,
@@ -659,12 +664,16 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
tmp.diagonal().array() -= eivals(1);
if(d0<=Eigen::NumTraits<Scalar>::epsilon())
+ {
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
+ }
else
{
- n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm();
+ n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
if(n>safeNorm2)
+ {
eivecs.col(1) = cross / sqrt(n);
+ }
else
{
n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
@@ -678,13 +687,14 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
else
{
// we should never reach this point,
- // if so the last two eigenvalues are likely to ve very closed to each other
+ // if so the last two eigenvalues are likely to be very close to each other
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
}
}
}
// make sure that eivecs[1] is orthogonal to eivecs[2]
+ // FIXME: this step should not be needed
Scalar d = eivecs.col(1).dot(eivecs.col(k));
eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
}
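These hunks touch the closed-form 3x3 path used by SelfAdjointEigenSolver::computeDirect(). A basic usage sketch of that entry point (assuming the Eigenvalues module is available):

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::Matrix3f A;
  A << 2, 1, 0,
       1, 2, 1,
       0, 1, 2;
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
  es.computeDirect(A);   // analytic 3x3 path, as opposed to the iterative compute()
  std::cout << "eigenvalues:  " << es.eigenvalues().transpose() << std::endl;
  std::cout << "eigenvectors:\n" << es.eigenvectors() << std::endl;
  return 0;
}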
diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h
index 00e71d190..97dd21d15 100644
--- a/Eigen/src/Geometry/Homogeneous.h
+++ b/Eigen/src/Geometry/Homogeneous.h
@@ -120,7 +120,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
* Example: \include MatrixBase_homogeneous.cpp
* Output: \verbinclude MatrixBase_homogeneous.out
*
- * \sa class Homogeneous
+ * \sa VectorwiseOp::homogeneous(), class Homogeneous
*/
template<typename Derived>
inline typename MatrixBase<Derived>::HomogeneousReturnType
@@ -137,7 +137,7 @@ MatrixBase<Derived>::homogeneous() const
* Example: \include VectorwiseOp_homogeneous.cpp
* Output: \verbinclude VectorwiseOp_homogeneous.out
*
- * \sa MatrixBase::homogeneous() */
+ * \sa MatrixBase::homogeneous(), class Homogeneous */
template<typename ExpressionType, int Direction>
inline Homogeneous<ExpressionType,Direction>
VectorwiseOp<ExpressionType,Direction>::homogeneous() const
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index dc524c225..27824b9d5 100644
--- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
int maxIters = iters;
int n = mat.cols();
- x = precond.solve(x);
VectorType r = rhs - mat * x;
VectorType r0 = r;
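Removing that line means the vector passed as an initial guess is now used as the starting iterate as-is, instead of being run through the preconditioner first. A sketch of the affected call path via solveWithGuess() (the system below is made up for illustration):

#include <Eigen/SparseCore>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 100;
  SpMat A(n, n);
  for(int i = 0; i < n; ++i) A.insert(i, i) = 4.0;   // trivial diagonal system
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::BiCGSTAB<SpMat> solver;
  solver.compute(A);
  Eigen::VectorXd x0 = Eigen::VectorXd::Zero(n);
  Eigen::VectorXd x = solver.solveWithGuess(b, x0);  // x0 is now used directly
  std::cout << "#iterations: " << solver.iterations()
            << "  error: " << solver.error() << std::endl;
  return 0;
}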
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 412daa746..3ab8a4c8a 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -424,24 +425,31 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
- if(t == RealScalar(0))
+
+ if(d == RealScalar(0))
{
- rot1.c() = RealScalar(0);
- rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ rot1.s() = RealScalar(0);
+ rot1.c() = RealScalar(1);
}
else
{
- RealScalar t2d2 = numext::hypot(t,d);
- rot1.c() = abs(t)/t2d2;
- rot1.s() = d/t2d2;
- if(t<RealScalar(0))
- rot1.s() = -rot1.s();
+ // If d!=0, then t/d cannot overflow because the magnitude of the
+ // entries forming d are not too small compared to the ones forming t.
+ RealScalar u = t / d;
+ rot1.s() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
+ rot1.c() = rot1.s() * u;
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
*j_left = rot1 * j_right->transpose();
}
+template<typename _MatrixType, int QRPreconditioner>
+struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
+{
+ typedef _MatrixType MatrixType;
+};
+
} // end namespace internal
/** \ingroup SVD_Module
@@ -498,7 +506,9 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
* \sa MatrixBase::jacobiSvd()
*/
template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+ : public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
{
+ typedef SVDBase<JacobiSVD> Base;
public:
typedef _MatrixType MatrixType;
@@ -515,13 +525,10 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
MatrixOptions = MatrixType::Options
};
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename Base::MatrixUType MatrixUType;
+ typedef typename Base::MatrixVType MatrixVType;
+ typedef typename Base::SingularValuesType SingularValuesType;
+
typedef typename internal::plain_row_type<MatrixType>::type RowType;
typedef typename internal::plain_col_type<MatrixType>::type ColType;
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
@@ -534,11 +541,6 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
* perform decompositions via JacobiSVD::compute(const MatrixType&).
*/
JacobiSVD()
- : m_isInitialized(false),
- m_isAllocated(false),
- m_usePrescribedThreshold(false),
- m_computationOptions(0),
- m_rows(-1), m_cols(-1), m_diagSize(0)
{}
@@ -549,11 +551,6 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
* \sa JacobiSVD()
*/
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : m_isInitialized(false),
- m_isAllocated(false),
- m_usePrescribedThreshold(false),
- m_computationOptions(0),
- m_rows(-1), m_cols(-1)
{
allocate(rows, cols, computationOptions);
}
@@ -569,11 +566,6 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : m_isInitialized(false),
- m_isAllocated(false),
- m_usePrescribedThreshold(false),
- m_computationOptions(0),
- m_rows(-1), m_cols(-1)
{
compute(matrix, computationOptions);
}
@@ -601,54 +593,6 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
return compute(matrix, m_computationOptions);
}
- /** \returns the \a U matrix.
- *
- * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
- * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
- *
- * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
- *
- * This method asserts that you asked for \a U to be computed.
- */
- const MatrixUType& matrixU() const
- {
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
- eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
- return m_matrixU;
- }
-
- /** \returns the \a V matrix.
- *
- * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
- * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for ComputeThinV.
- *
- * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
- *
- * This method asserts that you asked for \a V to be computed.
- */
- const MatrixVType& matrixV() const
- {
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
- eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
- return m_matrixV;
- }
-
- /** \returns the vector of singular values.
- *
- * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
- * returned vector has size \a m. Singular values are always sorted in decreasing order.
- */
- const SingularValuesType& singularValues() const
- {
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
- return m_singularValues;
- }
-
- /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
- inline bool computeU() const { return m_computeFullU || m_computeThinU; }
- /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
- inline bool computeV() const { return m_computeFullV || m_computeThinV; }
-
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
@@ -666,94 +610,31 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
}
-
- /** \returns the number of singular values that are not exactly 0 */
- Index nonzeroSingularValues() const
- {
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
- return m_nonzeroSingularValues;
- }
- /** \returns the rank of the matrix of which \c *this is the SVD.
- *
- * \note This method has to determine which singular values should be considered nonzero.
- * For that, it uses the threshold value that you can control by calling
- * setThreshold(const RealScalar&).
- */
- inline Index rank() const
- {
- using std::abs;
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
- if(m_singularValues.size()==0) return 0;
- RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
- Index i = m_nonzeroSingularValues-1;
- while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
- return i+1;
- }
+ using Base::computeU;
+ using Base::computeV;
- /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
- * which need to determine when singular values are to be considered nonzero.
- * This is not used for the SVD decomposition itself.
- *
- * When it needs to get the threshold value, Eigen calls threshold().
- * The default is \c NumTraits<Scalar>::epsilon()
- *
- * \param threshold The new value to use as the threshold.
- *
- * A singular value will be considered nonzero if its value is strictly greater than
- * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$.
- *
- * If you want to come back to the default behavior, call setThreshold(Default_t)
- */
- JacobiSVD& setThreshold(const RealScalar& threshold)
- {
- m_usePrescribedThreshold = true;
- m_prescribedThreshold = threshold;
- return *this;
- }
-
- /** Allows to come back to the default behavior, letting Eigen use its default formula for
- * determining the threshold.
- *
- * You should pass the special object Eigen::Default as parameter here.
- * \code svd.setThreshold(Eigen::Default); \endcode
- *
- * See the documentation of setThreshold(const RealScalar&).
- */
- JacobiSVD& setThreshold(Default_t)
- {
- m_usePrescribedThreshold = false;
- return *this;
- }
-
- /** Returns the threshold that will be used by certain methods such as rank().
- *
- * See the documentation of setThreshold(const RealScalar&).
- */
- RealScalar threshold() const
- {
- eigen_assert(m_isInitialized || m_usePrescribedThreshold);
- return m_usePrescribedThreshold ? m_prescribedThreshold
- : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
- }
-
- inline Index rows() const { return m_rows; }
- inline Index cols() const { return m_cols; }
-
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
protected:
- MatrixUType m_matrixU;
- MatrixVType m_matrixV;
- SingularValuesType m_singularValues;
+ using Base::m_matrixU;
+ using Base::m_matrixV;
+ using Base::m_singularValues;
+ using Base::m_isInitialized;
+ using Base::m_isAllocated;
+ using Base::m_usePrescribedThreshold;
+ using Base::m_computeFullU;
+ using Base::m_computeThinU;
+ using Base::m_computeFullV;
+ using Base::m_computeThinV;
+ using Base::m_computationOptions;
+ using Base::m_nonzeroSingularValues;
+ using Base::m_rows;
+ using Base::m_cols;
+ using Base::m_diagSize;
+ using Base::m_prescribedThreshold;
WorkMatrixType m_workMatrix;
- bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
- bool m_computeFullU, m_computeThinU;
- bool m_computeFullV, m_computeThinV;
- unsigned int m_computationOptions;
- Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
- RealScalar m_prescribedThreshold;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
@@ -861,7 +742,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
EIGEN_USING_STD_MATH(max);
RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
abs(m_workMatrix.coeff(q,q))));
- if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
+ if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
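Moving these members into SVDBase does not change the public interface; typical JacobiSVD usage still looks like the following sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 3);
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << "singular values: " << svd.singularValues().transpose() << std::endl;

  Eigen::VectorXf b = Eigen::VectorXf::Ones(4);
  std::cout << "least-squares solution: " << svd.solve(b).transpose() << std::endl;
  return 0;
}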
diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index fd8af3b8c..61b01fb8a 100644
--- a/unsupported/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
@@ -12,8 +13,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_SVD_H
-#define EIGEN_SVD_H
+#ifndef EIGEN_SVDBASE_H
+#define EIGEN_SVDBASE_H
namespace Eigen {
/** \ingroup SVD_Module
@@ -21,9 +22,10 @@ namespace Eigen {
*
* \class SVDBase
*
- * \brief Mother class of SVD classes algorithms
+ * \brief Base class of SVD algorithms
+ *
+ * \tparam Derived the type of the actual SVD decomposition
*
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
@@ -42,12 +44,12 @@ namespace Eigen {
* terminate in finite (and reasonable) time.
* \sa MatrixBase::genericSvd()
*/
-template<typename _MatrixType>
+template<typename Derived>
class SVDBase
{
public:
- typedef _MatrixType MatrixType;
+ typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
@@ -61,46 +63,16 @@ public:
MatrixOptions = MatrixType::Options
};
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
- MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
- WorkMatrixType;
-
-
-
-
- /** \brief Method performing the decomposition of given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- //virtual SVDBase& compute(const MatrixType& matrix) = 0;
- SVDBase& compute(const MatrixType& matrix);
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns the \a U matrix.
*
- * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
*
* The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
@@ -141,26 +113,84 @@ public:
return m_singularValues;
}
-
-
/** \returns the number of singular values that are not exactly 0 */
Index nonzeroSingularValues() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_nonzeroSingularValues;
}
+
+ /** \returns the rank of the matrix of which \c *this is the SVD.
+ *
+ * \note This method has to determine which singular values should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ using std::abs;
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ if(m_singularValues.size()==0) return 0;
+ RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
+ Index i = m_nonzeroSingularValues-1;
+ while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+ return i+1;
+ }
+
+ /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
+ * which need to determine when singular values are to be considered nonzero.
+ * This is not used for the SVD decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold().
+ * The default is \c NumTraits<Scalar>::epsilon()
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+ * A singular value will be considered nonzero if its value is strictly greater than
+ * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
+ */
+ Derived& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return derived();
+ }
+
+ /** Allows to come back to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code svd.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ Derived& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return derived();
+ }
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ }
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
inline bool computeU() const { return m_computeFullU || m_computeThinU; }
/** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
inline bool computeV() const { return m_computeFullV || m_computeThinV; }
-
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
-
protected:
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
@@ -168,12 +198,12 @@ protected:
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
- bool m_isInitialized, m_isAllocated;
+ bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
-
+ RealScalar m_prescribedThreshold;
/** \brief Default Constructor.
*
@@ -182,8 +212,9 @@ protected:
SVDBase()
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
- m_rows(-1), m_cols(-1)
+ m_rows(-1), m_cols(-1), m_diagSize(0)
{}
@@ -220,17 +251,13 @@ bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computat
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
- m_matrixU.resize(m_rows, m_computeFullU ? m_rows
- : m_computeThinU ? m_diagSize
- : 0);
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
if(ColsAtCompileTime==Dynamic)
- m_matrixV.resize(m_cols, m_computeFullV ? m_cols
- : m_computeThinV ? m_diagSize
- : 0);
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
return false;
}
}// end namespace
-#endif // EIGEN_SVD_H
+#endif // EIGEN_SVDBASE_H
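rank(), setThreshold() and threshold() now live in SVDBase and are inherited by every SVD implementation deriving from it. A short sketch of that shared machinery (the matrix and threshold value are illustrative):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(3, 3);
  A << 1, 2, 3,
       2, 4, 6,   // second row is a multiple of the first => rank 2
       1, 0, 1;
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);
  std::cout << "rank with default threshold: " << svd.rank() << std::endl;
  svd.setThreshold(1e-2);                       // inherited from SVDBase
  std::cout << "rank with custom threshold:  " << svd.rank() << std::endl;
  return 0;
}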
diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h
index 40067682c..64906bf0c 100644
--- a/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/Eigen/src/SVD/UpperBidiagonalization.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -153,14 +154,19 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
typename MatrixType::Index bs,
- Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic> > X,
- Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic> > Y)
+ Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
+ traits<MatrixType>::Flags & RowMajorBit> > X,
+ Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
+ traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
- typedef Ref<Matrix<Scalar, Dynamic, 1> > SubColumnType;
- typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, InnerStride<> > SubRowType;
- typedef Ref<Matrix<Scalar, Dynamic, Dynamic> > SubMatType;
+ enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
+ typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
+ typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
+ typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
+ typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
+ typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
Index brows = A.rows();
Index bcols = A.cols();
@@ -287,8 +293,18 @@ void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagona
Index cols = A.cols();
Index size = (std::min)(rows, cols);
- Matrix<Scalar,MatrixType::RowsAtCompileTime,Dynamic,ColMajor,MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
- Matrix<Scalar,MatrixType::ColsAtCompileTime,Dynamic,ColMajor,MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
+ // X and Y are work space
+ enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
+ Matrix<Scalar,
+ MatrixType::RowsAtCompileTime,
+ Dynamic,
+ StorageOrder,
+ MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
+ Matrix<Scalar,
+ MatrixType::ColsAtCompileTime,
+ Dynamic,
+ StorageOrder,
+ MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 5c320e2d2..67bc33a93 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
-static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar Scalar;
typedef typename remove_all<Lhs>::type::Index Index;
@@ -24,10 +24,12 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
-
- std::vector<bool> mask(rows,false);
- Matrix<Scalar,Dynamic,1> values(rows);
- Matrix<Index,Dynamic,1> indices(rows);
+
+ ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
+
+ std::memset(mask,0,sizeof(bool)*rows);
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
@@ -64,53 +66,51 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
values[i] += x * y;
}
}
-
- // unordered insertion
- for(Index k=0; k<nnz; ++k)
- {
- Index i = indices[k];
- res.insertBackByOuterInnerUnordered(j,i) = values[i];
- mask[i] = false;
- }
-
-#if 0
- // alternative ordered insertion code:
-
- Index t200 = rows/(log2(200)*1.39);
- Index t = (rows*100)/139;
-
- // FIXME reserve nnz non zeros
- // FIXME implement fast sort algorithms for very small nnz
- // if the result is sparse enough => use a quick sort
- // otherwise => loop through the entire vector
- // In order to avoid to perform an expensive log2 when the
- // result is clearly very sparse we use a linear bound up to 200.
- //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
- //res.startVec(j);
- if(true)
+ if(!sortedInsertion)
{
- if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+ // unordered insertion
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
- res.insertBackByOuterInner(j,i) = values[i];
+ res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
}
else
{
- // dense path
- for(Index i=0; i<rows; ++i)
+ // alternative ordered insertion code:
+ const Index t200 = rows/11; // 11 == (log2(200)*1.39)
+ const Index t = (rows*100)/139;
+
+ // FIXME reserve nnz non zeros
+ // FIXME implement faster sorting algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+ if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
{
- if(mask[i])
+ if(nnz>1) std::sort(indices,indices+nnz);
+ for(Index k=0; k<nnz; ++k)
{
- mask[i] = false;
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
+ mask[i] = false;
+ }
+ }
+ else
+ {
+ // dense path
+ for(Index i=0; i<rows; ++i)
+ {
+ if(mask[i])
+ {
+ mask[i] = false;
+ res.insertBackByOuterInner(j,i) = values[i];
+ }
}
}
}
-#endif
-
}
res.finalize();
}
@@ -135,12 +135,25 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
- ColMajorMatrix resCol(lhs.rows(),rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
- // sort the non zeros:
- RowMajorMatrix resRow(resCol);
- res = resRow;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrixAux;
+ typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime>::type ColMajorMatrix;
+
+ // FIXME, the following heuristic is probably not very good.
+ if(lhs.rows()>=rhs.cols())
+ {
+ ColMajorMatrix resCol(lhs.rows(),rhs.cols());
+ // perform sorted insertion
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
+ res = resCol.markAsRValue();
+ }
+ else
+ {
+ ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
+ // resort to a transpose to sort the entries
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
+ RowMajorMatrix resRow(resCol);
+ res = resRow.markAsRValue();
+ }
}
};
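Both branches are reached through the ordinary sparse*sparse product. A small sketch (the matrices are arbitrary; with lhs.rows() >= rhs.cols(), as here, the new sorted-insertion path is the one intended to be taken):

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // column-major by default
  SpMat A(1000, 50), B(50, 1000);
  A.insert(0, 0) = 1.0;   A.insert(999, 49) = 2.0;
  B.insert(0, 0) = 3.0;   B.insert(49, 999) = 4.0;

  SpMat C = A * B;   // goes through conservative_sparse_sparse_product_impl
  std::cout << "non-zeros in C: " << C.nonZeros() << std::endl;   // 2
  return 0;
}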
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 4a7813296..d40e966c1 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -318,15 +318,6 @@ class DenseTimeSparseProduct
DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
};
-// sparse * dense
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
-
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 1050cf3f1..fb5025049 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -358,7 +358,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
/** sparse * dense (returns a dense object unless it is an outer product) */
template<typename OtherDerived>
const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
- operator*(const MatrixBase<OtherDerived> &other) const;
+ operator*(const MatrixBase<OtherDerived> &other) const
+ { return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 7e15c814b..0b1b389ce 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -109,7 +109,7 @@ class SparseVector
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
- return coeff(IsColVector ? row : col);
+ return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
@@ -151,6 +151,18 @@ class SparseVector
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
+
+ Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ return insertBackUnordered(inner);
+ }
+ inline Scalar& insertBackUnordered(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
inline Scalar& insert(Index row, Index col)
{
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
index 4c6553bf2..002b4824b 100644
--- a/Eigen/src/SparseQR/SparseQR.h
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -75,7 +75,7 @@ class SparseQR
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
public:
- SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }
/** Construct a QR factorization of the matrix \a mat.
@@ -84,7 +84,7 @@ class SparseQR
*
* \sa compute()
*/
- SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{
compute(mat);
}
@@ -262,6 +262,7 @@ class SparseQR
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
+ bool m_isEtreeOk; // whether the elimination tree matches the initial input matrix
template <typename, typename > friend struct SparseQR_QProduct;
template <typename > friend struct SparseQRMatrixQReturnType;
@@ -297,6 +298,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
// Compute the column elimination tree of the permuted matrix
m_outputPerm_c = m_perm_c.inverse();
internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
m_R.resize(m, n);
m_Q.resize(m, diagSize);
@@ -330,6 +332,15 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
ScalarVector tval(m); // The dense vector used to compute the current column
RealScalar pivotThreshold = m_threshold;
+
+ m_R.setZero();
+ m_Q.setZero();
+ if(!m_isEtreeOk)
+ {
+ m_outputPerm_c = m_perm_c.inverse();
+ internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
+ }
m_pmat = mat;
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
@@ -447,7 +458,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
}
} // End update current column
- Scalar tau;
+ Scalar tau = RealScalar(0);
RealScalar beta = 0;
if(nonzeroCol < diagSize)
@@ -461,7 +472,6 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
{
- tau = RealScalar(0);
beta = numext::real(c0);
tval(Qidx(0)) = 1;
}
@@ -514,6 +524,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Recompute the column elimination tree
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+ m_isEtreeOk = false;
}
}
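
With the new m_isEtreeOk flag, factorize() can be called repeatedly after a single analyzePattern() as long as the sparsity pattern of the input does not change; the column elimination tree is rebuilt on the fly only when column pivoting has invalidated it. A minimal usage sketch, not part of the patch (matrix contents are arbitrary, only the reuse pattern matters):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  using namespace Eigen;
  typedef SparseMatrix<double> SpMat;

  SpMat A(4,3);                         // small example matrix
  A.insert(0,0) = 3; A.insert(1,0) = 1;
  A.insert(1,1) = 2; A.insert(2,1) = 5;
  A.insert(2,2) = 4; A.insert(3,2) = 1;
  A.makeCompressed();
  VectorXd b = VectorXd::Ones(4);

  SparseQR<SpMat, COLAMDOrdering<int> > qr;
  qr.analyzePattern(A);                 // symbolic step: ordering + column elimination tree
  qr.factorize(A);                      // numeric step
  VectorXd x = qr.solve(b);

  A.valuePtr()[0] = 7;                  // new values, same sparsity pattern
  qr.factorize(A);                      // no second call to analyzePattern() is needed
  x = qr.solve(b);
  std::cout << x.transpose() << std::endl;
}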
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index ce462e951..f6d7d8944 100644
--- a/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -30,6 +30,9 @@ abs2() const
/** \returns an expression of the coefficient-wise exponential of *this.
*
+ * This function computes the coefficient-wise exponential. The function MatrixBase::exp() in the
+ * unsupported module MatrixFunctions computes the matrix exponential.
+ *
* Example: \include Cwise_exp.cpp
* Output: \verbinclude Cwise_exp.out
*
@@ -44,6 +47,9 @@ exp() const
/** \returns an expression of the coefficient-wise logarithm of *this.
*
+ * This function computes the coefficient-wise logarithm. The function MatrixBase::log() in the
+ * unsupported module MatrixFunctions computes the matrix logarithm.
+ *
* Example: \include Cwise_log.cpp
* Output: \verbinclude Cwise_log.out
*
@@ -58,6 +64,9 @@ log() const
/** \returns an expression of the coefficient-wise square root of *this.
*
+ * This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the
+ * unsupported module MatrixFunctions computes the matrix square root.
+ *
* Example: \include Cwise_sqrt.cpp
* Output: \verbinclude Cwise_sqrt.out
*
@@ -72,6 +81,9 @@ sqrt() const
/** \returns an expression of the coefficient-wise cosine of *this.
*
+ * This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the
+ * unsupported module MatrixFunctions computes the matrix cosine.
+ *
* Example: \include Cwise_cos.cpp
* Output: \verbinclude Cwise_cos.out
*
@@ -87,6 +99,9 @@ cos() const
/** \returns an expression of the coefficient-wise sine of *this.
*
+ * This function computes the coefficient-wise sine. The function MatrixBase::sin() in the
+ * unsupported module MatrixFunctions computes the matrix sine.
+ *
* Example: \include Cwise_sin.cpp
* Output: \verbinclude Cwise_sin.out
*
@@ -156,6 +171,9 @@ atan() const
/** \returns an expression of the coefficient-wise power of *this to the given exponent.
*
+ * This function computes the coefficient-wise power. The function MatrixBase::pow() in the
+ * unsupported module MatrixFunctions computes the matrix power.
+ *
* Example: \include Cwise_pow.cpp
* Output: \verbinclude Cwise_pow.out
*
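
To make the distinction concrete, a short sketch (not part of the patch) contrasting the coefficient-wise exp() documented here with the matrix exponential provided by the unsupported MatrixFunctions module:

#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::Matrix2d A;
  A << 0, 1,
       1, 0;

  Eigen::Matrix2d cwise = A.array().exp().matrix(); // entry by entry: [[1, e], [e, 1]]
  Eigen::Matrix2d mexp  = A.exp();                  // matrix exponential:
                                                    // [[cosh 1, sinh 1], [sinh 1, cosh 1]]
  std::cout << cwise << "\n\n" << mexp << std::endl;
}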
diff --git a/bench/bench_norm.cpp b/bench/bench_norm.cpp
index 398fef835..129afcfb2 100644
--- a/bench/bench_norm.cpp
+++ b/bench/bench_norm.cpp
@@ -6,19 +6,25 @@ using namespace Eigen;
using namespace std;
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(T& v)
{
return v.norm();
}
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar hypotNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar stableNorm(T& v)
+{
+ return v.stableNorm();
+}
+
+template<typename T>
+EIGEN_DONT_INLINE typename T::Scalar hypotNorm(T& v)
{
return v.hypotNorm();
}
template<typename T>
-EIGEN_DONT_INLINE typename T::Scalar blueNorm(const T& v)
+EIGEN_DONT_INLINE typename T::Scalar blueNorm(T& v)
{
return v.blueNorm();
}
@@ -217,20 +223,21 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v)
}
#define BENCH_PERF(NRM) { \
+ float af = 0; double ad = 0; std::complex<float> ac = 0; \
Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\
for (int k=0; k<tries; ++k) { \
tf.start(); \
- for (int i=0; i<iters; ++i) NRM(vf); \
+ for (int i=0; i<iters; ++i) { af += NRM(vf); } \
tf.stop(); \
} \
for (int k=0; k<tries; ++k) { \
td.start(); \
- for (int i=0; i<iters; ++i) NRM(vd); \
+ for (int i=0; i<iters; ++i) { ad += NRM(vd); } \
td.stop(); \
} \
/*for (int k=0; k<std::max(1,tries/3); ++k) { \
tcf.start(); \
- for (int i=0; i<iters; ++i) NRM(vcf); \
+ for (int i=0; i<iters; ++i) { ac += NRM(vcf); } \
tcf.stop(); \
} */\
std::cout << #NRM << "\t" << tf.value() << " " << td.value() << " " << tcf.value() << "\n"; \
@@ -316,14 +323,17 @@ int main(int argc, char** argv)
std::cout << "\n";
}
+ y = 1;
std::cout.precision(4);
- std::cerr << "Performance (out of cache):\n";
+ int s1 = 1024*1024*32;
+ std::cerr << "Performance (out of cache, " << s1 << "):\n";
{
int iters = 1;
- VectorXf vf = VectorXf::Random(1024*1024*32) * y;
- VectorXd vd = VectorXd::Random(1024*1024*32) * y;
- VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y;
+ VectorXf vf = VectorXf::Random(s1) * y;
+ VectorXd vd = VectorXd::Random(s1) * y;
+ VectorXcf vcf = VectorXcf::Random(s1) * y;
BENCH_PERF(sqsumNorm);
+ BENCH_PERF(stableNorm);
BENCH_PERF(blueNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
@@ -332,13 +342,14 @@ int main(int argc, char** argv)
BENCH_PERF(bl2passNorm);
}
- std::cerr << "\nPerformance (in cache):\n";
+ std::cerr << "\nPerformance (in cache, " << 512 << "):\n";
{
int iters = 100000;
VectorXf vf = VectorXf::Random(512) * y;
VectorXd vd = VectorXd::Random(512) * y;
VectorXcf vcf = VectorXcf::Random(512) * y;
BENCH_PERF(sqsumNorm);
+ BENCH_PERF(stableNorm);
BENCH_PERF(blueNorm);
BENCH_PERF(pblueNorm);
BENCH_PERF(lapackNorm);
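
The reworked BENCH_PERF macro accumulates each computed norm into a sink variable so that the compiler cannot discard the benchmarked call as dead code, and the vector sizes are now printed explicitly. The same guard in isolation, as a generic sketch rather than the benchmark itself (benched_norm is a hypothetical helper):

#include <Eigen/Dense>
#include <iostream>

// EIGEN_DONT_INLINE keeps the call visible to the timer; the returned value
// still has to be consumed, otherwise the whole loop may be optimized away.
template<typename Vec>
EIGEN_DONT_INLINE typename Vec::Scalar benched_norm(const Vec& v) { return v.norm(); }

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::Random(512);
  double sink = 0;
  for (int i = 0; i < 100000; ++i)
    sink += benched_norm(v);            // accumulate so the work is observable
  std::cout << sink << std::endl;       // print the sink so it is not dead either
}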
diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake
index 9c546a05d..cea1afeab 100644
--- a/cmake/FindEigen3.cmake
+++ b/cmake/FindEigen3.cmake
@@ -9,6 +9,12 @@
# EIGEN3_FOUND - system has eigen lib with correct version
# EIGEN3_INCLUDE_DIR - the eigen include directory
# EIGEN3_VERSION - eigen version
+#
+# This module reads hints about search locations from
+# the following environment variables:
+#
+# EIGEN3_ROOT
+# EIGEN3_ROOT_DIR
# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
# Copyright (c) 2008, 2009 Gael Guennebaud, <g.gael@free.fr>
@@ -62,6 +68,9 @@ if (EIGEN3_INCLUDE_DIR)
else (EIGEN3_INCLUDE_DIR)
find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library
+ HINTS
+ ENV EIGEN3_ROOT
+ ENV EIGEN3_ROOT_DIR
PATHS
${CMAKE_INSTALL_PREFIX}/include
${KDE4_INCLUDE_DIR}
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 1b8aaf9aa..46e5fc9d7 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -97,6 +97,7 @@ add_dependencies(doc-unsupported-prerequisites unsupported_snippets unsupported_
add_custom_target(doc ALL
COMMAND doxygen
COMMAND doxygen Doxyfile-unsupported
+ COMMAND ${CMAKE_COMMAND} -E copy ${Eigen_BINARY_DIR}/doc/html/group__TopicUnalignedArrayAssert.html ${Eigen_BINARY_DIR}/doc/html/TopicUnalignedArrayAssert.html
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc
diff --git a/doc/examples/CMakeLists.txt b/doc/examples/CMakeLists.txt
index 71b272a15..08cf8efd7 100644
--- a/doc/examples/CMakeLists.txt
+++ b/doc/examples/CMakeLists.txt
@@ -6,12 +6,10 @@ foreach(example_src ${examples_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- get_target_property(example_executable
- ${example} LOCATION)
add_custom_command(
TARGET ${example}
POST_BUILD
- COMMAND ${example_executable}
+ COMMAND ${example}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
)
add_dependencies(all_examples ${example})
diff --git a/doc/snippets/CMakeLists.txt b/doc/snippets/CMakeLists.txt
index 92a22ea61..1135900cf 100644
--- a/doc/snippets/CMakeLists.txt
+++ b/doc/snippets/CMakeLists.txt
@@ -14,12 +14,10 @@ foreach(snippet_src ${snippets_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- get_target_property(compile_snippet_executable
- ${compile_snippet_target} LOCATION)
add_custom_command(
TARGET ${compile_snippet_target}
POST_BUILD
- COMMAND ${compile_snippet_executable}
+ COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
add_dependencies(all_snippets ${compile_snippet_target})
@@ -27,4 +25,4 @@ foreach(snippet_src ${snippets_SRCS})
PROPERTIES OBJECT_DEPENDS ${snippet_src})
endforeach(snippet_src)
-ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG)
\ No newline at end of file
+ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG)
diff --git a/doc/snippets/DirectionWise_hnormalized.cpp b/doc/snippets/DirectionWise_hnormalized.cpp
new file mode 100644
index 000000000..3410790a8
--- /dev/null
+++ b/doc/snippets/DirectionWise_hnormalized.cpp
@@ -0,0 +1,7 @@
+typedef Matrix<double,4,Dynamic> Matrix4Xd;
+Matrix4Xd M = Matrix4Xd::Random(4,5);
+Projective3d P(Matrix4d::Random());
+cout << "The matrix M is:" << endl << M << endl << endl;
+cout << "M.colwise().hnormalized():" << endl << M.colwise().hnormalized() << endl << endl;
+cout << "P*M:" << endl << P*M << endl << endl;
+cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl; \ No newline at end of file
diff --git a/doc/snippets/MatrixBase_hnormalized.cpp b/doc/snippets/MatrixBase_hnormalized.cpp
new file mode 100644
index 000000000..652cd77c0
--- /dev/null
+++ b/doc/snippets/MatrixBase_hnormalized.cpp
@@ -0,0 +1,6 @@
+Vector4d v = Vector4d::Random();
+Projective3d P(Matrix4d::Random());
+cout << "v = " << v.transpose() << "]^T" << endl;
+cout << "v.hnormalized() = " << v.hnormalized().transpose() << "]^T" << endl;
+cout << "P*v = " << (P*v).transpose() << "]^T" << endl;
+cout << "(P*v).hnormalized() = " << (P*v).hnormalized().transpose() << "]^T" << endl; \ No newline at end of file
diff --git a/doc/snippets/MatrixBase_homogeneous.cpp b/doc/snippets/MatrixBase_homogeneous.cpp
new file mode 100644
index 000000000..457c28f91
--- /dev/null
+++ b/doc/snippets/MatrixBase_homogeneous.cpp
@@ -0,0 +1,6 @@
+Vector3d v = Vector3d::Random(), w;
+Projective3d P(Matrix4d::Random());
+cout << "v = [" << v.transpose() << "]^T" << endl;
+cout << "h.homogeneous() = [" << v.homogeneous().transpose() << "]^T" << endl;
+cout << "(P * v.homogeneous()) = [" << (P * v.homogeneous()).transpose() << "]^T" << endl;
+cout << "(P * v.homogeneous()).hnormalized() = [" << (P * v.homogeneous()).eval().hnormalized().transpose() << "]^T" << endl; \ No newline at end of file
diff --git a/doc/snippets/VectorwiseOp_homogeneous.cpp b/doc/snippets/VectorwiseOp_homogeneous.cpp
new file mode 100644
index 000000000..aba4fed0e
--- /dev/null
+++ b/doc/snippets/VectorwiseOp_homogeneous.cpp
@@ -0,0 +1,7 @@
+typedef Matrix<double,3,Dynamic> Matrix3Xd;
+Matrix3Xd M = Matrix3Xd::Random(3,5);
+Projective3d P(Matrix4d::Random());
+cout << "The matrix M is:" << endl << M << endl << endl;
+cout << "M.colwise().homogeneous():" << endl << M.colwise().homogeneous() << endl << endl;
+cout << "P * M.colwise().homogeneous():" << endl << P * M.colwise().homogeneous() << endl << endl;
+cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl; \ No newline at end of file
diff --git a/doc/special_examples/CMakeLists.txt b/doc/special_examples/CMakeLists.txt
index 45e2339e3..aab80a55d 100644
--- a/doc/special_examples/CMakeLists.txt
+++ b/doc/special_examples/CMakeLists.txt
@@ -1,4 +1,3 @@
-
if(NOT EIGEN_TEST_NOQT)
find_package(Qt4)
if(QT4_FOUND)
@@ -6,17 +5,16 @@ if(NOT EIGEN_TEST_NOQT)
endif()
endif(NOT EIGEN_TEST_NOQT)
-
if(QT4_FOUND)
add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp)
target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY})
-
+
add_custom_command(
TARGET Tutorial_sparse_example
POST_BUILD
COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
)
-
+
add_dependencies(all_examples Tutorial_sparse_example)
endif(QT4_FOUND)
@@ -26,15 +24,11 @@ if(EIGEN_COMPILER_SUPPORT_CPP11)
target_link_libraries(random_cpp11 ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
add_dependencies(all_examples random_cpp11)
ei_add_target_property(random_cpp11 COMPILE_FLAGS "-std=c++11")
-
- get_target_property(random_cpp11_exec
- random_cpp11 LOCATION)
+
add_custom_command(
TARGET random_cpp11
POST_BUILD
- COMMAND ${random_cpp11_exec}
+ COMMAND random_cpp11
ARGS >${CMAKE_CURRENT_BINARY_DIR}/random_cpp11.out
)
-
-
-endif()
\ No newline at end of file
+endif()
diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp
index 06a6a8654..3851f9df2 100644
--- a/test/eigensolver_selfadjoint.cpp
+++ b/test/eigensolver_selfadjoint.cpp
@@ -29,7 +29,21 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
+ MatrixType symmC = symmA;
+
+ // randomly nullify some rows/columns
+ {
+ Index count = 1;//internal::random<Index>(-cols,cols);
+ for(Index k=0; k<count; ++k)
+ {
+ Index i = internal::random<Index>(0,cols-1);
+ symmA.row(i).setZero();
+ symmA.col(i).setZero();
+ }
+ }
+
symmA.template triangularView<StrictlyUpper>().setZero();
+ symmC.template triangularView<StrictlyUpper>().setZero();
MatrixType b = MatrixType::Random(rows,cols);
MatrixType b1 = MatrixType::Random(rows,cols);
@@ -40,7 +54,7 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
SelfAdjointEigenSolver<MatrixType> eiDirect;
eiDirect.computeDirect(symmA);
// generalized eigen pb
- GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmA, symmB);
+ GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);
VERIFY_IS_EQUAL(eiSymm.info(), Success);
VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
@@ -57,27 +71,28 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
// generalized eigen problem Ax = lBx
- eiSymmGen.compute(symmA, symmB,Ax_lBx);
+ eiSymmGen.compute(symmC, symmB,Ax_lBx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem BAx = lx
- eiSymmGen.compute(symmA, symmB,BAx_lx);
+ eiSymmGen.compute(symmC, symmB,BAx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmB.template selfadjointView<Lower>() * (symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem ABx = lx
- eiSymmGen.compute(symmA, symmB,ABx_lx);
+ eiSymmGen.compute(symmC, symmB,ABx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
- VERIFY((symmA.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
+ VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
+ eiSymm.compute(symmC);
MatrixType sqrtSymmA = eiSymm.operatorSqrt();
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
- VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
+ VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
@@ -95,9 +110,9 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
// test Tridiagonalization's methods
- Tridiagonalization<MatrixType> tridiag(symmA);
+ Tridiagonalization<MatrixType> tridiag(symmC);
// FIXME tridiag.matrixQ().adjoint() does not work
- VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
+ VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
// Test computation of eigenvalues from tridiagonal matrix
if(rows > 1)
@@ -111,8 +126,8 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
if (rows > 1)
{
// Test matrix with NaN
- symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
- SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA);
+ symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
+ SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
}
}
@@ -122,8 +137,10 @@ void test_eigensolver_selfadjoint()
int s = 0;
for(int i = 0; i < g_repeat; i++) {
// very important to test 3x3 and 2x2 matrices since we provide special paths for them
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix2f()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) );
CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) );
+ CALL_SUBTEST_1( selfadjointeigensolver(Matrix3d()) );
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) );
diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp
index 36721b496..cd04db5be 100644
--- a/test/jacobisvd.cpp
+++ b/test/jacobisvd.cpp
@@ -315,16 +315,30 @@ void jacobisvd_inf_nan()
VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
- Scalar some_nan = zero<Scalar>() / zero<Scalar>();
- VERIFY(some_nan != some_nan);
- svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
+ Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
+ VERIFY(nan != nan);
+ svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV);
MatrixType m = MatrixType::Zero(10,10);
m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
svd.compute(m, ComputeFullU | ComputeFullV);
m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = nan;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ // regression test for bug 791
+ m.resize(3,3);
+ m << 0, 2*NumTraits<Scalar>::epsilon(), 0.5,
+ 0, -0.5, 0,
+ nan, 0, 0;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ m.resize(4,4);
+ m << 1, 0, 0, 0,
+ 0, 3, 1, 2e-308,
+ 1, 0, 1, nan,
+ 0, nan, nan, 0;
svd.compute(m, ComputeFullU | ComputeFullV);
}
@@ -340,11 +354,33 @@ void jacobisvd_underoverflow()
Matrix2d M;
M << -7.90884e-313, -4.94e-324,
0, 5.60844e-313;
+ JacobiSVD<Matrix2d> svd;
+ svd.compute(M,ComputeFullU|ComputeFullV);
+ jacobisvd_check_full(M,svd);
+
+ VectorXd value_set(9);
+ value_set << 0, 1, -1, 5.60844e-313, -5.60844e-313, 4.94e-324, -4.94e-324, -4.94e-223, 4.94e-223;
+ Array4i id(0,0,0,0);
+ int k = 0;
+ do
+ {
+ M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3));
+ svd.compute(M,ComputeFullU|ComputeFullV);
+ jacobisvd_check_full(M,svd);
+
+ id(k)++;
+ if(id(k)>=value_set.size())
+ {
+ while(k<3 && id(k)>=value_set.size()) id(++k)++;
+ id.head(k).setZero();
+ k=0;
+ }
+
+ } while((id<int(value_set.size())).all());
+
#if defined __INTEL_COMPILER
#pragma warning pop
#endif
- JacobiSVD<Matrix2d> svd;
- svd.compute(M); // just check we don't loop indefinitely
// Check for overflow:
Matrix3d M3;
@@ -353,7 +389,8 @@ void jacobisvd_underoverflow()
-8.7190887618028355e+307, -7.3453213709232193e+307, -2.4367363684472105e+307;
JacobiSVD<Matrix3d> svd3;
- svd3.compute(M3); // just check we don't loop indefinitely
+ svd3.compute(M3,ComputeFullU|ComputeFullV); // just check we don't loop indefinitely
+ jacobisvd_check_full(M3,svd3);
}
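
The id array above works as a base-9 odometer over the four matrix entries, so every 2x2 combination of the listed extreme values is fed to the SVD exactly once. The same enumeration with an explicit carry loop, as an illustrative standalone sketch (not the test code):

#include <Eigen/SVD>

int main()
{
  using namespace Eigen;
  const double values[] = { 0., 1., -1., 5.60844e-313, -5.60844e-313,
                            4.94e-324, -4.94e-324, -4.94e-223, 4.94e-223 };
  const int n = sizeof(values) / sizeof(values[0]);
  Array4i id = Array4i::Zero();         // one "digit" per matrix entry

  do {
    Matrix2d M;
    M << values[id(0)], values[id(1)],
         values[id(2)], values[id(3)];
    JacobiSVD<Matrix2d> svd(M, ComputeFullU | ComputeFullV); // must terminate

    int k = 0;                          // advance the odometer with carry
    ++id(k);
    while (k < 3 && id(k) >= n) { id(k) = 0; ++id(++k); }
  } while ((id < n).all());
}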
void jacobisvd_preallocate()
@@ -437,6 +474,7 @@ void test_jacobisvd()
// Test on inf/nan matrix
CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
+ CALL_SUBTEST_10( jacobisvd_inf_nan<MatrixXd>() );
}
CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
diff --git a/test/linearstructure.cpp b/test/linearstructure.cpp
index 618984d5c..b627915ce 100644
--- a/test/linearstructure.cpp
+++ b/test/linearstructure.cpp
@@ -2,11 +2,16 @@
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+static bool g_called;
+
+#define EIGEN_SPECIAL_SCALAR_MULTIPLE_PLUGIN { g_called = true; }
+
#include "main.h"
template<typename MatrixType> void linearStructure(const MatrixType& m)
@@ -68,6 +73,24 @@ template<typename MatrixType> void linearStructure(const MatrixType& m)
VERIFY_IS_APPROX(m1.block(0,0,rows,cols) * s1, m1 * s1);
}
+// Make sure that complex * real and real * complex are properly optimized
+template<typename MatrixType> void real_complex(DenseIndex rows = MatrixType::RowsAtCompileTime, DenseIndex cols = MatrixType::ColsAtCompileTime)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+ RealScalar s = internal::random<RealScalar>();
+ MatrixType m1 = MatrixType::Random(rows, cols);
+
+ g_called = false;
+ VERIFY_IS_APPROX(s*m1, Scalar(s)*m1);
+ VERIFY(g_called && "real * matrix<complex> not properly optimized");
+
+ g_called = false;
+ VERIFY_IS_APPROX(m1*s, m1*Scalar(s));
+ VERIFY(g_called && "matrix<complex> * real not properly optimized");
+}
+
void test_linearstructure()
{
for(int i = 0; i < g_repeat; i++) {
@@ -80,5 +103,8 @@ void test_linearstructure()
CALL_SUBTEST_7( linearStructure(MatrixXi (internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( linearStructure(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
CALL_SUBTEST_9( linearStructure(ArrayXXf (internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+
+ CALL_SUBTEST_10( real_complex<Matrix4cd>() );
+ CALL_SUBTEST_10( real_complex<MatrixXcf>(10,10) );
}
}
diff --git a/test/main.h b/test/main.h
index 763cec8f9..b504970f3 100644
--- a/test/main.h
+++ b/test/main.h
@@ -17,13 +17,36 @@
#include <sstream>
#include <vector>
#include <typeinfo>
+
+// The following includes of STL headers have to be done _before_ the
+// definition of macros min() and max(). The reason is that many STL
+// implementations will not work properly as the min and max symbols collide
+// with the STL functions std::min() and std::max(). The STL headers may check
+// for the macro definition of min/max and issue a warning or undefine the
+// macros.
+//
+// Still, Windows defines min() and max() in windef.h as part of the regular
+// Windows system interfaces and many other Windows APIs depend on these
+// macros being available. To prevent the macro expansion of min/max and to
+// make Eigen compatible with the Windows environment all function calls of
+// std::min() and std::max() have to be written with parenthesis around the
+// function name.
+//
+// All STL headers used by Eigen should be included here. Because main.h is
+// included before any Eigen header and because the STL headers are guarded
+// against multiple inclusions, no STL header will see our own min/max macro
+// definitions.
#include <limits>
#include <algorithm>
-#include <sstream>
#include <complex>
#include <deque>
#include <queue>
+#include <list>
+// To test that all calls from Eigen code to std::min() and std::max() are
+// protected by parentheses against macro expansion, the min()/max() macros
+// are defined here and any unparenthesized min/max call will cause a
+// compiler error.
#define min(A,B) please_protect_your_min_with_parentheses
#define max(A,B) please_protect_your_max_with_parentheses
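
Wrapping the function name in parentheses is what defeats a function-like macro here, because such a macro is only invoked when the identifier is immediately followed by an opening parenthesis. A tiny hypothetical sketch of the pattern these macros enforce:

#include <algorithm>

// Simulate a hostile windef.h-style function-like macro:
#define min(A,B) please_protect_your_min_with_parentheses

int smaller(int a, int b)
{
  // return std::min(a, b);             // would expand the macro and fail to compile
  return (std::min)(a, b);              // the parentheses suppress macro expansion
}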
@@ -76,6 +99,10 @@ namespace Eigen
#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "")
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
+ #define EIGEN_EXCEPTIONS
+#endif
+
#ifndef EIGEN_NO_ASSERTION_CHECKING
namespace Eigen
@@ -172,7 +199,7 @@ namespace Eigen
#ifndef VERIFY_RAISES_ASSERT
#define VERIFY_RAISES_ASSERT(a) \
- std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled";
+ std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n";
#endif
#if !defined(__CUDACC__)
diff --git a/test/product.h b/test/product.h
index 856b234ac..0b3abe402 100644
--- a/test/product.h
+++ b/test/product.h
@@ -139,4 +139,12 @@ template<typename MatrixType> void product(const MatrixType& m)
// inner product
Scalar x = square2.row(c) * square2.col(c2);
VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum());
+
+ // outer product
+ VERIFY_IS_APPROX(m1.col(c) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.row(r).transpose() * m1.col(c).transpose(), m1.block(r,0,1,cols).transpose() * m1.block(0,c,rows,1).transpose());
+ VERIFY_IS_APPROX(m1.block(0,c,rows,1) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.col(c) * m1.block(r,0,1,cols), m1.block(0,c,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.leftCols(1) * m1.row(r), m1.block(0,0,rows,1) * m1.block(r,0,1,cols));
+ VERIFY_IS_APPROX(m1.col(c) * m1.topRows(1), m1.block(0,c,rows,1) * m1.block(0,0,1,cols));
}
diff --git a/test/sparseqr.cpp b/test/sparseqr.cpp
index 1fe4a98ee..8e6887dd3 100644
--- a/test/sparseqr.cpp
+++ b/test/sparseqr.cpp
@@ -54,6 +54,8 @@ template<typename Scalar> void test_sparseqr_scalar()
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
+ if(internal::random<float>(0,1)>0.5)
+ solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern does not change.
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp
index 549f91fbf..f76919af4 100644
--- a/test/stable_norm.cpp
+++ b/test/stable_norm.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -14,6 +14,21 @@ template<typename T> bool isNotNaN(const T& x)
return x==x;
}
+template<typename T> bool isNaN(const T& x)
+{
+ return x!=x;
+}
+
+template<typename T> bool isInf(const T& x)
+{
+ return x > NumTraits<T>::highest();
+}
+
+template<typename T> bool isMinusInf(const T& x)
+{
+ return x < NumTraits<T>::lowest();
+}
+
// workaround aggressive optimization in ICC
template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
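
These helpers detect special values without <cmath>: a NaN is the only value that compares unequal to itself, and anything strictly greater than NumTraits<T>::highest() (resp. smaller than lowest()) can only be +inf (resp. -inf), assuming IEEE semantics and no -ffast-math. The idea as a standalone, illustrative sketch (is_nan/is_pinf/is_minf are hypothetical names):

#include <Eigen/Core>
#include <limits>
#include <cassert>

template<typename T> bool is_nan(const T& x)  { return x != x; }
template<typename T> bool is_pinf(const T& x) { return x > Eigen::NumTraits<T>::highest(); }
template<typename T> bool is_minf(const T& x) { return x < Eigen::NumTraits<T>::lowest(); }

int main()
{
  const double nan  = std::numeric_limits<double>::quiet_NaN();
  const double pinf = std::numeric_limits<double>::infinity();

  assert( is_nan(nan)    && !is_nan(1.0));
  assert( is_pinf(pinf)  && !is_pinf(std::numeric_limits<double>::max()));
  assert( is_minf(-pinf) && !is_minf(-1.0));
  return 0;
}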
@@ -106,6 +121,58 @@ template<typename MatrixType> void stable_norm(const MatrixType& m)
VERIFY_IS_APPROX(vrand.rowwise().stableNorm(), vrand.rowwise().norm());
VERIFY_IS_APPROX(vrand.rowwise().blueNorm(), vrand.rowwise().norm());
VERIFY_IS_APPROX(vrand.rowwise().hypotNorm(), vrand.rowwise().norm());
+
+ // test NaN, +inf, -inf
+ MatrixType v;
+ Index i = internal::random<Index>(0,rows-1);
+ Index j = internal::random<Index>(0,cols-1);
+
+ // NaN
+ {
+ v = vrand;
+ v(i,j) = RealScalar(0)/RealScalar(0);
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isNaN(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isNaN(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isNaN(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isNaN(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isNaN(v.hypotNorm()));
+ }
+
+ // +inf
+ {
+ v = vrand;
+ v(i,j) = RealScalar(1)/RealScalar(0);
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isInf(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isInf(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isInf(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isInf(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isInf(v.hypotNorm()));
+ }
+
+ // -inf
+ {
+ v = vrand;
+ v(i,j) = RealScalar(-1)/RealScalar(0);
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isInf(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isInf(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isInf(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isInf(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isInf(v.hypotNorm()));
+ }
+
+ // mix
+ {
+ Index i2 = internal::random<Index>(0,rows-1);
+ Index j2 = internal::random<Index>(0,cols-1);
+ v = vrand;
+ v(i,j) = RealScalar(-1)/RealScalar(0);
+ v(i2,j2) = RealScalar(0)/RealScalar(0);
+ VERIFY(!isFinite(v.squaredNorm())); VERIFY(isNaN(v.squaredNorm()));
+ VERIFY(!isFinite(v.norm())); VERIFY(isNaN(v.norm()));
+ VERIFY(!isFinite(v.stableNorm())); VERIFY(isNaN(v.stableNorm()));
+ VERIFY(!isFinite(v.blueNorm())); VERIFY(isNaN(v.blueNorm()));
+ VERIFY(!isFinite(v.hypotNorm())); VERIFY(isNaN(v.hypotNorm()));
+ }
}
void test_stable_norm()
diff --git a/test/upperbidiagonalization.cpp b/test/upperbidiagonalization.cpp
index d15bf588b..847b34b55 100644
--- a/test/upperbidiagonalization.cpp
+++ b/test/upperbidiagonalization.cpp
@@ -35,7 +35,7 @@ void test_upperbidiagonalization()
CALL_SUBTEST_1( upperbidiag(MatrixXf(3,3)) );
CALL_SUBTEST_2( upperbidiag(MatrixXd(17,12)) );
CALL_SUBTEST_3( upperbidiag(MatrixXcf(20,20)) );
- CALL_SUBTEST_4( upperbidiag(MatrixXcd(16,15)) );
+ CALL_SUBTEST_4( upperbidiag(Matrix<std::complex<double>,Dynamic,Dynamic,RowMajor>(16,15)) );
CALL_SUBTEST_5( upperbidiag(Matrix<float,6,4>()) );
CALL_SUBTEST_6( upperbidiag(Matrix<float,5,5>()) );
CALL_SUBTEST_7( upperbidiag(Matrix<double,4,3>()) );
diff --git a/unsupported/Eigen/BDCSVD b/unsupported/Eigen/BDCSVD
new file mode 100644
index 000000000..44649dbd0
--- /dev/null
+++ b/unsupported/Eigen/BDCSVD
@@ -0,0 +1,26 @@
+#ifndef EIGEN_BDCSVD_MODULE_H
+#define EIGEN_BDCSVD_MODULE_H
+
+#include <Eigen/SVD>
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup BDCSVD_Module BDCSVD module
+ *
+ *
+ *
+ * This module provides Divide & Conquer SVD decomposition for matrices (both real and complex).
+ * This decomposition is accessible via the following MatrixBase method:
+ * - MatrixBase::bdcSvd()
+ *
+ * \code
+ * #include <Eigen/BDCSVD>
+ * \endcode
+ */
+
+#include "src/BDCSVD/BDCSVD.h"
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_BDCSVD_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
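
A possible usage sketch for the new module (assuming the header is reachable as <unsupported/Eigen/BDCSVD>, like the other unsupported modules; matrix sizes are arbitrary):

#include <unsupported/Eigen/BDCSVD>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(1000, 300);
  VectorXd b = VectorXd::Random(1000);

  // Divide & conquer SVD; thin unitaries are enough for least-squares solving.
  BDCSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
  VectorXd x = svd.solve(b);

  std::cout << "largest singular value: " << svd.singularValues()(0) << "\n"
            << "least-squares residual: " << (A*x - b).norm() << std::endl;
}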
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 0b12aaffb..0320606c1 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -82,7 +82,9 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
\param[in] M a square matrix.
\returns expression representing \f$ \cos(M) \f$.
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
+This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine.
+
+The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
\sa \ref matrixbase_sin "sin()" for an example.
@@ -123,6 +125,9 @@ differential equations: the solution of \f$ y' = My \f$ with the
initial condition \f$ y(0) = y_0 \f$ is given by
\f$ y(t) = \exp(M) y_0 \f$.
+The matrix exponential is different from applying the exp function to all the entries in the matrix.
+Use ArrayBase::exp() if you want to do the latter.
+
The cost of the computation is approximately \f$ 20 n^3 \f$ for
matrices of size \f$ n \f$. The number 20 depends weakly on the
norm of the matrix.
@@ -177,6 +182,9 @@ the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have
multiple solutions; this function returns a matrix whose eigenvalues
have imaginary part in the interval \f$ (-\pi,\pi] \f$.
+The matrix logarithm is different from applying the log function to all the entries in the matrix.
+Use ArrayBase::log() if you want to do the latter.
+
In the real case, the matrix \f$ M \f$ should be invertible and
it should have no eigenvalues which are real and negative (pairs of
complex conjugate eigenvalues are allowed). In the complex case, it
@@ -232,7 +240,8 @@ const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) con
The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
where exp denotes the matrix exponential, and log denotes the matrix
-logarithm.
+logarithm. This is different from raising all the entries in the matrix
+to the p-th power. Use ArrayBase::pow() if you want to do the latter.
If \p p is complex, the scalar type of \p M should be the type of \p
p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$.
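
A short sketch of the distinction drawn above (illustrative, assuming the unsupported MatrixFunctions header is available): the matrix power and the coefficient-wise power of the same matrix generally differ.

#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::Matrix2d M;
  M << 1, 1,
       0, 1;

  Eigen::Matrix2d P = M.pow(2.0);                   // matrix power: M*M = [[1,2],[0,1]]
  Eigen::Matrix2d C = M.array().pow(2.0).matrix();  // coefficient-wise: [[1,1],[0,1]]
  std::cout << P << "\n\n" << C << std::endl;
}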
@@ -391,7 +400,9 @@ const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
\param[in] M a square matrix.
\returns expression representing \f$ \sin(M) \f$.
-This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
+This function computes the matrix sine. Use ArrayBase::sin() for computing the entry-wise sine.
+
+The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
Example: \include MatrixSine.cpp
Output: \verbinclude MatrixSine.out
@@ -428,7 +439,9 @@ const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
-\f$ S^2 = M \f$.
+\f$ S^2 = M \f$. This is different from taking the square root of all
+the entries in the matrix; use ArrayBase::sqrt() if you want to do the
+latter.
In the <b>real case</b>, the matrix \f$ M \f$ should be invertible and
it should have no eigenvalues which are real and negative (pairs of
diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD
deleted file mode 100644
index 900a8aa60..000000000
--- a/unsupported/Eigen/SVD
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef EIGEN_SVD_MODULE_H
-#define EIGEN_SVD_MODULE_H
-
-#include <Eigen/QR>
-#include <Eigen/Householder>
-#include <Eigen/Jacobi>
-
-#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup SVD_Module SVD module
- *
- *
- *
- * This module provides SVD decomposition for matrices (both real and complex).
- * This decomposition is accessible via the following MatrixBase method:
- * - MatrixBase::jacobiSvd()
- *
- * \code
- * #include <Eigen/SVD>
- * \endcode
- */
-
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/SVD/UpperBidiagonalization.h"
-#include "src/SVD/SVDBase.h"
-#include "src/SVD/JacobiSVD.h"
-#include "src/SVD/BDCSVD.h"
-#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
-#include "../../Eigen/src/SVD/JacobiSVD_MKL.h"
-#endif
-
-#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_SVD_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
index 80006fd60..a7c369633 100644
--- a/unsupported/Eigen/src/SVD/BDCSVD.h
+++ b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
@@ -24,6 +24,20 @@
#define ALGOSWAP 16
namespace Eigen {
+
+template<typename _MatrixType> class BDCSVD;
+
+namespace internal {
+
+template<typename _MatrixType>
+struct traits<BDCSVD<_MatrixType> >
+{
+ typedef _MatrixType MatrixType;
+};
+
+} // end namespace internal
+
+
/** \ingroup SVD_Module
*
*
@@ -36,9 +50,9 @@ namespace Eigen {
* It should be used to speed up the computation of the SVD for big matrices.
*/
template<typename _MatrixType>
-class BDCSVD : public SVDBase<_MatrixType>
+class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
{
- typedef SVDBase<_MatrixType> Base;
+ typedef SVDBase<BDCSVD> Base;
public:
using Base::rows;
@@ -77,9 +91,7 @@ public:
* The default constructor is useful in cases in which the user intends to
* perform decompositions via BDCSVD::compute(const MatrixType&).
*/
- BDCSVD()
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP), m_numIters(0)
+ BDCSVD() : algoswap(ALGOSWAP), m_numIters(0)
{}
@@ -90,8 +102,7 @@ public:
* \sa BDCSVD()
*/
BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP), m_numIters(0)
+ : algoswap(ALGOSWAP), m_numIters(0)
{
allocate(rows, cols, computationOptions);
}
@@ -107,8 +118,7 @@ public:
* available with the (non - default) FullPivHouseholderQR preconditioner.
*/
BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase(),
- algoswap(ALGOSWAP), m_numIters(0)
+ : algoswap(ALGOSWAP), m_numIters(0)
{
compute(matrix, computationOptions);
}
@@ -116,6 +126,7 @@ public:
~BDCSVD()
{
}
+
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
@@ -126,7 +137,7 @@ public:
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non - default) FullPivHouseholderQR preconditioner.
*/
- SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+ BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
@@ -134,7 +145,7 @@ public:
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
- SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ BDCSVD& compute(const MatrixType& matrix)
{
return compute(matrix, this->m_computationOptions);
}
@@ -160,8 +171,8 @@ public:
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
- eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() &&
- "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ eigen_assert(computeU() && computeV() &&
+ "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
}
@@ -195,6 +206,9 @@ public:
return this->m_matrixV;
}
}
+
+ using Base::computeU;
+ using Base::computeV;
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
@@ -229,7 +243,7 @@ template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
isTranspose = (cols > rows);
- if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+ if (Base::allocate(rows, cols, computationOptions)) return;
m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
if (isTranspose){
compU = this->computeU();
@@ -262,8 +276,7 @@ void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computati
// Method which computes the BDCSVD for the int type
template<>
-SVDBase<Matrix<int, Dynamic, Dynamic> >&
-BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
+BDCSVD<Matrix<int, Dynamic, Dynamic> >& BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
allocate(matrix.rows(), matrix.cols(), computationOptions);
this->m_nonzeroSingularValues = 0;
m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
@@ -279,8 +292,7 @@ BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsign
// Method which computes the BDCSVD
template<typename MatrixType>
-SVDBase<MatrixType>&
-BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
allocate(matrix.rows(), matrix.cols(), computationOptions);
using std::abs;
diff --git a/unsupported/Eigen/src/BDCSVD/CMakeLists.txt b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt
new file mode 100644
index 000000000..73b89ea18
--- /dev/null
+++ b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_BDCSVD_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_BDCSVD_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/BDCSVD COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
index 0bc9a46e6..0bc9a46e6 100644
--- a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
+++ b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
index 8563ddab8..8563ddab8 100644
--- a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
+++ b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
index 8eb2808e3..654a2327f 100644
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -12,3 +12,4 @@ ADD_SUBDIRECTORY(Skyline)
ADD_SUBDIRECTORY(SparseExtra)
ADD_SUBDIRECTORY(KroneckerProduct)
ADD_SUBDIRECTORY(Splines)
+ADD_SUBDIRECTORY(BDCSVD)
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index c8c84069e..67498705b 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
@@ -27,11 +27,11 @@ namespace internal {
* \param iters on input: maximum number of iterations to perform
* on output: number of iterations performed
* \param restart number of iterations for a restart
- * \param tol_error on input: residual tolerance
+ * \param tol_error on input: relative residual tolerance
* on output: residuum achieved
*
- * \sa IterativeMethods::bicgstab()
- *
+ * \sa IterativeMethods::bicgstab()
+ *
*
* For references, please see:
*
@@ -70,18 +70,24 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
const int m = mat.rows();
- VectorType p0 = rhs - mat*x;
+ // residual and preconditioned residual
+ const VectorType p0 = rhs - mat*x;
VectorType r0 = precond.solve(p0);
-
+
+ const RealScalar r0Norm = r0.norm();
+
// is initial guess already good enough?
- if(abs(r0.norm()) < tol) {
- return true;
+ if(r0Norm == 0) {
+ tol_error=0;
+ return true;
}
+ // storage for Hessenberg matrix and Householder data
+ FMatrixType H = FMatrixType::Zero(m, restart + 1);
VectorType w = VectorType::Zero(restart + 1);
-
- FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
VectorType tau = VectorType::Zero(restart + 1);
+
+ // storage for Jacobi rotations
std::vector < JacobiRotation < Scalar > > G(restart);
// generate first Householder vector
@@ -112,11 +118,10 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
}
if (v.tail(m - k).norm() != 0.0) {
-
if (k <= restart) {
// generate new Householder vector
- VectorType e(m - k - 1);
+ VectorType e(m - k - 1);
RealScalar beta;
v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta);
H.col(k).tail(m - k - 1) = e;
@@ -125,78 +130,77 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data());
}
- }
+ }
- if (k > 1) {
- for (int i = 0; i < k - 1; ++i) {
- // apply old Givens rotations to v
- v.applyOnTheLeft(i, i + 1, G[i].adjoint());
- }
- }
+ if (k > 1) {
+ for (int i = 0; i < k - 1; ++i) {
+ // apply old Givens rotations to v
+ v.applyOnTheLeft(i, i + 1, G[i].adjoint());
+ }
+ }
- if (k<m && v(k) != (Scalar) 0) {
- // determine next Givens rotation
- G[k - 1].makeGivens(v(k - 1), v(k));
+ if (k<m && v(k) != (Scalar) 0) {
- // apply Givens rotation to v and w
- v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
- w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ // determine next Givens rotation
+ G[k - 1].makeGivens(v(k - 1), v(k));
- }
+ // apply Givens rotation to v and w
+ v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ }
- // insert coefficients into upper matrix triangle
- H.col(k - 1).head(k) = v.head(k);
+ // insert coefficients into upper matrix triangle
+ H.col(k - 1).head(k) = v.head(k);
- bool stop=(k==m || abs(w(k)) < tol || iters == maxIters);
+ bool stop=(k==m || abs(w(k)) < tol * r0Norm || iters == maxIters);
- if (stop || k == restart) {
+ if (stop || k == restart) {
- // solve upper triangular system
- VectorType y = w.head(k);
- H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
+ // solve upper triangular system
+ VectorType y = w.head(k);
+ H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
- // use Horner-like scheme to calculate solution vector
- VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
+ // use Horner-like scheme to calculate solution vector
+ VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
- // apply Householder reflection H_{k} to x_new
- x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
+ // apply Householder reflection H_{k} to x_new
+ x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
- for (int i = k - 2; i >= 0; --i) {
- x_new += y(i) * VectorType::Unit(m, i);
- // apply Householder reflection H_{i} to x_new
- x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
- }
+ for (int i = k - 2; i >= 0; --i) {
+ x_new += y(i) * VectorType::Unit(m, i);
+ // apply Householder reflection H_{i} to x_new
+ x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
- x += x_new;
+ x += x_new;
- if (stop) {
- return true;
- } else {
- k=0;
+ if (stop) {
+ return true;
+ } else {
+ k=0;
- // reset data for a restart r0 = rhs - mat * x;
- VectorType p0=mat*x;
- VectorType p1=precond.solve(p0);
- r0 = rhs - p1;
-// r0_sqnorm = r0.squaredNorm();
- w = VectorType::Zero(restart + 1);
- H = FMatrixType::Zero(m, restart + 1);
- tau = VectorType::Zero(restart + 1);
+ // reset data for restart
+ const VectorType p0 = rhs - mat*x;
+ r0 = precond.solve(p0);
- // generate first Householder vector
- RealScalar beta;
- r0.makeHouseholder(e, tau.coeffRef(0), beta);
- w(0)=(Scalar) beta;
- H.bottomLeftCorner(m - 1, 1) = e;
+ // clear Hessenberg matrix and Householder data
+ H = FMatrixType::Zero(m, restart + 1);
+ w = VectorType::Zero(restart + 1);
+ tau = VectorType::Zero(restart + 1);
- }
+ // generate first Householder vector
+ RealScalar beta;
+ r0.makeHouseholder(e, tau.coeffRef(0), beta);
+ w(0)=(Scalar) beta;
+ H.bottomLeftCorner(m - 1, 1) = e;
- }
+ }
+ }
}
-
+
return false;
}
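
With this change the inner stopping test is relative: the iteration stops once the (preconditioned) residual norm drops below tol times the initial residual norm, or the Krylov space is exhausted. A hedged usage sketch of the solver class built on this kernel, using only accessors that appear in this file or are inherited from IterativeSolverBase (the test system is arbitrary):

#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>
#include <iostream>

int main()
{
  using namespace Eigen;
  typedef SparseMatrix<double> SpMat;

  const int n = 100;                    // simple tridiagonal test matrix
  SpMat A(n, n);
  for (int i = 0; i < n; ++i)
  {
    A.insert(i, i) = 4.0;
    if (i + 1 < n) { A.insert(i, i + 1) = -1.0; A.insert(i + 1, i) = -1.0; }
  }
  A.makeCompressed();
  VectorXd b = VectorXd::Ones(n);

  GMRES<SpMat> solver(A);
  solver.set_restart(30);               // restart length
  solver.setTolerance(1e-10);           // now interpreted relative to the initial residual
  VectorXd x = solver.solve(b);

  std::cout << "#iterations: " << solver.iterations()
            << "  estimated error: " << solver.error() << std::endl;
}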
@@ -230,7 +234,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
- *
+ *
* This class can be used as the direct solver classes. Here is a typical usage example:
* \code
* int n = 10000;
@@ -244,7 +248,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* // update b, and solve again
* x = solver.solve(b);
* \endcode
- *
+ *
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method. Here is a step by
* step execution example starting with a random guess and printing the evolution
@@ -260,7 +264,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* } while (solver.info()!=Success && i<100);
* \endcode
* Note that such a step by step execution is slightly slower.
- *
+ *
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
@@ -272,10 +276,10 @@ class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
-
+
private:
int m_restart;
-
+
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
@@ -289,10 +293,10 @@ public:
GMRES() : Base(), m_restart(30) {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
- *
+ *
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
- *
+ *
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
@@ -301,16 +305,16 @@ public:
GMRES(const MatrixType& A) : Base(A), m_restart(30) {}
~GMRES() {}
-
+
/** Get the number of iterations after which a restart is performed.
*/
int get_restart() { return m_restart; }
-
+
/** Set the number of iterations after which a restart is performed.
* \param restart number of iterations for a restart, default is 30.
*/
void set_restart(const int restart) { m_restart=restart; }
-
+
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
* \a x0 as an initial solution.
*
@@ -326,17 +330,17 @@ public:
return internal::solve_retval_with_guess
<GMRES, Rhs, Guess>(*this, b.derived(), x0);
}
-
+
/** \internal */
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
- {
+ {
bool failed = false;
for(int j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
-
+
typename Dest::ColXpr xj(x,j);
if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
failed = true;
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
index 51dd1d3c4..7cebe4e06 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -144,11 +144,13 @@ class LevenbergMarquardt : internal::no_assignment_operator
/** Sets the default parameters */
void resetParameters()
- {
+ {
+ using std::sqrt;
+
m_factor = 100.;
m_maxfev = 400;
- m_ftol = std::sqrt(NumTraits<RealScalar>::epsilon());
- m_xtol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_ftol = sqrt(NumTraits<RealScalar>::epsilon());
+ m_xtol = sqrt(NumTraits<RealScalar>::epsilon());
m_gtol = 0. ;
m_epsfcn = 0. ;
}
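
Replacing std::sqrt with an unqualified sqrt after "using std::sqrt;" lets argument-dependent lookup pick up sqrt overloads for user-defined scalar types (multiprecision, autodiff, ...) while std::sqrt remains the fallback for builtin types. The pattern in isolation, as a generic sketch with a hypothetical scalar type (not Eigen code):

#include <cmath>
#include <iostream>

namespace mylib {
  struct MyScalar {
    double v;
    explicit MyScalar(double x) : v(x) {}
  };
  // Overload found through argument-dependent lookup.
  MyScalar sqrt(MyScalar x) { return MyScalar(std::sqrt(x.v)); }
}

template<typename Scalar>
Scalar root(Scalar x)
{
  using std::sqrt;   // fallback for builtin types
  return sqrt(x);    // unqualified call: ADL may also select mylib::sqrt
}

int main()
{
  std::cout << root(2.0) << "\n";                     // uses std::sqrt(double)
  std::cout << root(mylib::MyScalar(2.0)).v << "\n";  // uses mylib::sqrt
}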
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index bfeb26fc9..69106ddc5 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -45,18 +45,24 @@ namespace LevenbergMarquardtSpace {
template<typename FunctorType, typename Scalar=double>
class LevenbergMarquardt
{
+ static Scalar sqrt_epsilon()
+ {
+ using std::sqrt;
+ return sqrt(NumTraits<Scalar>::epsilon());
+ }
+
public:
LevenbergMarquardt(FunctorType &_functor)
: functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }
typedef DenseIndex Index;
-
+
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(std::sqrt(NumTraits<Scalar>::epsilon()))
- , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , ftol(sqrt_epsilon())
+ , xtol(sqrt_epsilon())
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -72,7 +78,7 @@ public:
LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status minimize(FVectorType &x);
@@ -83,12 +89,12 @@ public:
FunctorType &functor,
FVectorType &x,
Index *nfev,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = sqrt_epsilon()
);
LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
@@ -109,6 +115,7 @@ public:
Scalar lm_param(void) { return par; }
private:
+
FunctorType &functor;
Index n;
Index m;
diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt
deleted file mode 100644
index b40baf092..000000000
--- a/unsupported/Eigen/src/SVD/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_SVD_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_SVD_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h
deleted file mode 100644
index 02fac409e..000000000
--- a/unsupported/Eigen/src/SVD/JacobiSVD.h
+++ /dev/null
@@ -1,782 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_JACOBISVD_H
-#define EIGEN_JACOBISVD_H
-
-namespace Eigen {
-
-namespace internal {
-// forward declaration (needed by ICC)
-// the empty body is required by MSVC
-template<typename MatrixType, int QRPreconditioner,
- bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
-struct svd_precondition_2x2_block_to_be_real {};
-
-/*** QR preconditioners (R-SVD)
- ***
- *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
- *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
- *** JacobiSVD which by itself is only able to work on square matrices.
- ***/
-
-enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
-
-template<typename MatrixType, int QRPreconditioner, int Case>
-struct qr_preconditioner_should_do_anything
-{
- enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
- b = MatrixType::RowsAtCompileTime != Dynamic &&
- MatrixType::ColsAtCompileTime != Dynamic &&
- MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
- ret = !( (QRPreconditioner == NoQRPreconditioner) ||
- (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
- (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
- };
-};
-
-template<typename MatrixType, int QRPreconditioner, int Case,
- bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
-> struct qr_preconditioner_impl {};
-
-template<typename MatrixType, int QRPreconditioner, int Case>
-class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
-{
-public:
- typedef typename MatrixType::Index Index;
- void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
- bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
- {
- return false;
- }
-};
-
-/*** preconditioner using FullPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
- };
- typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
-
- void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
- if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
- return true;
- }
- return false;
- }
-private:
- typedef FullPivHouseholderQR<MatrixType> QRType;
- QRType m_qr;
- WorkspaceType m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- m_adjoint.resize(svd.cols(), svd.rows());
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
- if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
- return true;
- }
- else return false;
- }
-private:
- typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using ColPivHouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
-
- void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
- else if(svd.m_computeThinU)
- {
- svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
- }
- if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
- return true;
- }
- return false;
- }
-
-private:
- typedef ColPivHouseholderQR<MatrixType> QRType;
- QRType m_qr;
- typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
-
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
- m_adjoint.resize(svd.cols(), svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
-
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
- else if(svd.m_computeThinV)
- {
- svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
- }
- if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
- return true;
- }
- else return false;
- }
-
-private:
- typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** preconditioner using HouseholderQR ***/
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
-{
-public:
- typedef typename MatrixType::Index Index;
-
- void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
- {
- if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.rows(), svd.cols());
- }
- if (svd.m_computeFullU) m_workspace.resize(svd.rows());
- else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
- }
-
- bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.rows() > matrix.cols())
- {
- m_qr.compute(matrix);
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
- if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
- else if(svd.m_computeThinU)
- {
- svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
- }
- if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
- return true;
- }
- return false;
- }
-private:
- typedef HouseholderQR<MatrixType> QRType;
- QRType m_qr;
- typename internal::plain_col_type<MatrixType>::type m_workspace;
-};
-
-template<typename MatrixType>
-class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
-{
-public:
- typedef typename MatrixType::Index Index;
- typedef typename MatrixType::Scalar Scalar;
- enum
- {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- Options = MatrixType::Options
- };
-
- typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
- TransposeTypeWithSameStorageOrder;
-
- void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
- {
- if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
- {
- m_qr.~QRType();
- ::new (&m_qr) QRType(svd.cols(), svd.rows());
- }
- if (svd.m_computeFullV) m_workspace.resize(svd.cols());
- else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
- m_adjoint.resize(svd.cols(), svd.rows());
- }
-
- bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
- {
- if(matrix.cols() > matrix.rows())
- {
- m_adjoint = matrix.adjoint();
- m_qr.compute(m_adjoint);
-
- svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
- if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
- else if(svd.m_computeThinV)
- {
- svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
- m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
- }
- if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
- return true;
- }
- else return false;
- }
-
-private:
- typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
- QRType m_qr;
- TransposeTypeWithSameStorageOrder m_adjoint;
- typename internal::plain_row_type<MatrixType>::type m_workspace;
-};
-
-/*** 2x2 SVD implementation
- ***
- *** JacobiSVD consists in performing a series of 2x2 SVD subproblems
- ***/
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
-{
- typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
- typedef typename SVD::Index Index;
- static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
-};
-
-template<typename MatrixType, int QRPreconditioner>
-struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
-{
- typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef typename SVD::Index Index;
- static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
- {
- using std::sqrt;
- Scalar z;
- JacobiRotation<Scalar> rot;
- RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
- if(n==0)
- {
- z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
- work_matrix.row(p) *= z;
- if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
- }
- else
- {
- rot.c() = conj(work_matrix.coeff(p,p)) / n;
- rot.s() = work_matrix.coeff(q,p) / n;
- work_matrix.applyOnTheLeft(p,q,rot);
- if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
- if(work_matrix.coeff(p,q) != Scalar(0))
- {
- Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
- work_matrix.col(q) *= z;
- if(svd.computeV()) svd.m_matrixV.col(q) *= z;
- }
- if(work_matrix.coeff(q,q) != Scalar(0))
- {
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
- }
- }
- }
-};
-
-template<typename MatrixType, typename RealScalar, typename Index>
-void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
- JacobiRotation<RealScalar> *j_left,
- JacobiRotation<RealScalar> *j_right)
-{
- using std::sqrt;
- Matrix<RealScalar,2,2> m;
- m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
- numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
- JacobiRotation<RealScalar> rot1;
- RealScalar t = m.coeff(0,0) + m.coeff(1,1);
- RealScalar d = m.coeff(1,0) - m.coeff(0,1);
- if(t == RealScalar(0))
- {
- rot1.c() = RealScalar(0);
- rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
- }
- else
- {
- RealScalar u = d / t;
- rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
- rot1.s() = rot1.c() * u;
- }
- m.applyOnTheLeft(0,1,rot1);
- j_right->makeJacobi(m,0,1);
- *j_left = rot1 * j_right->transpose();
-}
-
-} // end namespace internal
-
-/** \ingroup SVD_Module
- *
- *
- * \class JacobiSVD
- *
- * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
- *
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- * \param QRPreconditioner this optional parameter lets you specify the type of QR decomposition that will be used internally
- * for the R-SVD step for non-square matrices. See discussion of possible values below.
- *
- * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
- * \f[ A = U S V^* \f]
- * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
- * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
- * and right \em singular \em vectors of \a A, respectively.
- *
- * Singular values are always sorted in decreasing order.
- *
- * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
- *
- * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
- * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
- * singular vectors. Asking for \em thin \a U or \a V means asking for only their first \a m columns to be formed. So \a U is then an n-by-m matrix,
- * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
- *
- * Here's an example demonstrating basic usage:
- * \include JacobiSVD_basic.cpp
- * Output: \verbinclude JacobiSVD_basic.out
- *
- * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
- * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
- * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
- * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
- *
- * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
- * terminate in finite (and reasonable) time.
- *
- * The possible values for QRPreconditioner are:
- * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
- * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR.
- * Contrary to other QRs, it doesn't allow computing thin unitaries.
- * \li HouseholderQRPreconditioner is the fastest, but less safe and less accurate than the pivoting variants. It uses non-pivoting QR.
- * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
- * is inherently non-pivoting). However, the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
- * process is more reliable than the optimized bidiagonal SVD iterations.
- * \li NoQRPreconditioner lets you skip the QR preconditioner entirely. This is useful if you know that you will only be computing
- * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
- * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
- * if QR preconditioning is needed before applying it anyway.
- *
- * \sa MatrixBase::jacobiSvd()
- */
-template<typename _MatrixType, int QRPreconditioner>
-class JacobiSVD : public SVDBase<_MatrixType>
-{
- public:
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
- MatrixOptions = MatrixType::Options
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
- MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
- WorkMatrixType;
-
- /** \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via JacobiSVD::compute(const MatrixType&).
- */
- JacobiSVD()
- : SVDBase<_MatrixType>::SVDBase()
- {}
-
-
- /** \brief Default Constructor with memory preallocation
- *
- * Like the default constructor but with preallocation of the internal data
- * according to the specified problem size.
- * \sa JacobiSVD()
- */
- JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase()
- {
- allocate(rows, cols, computationOptions);
- }
-
- /** \brief Constructor performing the decomposition of the given matrix.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : SVDBase<_MatrixType>::SVDBase()
- {
- compute(matrix, computationOptions);
- }
-
- /** \brief Method performing the decomposition of the given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non-default) FullPivHouseholderQR preconditioner.
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of the given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- SVDBase<MatrixType>& compute(const MatrixType& matrix)
- {
- return compute(matrix, this->m_computationOptions);
- }
-
- /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
- *
- * \param b the right-hand-side of the equation to solve.
- *
- * \note Solving requires both U and V to be computed. Thin U and V are enough; there is no need for full U or V.
- *
- * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
- * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
- */
- template<typename Rhs>
- inline const internal::solve_retval<JacobiSVD, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized.");
- eigen_assert(SVDBase<MatrixType>::computeU() && SVDBase<MatrixType>::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
- }
-
-
-
- private:
- void allocate(Index rows, Index cols, unsigned int computationOptions);
-
- protected:
- WorkMatrixType m_workMatrix;
-
- template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
- friend struct internal::svd_precondition_2x2_block_to_be_real;
- template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
- friend struct internal::qr_preconditioner_impl;
-
- internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
- internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
-};
-
-template<typename MatrixType, int QRPreconditioner>
-void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
- if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
-
- if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
- {
- eigen_assert(!(this->m_computeThinU || this->m_computeThinV) &&
- "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
- "Use the ColPivHouseholderQR preconditioner instead.");
- }
-
- m_workMatrix.resize(this->m_diagSize, this->m_diagSize);
-
- if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this);
- if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-SVDBase<MatrixType>&
-JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
-{
- using std::abs;
- allocate(matrix.rows(), matrix.cols(), computationOptions);
-
- // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
- // only worsening the precision of U and V as we accumulate more rotations
- const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
-
- // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
- const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
-
- /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
-
- if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
- {
- m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize);
- if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows);
- if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize);
- if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols);
- if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize);
- }
-
- /*** step 2. The main Jacobi SVD iteration. ***/
-
- bool finished = false;
- while(!finished)
- {
- finished = true;
-
- // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
-
- for(Index p = 1; p < this->m_diagSize; ++p)
- {
- for(Index q = 0; q < p; ++q)
- {
- // if this 2x2 sub-matrix is not diagonal already...
- // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaNs don't
- // keep us iterating forever. Similarly, small denormal numbers are considered zero.
- using std::max;
- RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
- abs(m_workMatrix.coeff(q,q))));
- if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
- {
- finished = false;
-
- // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
- internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
- JacobiRotation<RealScalar> j_left, j_right;
- internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
-
- // accumulate resulting Jacobi rotations
- m_workMatrix.applyOnTheLeft(p,q,j_left);
- if(SVDBase<MatrixType>::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose());
-
- m_workMatrix.applyOnTheRight(p,q,j_right);
- if(SVDBase<MatrixType>::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right);
- }
- }
- }
- }
-
- /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
-
- for(Index i = 0; i < this->m_diagSize; ++i)
- {
- RealScalar a = abs(m_workMatrix.coeff(i,i));
- this->m_singularValues.coeffRef(i) = a;
- if(SVDBase<MatrixType>::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a;
- }
-
- /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
-
- this->m_nonzeroSingularValues = this->m_diagSize;
- for(Index i = 0; i < this->m_diagSize; i++)
- {
- Index pos;
- RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos);
- if(maxRemainingSingularValue == RealScalar(0))
- {
- this->m_nonzeroSingularValues = i;
- break;
- }
- if(pos)
- {
- pos += i;
- std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos));
- if(SVDBase<MatrixType>::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i));
- if(SVDBase<MatrixType>::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i));
- }
- }
-
- this->m_isInitialized = true;
- return *this;
-}
-
-namespace internal {
-template<typename _MatrixType, int QRPreconditioner, typename Rhs>
-struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
- : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
-{
- typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
- EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- eigen_assert(rhs().rows() == dec().rows());
-
- // A = U S V^*
- // So A^{-1} = V S^{-1} U^*
-
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
- Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
- }
-};
-} // end namespace internal
-
-/** \svd_module
- *
- * \return the singular value decomposition of \c *this computed by two-sided
- * Jacobi transformations.
- *
- * \sa class JacobiSVD
- */
-template<typename Derived>
-JacobiSVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
-{
- return JacobiSVD<PlainObject>(*this, computationOptions);
-}
-
-} // end namespace Eigen
-
-#endif // EIGEN_JACOBISVD_H
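
The header removed above documented the JacobiSVD API, which remains available through the main Eigen SVD module. For reference, a minimal usage sketch of that API, assuming only the standard <Eigen/Dense> include; this example is illustrative and not part of the patch:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      // Decompose a rectangular matrix; thin U and V are all that is needed
      // for (least-squares) solving, as the documentation above explains.
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 3);
      Eigen::VectorXf b = Eigen::VectorXf::Random(4);

      Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "Singular values:\n" << svd.singularValues() << "\n";
      std::cout << "Least-squares solution of A x = b:\n" << svd.solve(b) << "\n";
      return 0;
    }
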
diff --git a/unsupported/doc/examples/CMakeLists.txt b/unsupported/doc/examples/CMakeLists.txt
index 978f9afd8..c47646dfc 100644
--- a/unsupported/doc/examples/CMakeLists.txt
+++ b/unsupported/doc/examples/CMakeLists.txt
@@ -10,12 +10,10 @@ FOREACH(example_src ${examples_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(example_${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- GET_TARGET_PROPERTY(example_executable
- example_${example} LOCATION)
ADD_CUSTOM_COMMAND(
TARGET example_${example}
POST_BUILD
- COMMAND ${example_executable}
+ COMMAND example_${example}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
)
ADD_DEPENDENCIES(unsupported_examples example_${example})
diff --git a/unsupported/doc/snippets/CMakeLists.txt b/unsupported/doc/snippets/CMakeLists.txt
index 4a4157933..f0c5cc2a8 100644
--- a/unsupported/doc/snippets/CMakeLists.txt
+++ b/unsupported/doc/snippets/CMakeLists.txt
@@ -14,12 +14,10 @@ FOREACH(snippet_src ${snippets_SRCS})
if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
endif()
- GET_TARGET_PROPERTY(compile_snippet_executable
- ${compile_snippet_target} LOCATION)
ADD_CUSTOM_COMMAND(
TARGET ${compile_snippet_target}
POST_BUILD
- COMMAND ${compile_snippet_executable}
+ COMMAND ${compile_snippet_target}
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
)
ADD_DEPENDENCIES(unsupported_snippets ${compile_snippet_target})
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index d7376b0f5..75974f84f 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -1022,7 +1022,8 @@ void testNistLanczos1(void)
VERIFY_IS_EQUAL(lm.nfev, 79);
VERIFY_IS_EQUAL(lm.njev, 72);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ std::cout.precision(30);
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4290986055242372e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -1043,7 +1044,7 @@ void testNistLanczos1(void)
VERIFY_IS_EQUAL(lm.nfev, 9);
VERIFY_IS_EQUAL(lm.njev, 8);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430571737783119393e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
@@ -1262,8 +1263,8 @@ void testNistBoxBOD(void)
// check return value
VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev, 31);
- VERIFY_IS_EQUAL(lm.njev, 25);
+ VERIFY(lm.nfev < 31); // 31
+ VERIFY(lm.njev < 25); // 25
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
// check x
@@ -1342,10 +1343,6 @@ void testNistMGH17(void)
lm.parameters.maxfev = 1000;
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 2);
- VERIFY_IS_EQUAL(lm.nfev, 602 );
- VERIFY_IS_EQUAL(lm.njev, 545 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
// check x
@@ -1354,6 +1351,11 @@ void testNistMGH17(void)
VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY(lm.nfev < 650); // 602
+ VERIFY(lm.njev < 600); // 545
/*
* Second try
@@ -1832,8 +1834,8 @@ void test_NonLinearOptimization()
// NIST tests, level of difficulty = "Average"
CALL_SUBTEST/*_5*/(testNistHahn1());
CALL_SUBTEST/*_6*/(testNistMisra1d());
-// CALL_SUBTEST/*_7*/(testNistMGH17());
-// CALL_SUBTEST/*_8*/(testNistLanczos1());
+ CALL_SUBTEST/*_7*/(testNistMGH17());
+ CALL_SUBTEST/*_8*/(testNistLanczos1());
// // NIST tests, level of difficulty = "Higher"
CALL_SUBTEST/*_9*/(testNistRat42());
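
The test changes above replace exact-equality checks on the iteration counters (nfev/njev) with upper bounds, so small algorithmic changes no longer break the NIST regressions while the converged residual and parameters are still verified. A minimal sketch of the same pattern, using plain asserts instead of Eigen's VERIFY macros and purely illustrative numbers:

    #include <cassert>

    // Illustrative stand-in for the counters reported by a minimizer run.
    struct SolverStats { int nfev; int njev; double fvec_squared_norm; };

    void check_convergence(const SolverStats& s)
    {
      // Brittle form: exact counts change whenever the algorithm is tweaked.
      //   assert(s.nfev == 602 && s.njev == 545);
      // Relaxed form used by the patch: accept anything under a reasonable ceiling.
      assert(s.nfev < 650);                  // reference run needed 602 evaluations
      assert(s.njev < 600);                  // reference run needed 545 Jacobian evaluations
      assert(s.fvec_squared_norm < 5.5e-5);  // the converged residual is still checked
    }

    int main()
    {
      SolverStats s{602, 545, 5.4648946975e-5};
      check_convergence(s);
      return 0;
    }
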
diff --git a/unsupported/test/levenberg_marquardt.cpp b/unsupported/test/levenberg_marquardt.cpp
index 04464727d..1fa1c3c22 100644
--- a/unsupported/test/levenberg_marquardt.cpp
+++ b/unsupported/test/levenberg_marquardt.cpp
@@ -787,16 +787,17 @@ void testNistMGH10(void)
LevenbergMarquardt<MGH10_functor> lm(functor);
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 284 );
- VERIFY_IS_EQUAL(lm.njev(), 249 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
// check x
VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ // check return value
+ //VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 284 );
+ VERIFY_IS_EQUAL(lm.njev(), 249 );
/*
* Second try
@@ -805,16 +806,17 @@ void testNistMGH10(void)
// do the computation
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 126);
- VERIFY_IS_EQUAL(lm.njev(), 116);
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
// check x
VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ // check return value
+ //VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 126);
+ VERIFY_IS_EQUAL(lm.njev(), 116);
}
@@ -866,15 +868,16 @@ void testNistBoxBOD(void)
lm.setFactor(10);
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 31);
- VERIFY_IS_EQUAL(lm.njev(), 25);
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03);
// check x
VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY(lm.nfev() < 31); // 31
+ VERIFY(lm.njev() < 25); // 25
/*
* Second try
@@ -948,10 +951,6 @@ void testNistMGH17(void)
lm.setMaxfev(1000);
info = lm.minimize(x);
- // check return value
-// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success)
-// VERIFY_IS_EQUAL(lm.nfev(), 602 );
- VERIFY_IS_EQUAL(lm.njev(), 545 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05);
// check x
@@ -960,6 +959,11 @@ void testNistMGH17(void)
VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ // check return value
+// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success)
+ VERIFY(lm.nfev() < 700 ); // 602
+ VERIFY(lm.njev() < 600 ); // 545
/*
* Second try
@@ -1035,10 +1039,6 @@ void testNistMGH09(void)
lm.setMaxfev(1000);
info = lm.minimize(x);
- // check return value
- VERIFY_IS_EQUAL(info, 1);
- VERIFY_IS_EQUAL(lm.nfev(), 490 );
- VERIFY_IS_EQUAL(lm.njev(), 376 );
// check norm^2
VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04);
// check x
@@ -1046,6 +1046,10 @@ void testNistMGH09(void)
VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY(lm.nfev() < 510 ); // 490
+ VERIFY(lm.njev() < 400 ); // 376
/*
* Second try
diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h
index b40c23a2b..6a96e7c74 100644
--- a/unsupported/test/svd_common.h
+++ b/unsupported/test/svd_common.h
@@ -18,7 +18,7 @@
#define EIGEN_RUNTIME_NO_MALLOC
#include "main.h"
-#include <unsupported/Eigen/SVD>
+#include <unsupported/Eigen/BDCSVD>
#include <Eigen/LU>
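
With the include switched to the relocated BDCSVD module, the shared SVD tests exercise BDCSVD through the common SVDBase interface. A minimal sketch of that usage, assuming BDCSVD keeps the same (matrix, computationOptions) constructor signature as JacobiSVD:

    #include <iostream>
    #include <Eigen/Dense>
    #include <unsupported/Eigen/BDCSVD>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);

      // BDCSVD derives from SVDBase, so singularValues()/matrixU()/matrixV()
      // behave the same as they do for JacobiSVD.
      Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "Singular values:\n" << svd.singularValues() << "\n";
      return 0;
    }
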