path: root/unsupported
author    Benoit Steiner <benoit.steiner.goog@gmail.com>    2015-02-06 05:25:03 -0800
committer Benoit Steiner <benoit.steiner.goog@gmail.com>    2015-02-06 05:25:03 -0800
commit    c739102ef9a52fcb194dcc77f785aa55879987e4 (patch)
tree      22d19d1df4cb20baea532fa1ce13208329ff53e3 /unsupported
parent    2559fa9b0f20ea138cfb019d441ad1757221568d (diff)
parent    a8f2c6eec788c5cccc6beb9b5837544ea98a7154 (diff)
Pulled the latest changes from the trunk
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/AlignedVector3 | 25
-rw-r--r--  unsupported/Eigen/BDCSVD | 26
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h | 3
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h | 16
-rw-r--r--  unsupported/Eigen/IterativeSolvers | 3
-rw-r--r--  unsupported/Eigen/MPRealSupport | 4
-rw-r--r--  unsupported/Eigen/OpenGLSupport | 38
-rw-r--r--  unsupported/Eigen/SparseExtra | 3
-rw-r--r--  unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h | 1
-rw-r--r--  unsupported/Eigen/src/BDCSVD/BDCSVD.h | 949
-rw-r--r--  unsupported/Eigen/src/BDCSVD/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt | 29
-rw-r--r--  unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt | 21
-rw-r--r--  unsupported/Eigen/src/CMakeLists.txt | 1
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 39
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 41
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h | 37
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h | 39
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/MINRES.h | 46
-rw-r--r--  unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h | 56
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h | 9
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 5
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h | 11
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h | 28
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h | 4
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h | 8
-rw-r--r--  unsupported/Eigen/src/Polynomials/PolynomialUtils.h | 2
-rw-r--r--  unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h | 32
-rw-r--r--  unsupported/test/CMakeLists.txt | 8
-rw-r--r--  unsupported/test/NonLinearOptimization.cpp | 16
-rw-r--r--  unsupported/test/bdcsvd.cpp | 213
-rw-r--r--  unsupported/test/jacobisvd.cpp | 198
-rw-r--r--  unsupported/test/kronecker_product.cpp | 12
-rw-r--r--  unsupported/test/svd_common.h | 261
34 files changed, 218 insertions, 1972 deletions
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 7b45e6cce..1fce00525 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -57,6 +57,11 @@ template<typename _Scalar> class AlignedVector3
inline Index rows() const { return 3; }
inline Index cols() const { return 1; }
+
+ Scalar* data() { return m_coeffs.data(); }
+ const Scalar* data() const { return m_coeffs.data(); }
+ Index innerStride() const { return 1; }
+ Index outerStride() const { return m_coeffs.outerStride(); }
inline const Scalar& coeff(Index row, Index col) const
{ return m_coeffs.coeff(row, col); }
@@ -181,8 +186,28 @@ template<typename _Scalar> class AlignedVector3
{
return m_coeffs.template head<3>().isApprox(other,eps);
}
+
+ CoeffType& coeffs() { return m_coeffs; }
+ const CoeffType& coeffs() const { return m_coeffs; }
};
+namespace internal {
+
+template<typename Scalar>
+struct evaluator<AlignedVector3<Scalar> >
+ : evaluator<Matrix<Scalar,4,1> >::type
+{
+ typedef AlignedVector3<Scalar> XprType;
+ typedef typename evaluator<Matrix<Scalar,4,1> >::type Base;
+
+ typedef evaluator type;
+ typedef evaluator nestedType;
+
+ evaluator(const XprType &m) : Base(m.coeffs()) {}
+};
+
+}
+
//@}
}
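For context, a minimal usage sketch of the accessors added above (a hypothetical example, not part of the patch; it assumes the Eigen source root is on the include path):

    #include <unsupported/Eigen/AlignedVector3>
    #include <iostream>

    int main()
    {
      Eigen::AlignedVector3<float> v(1.f, 2.f, 3.f);
      // data()/innerStride()/outerStride() expose the padded 4-float storage,
      // so the wrapper can be handed to raw-buffer APIs.
      const float* raw = v.data();
      std::cout << raw[0] + raw[1] + raw[2] << "\n";   // prints 6
      // coeffs() returns the underlying Matrix<float,4,1>; the new
      // internal::evaluator specialization simply forwards to it.
      std::cout << v.coeffs().size() << "\n";          // prints 4
      return 0;
    }
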
diff --git a/unsupported/Eigen/BDCSVD b/unsupported/Eigen/BDCSVD
deleted file mode 100644
index 44649dbd0..000000000
--- a/unsupported/Eigen/BDCSVD
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef EIGEN_BDCSVD_MODULE_H
-#define EIGEN_BDCSVD_MODULE_H
-
-#include <Eigen/SVD>
-
-#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup BDCSVD_Module BDCSVD module
- *
- *
- *
- * This module provides Divide & Conquer SVD decomposition for matrices (both real and complex).
- * This decomposition is accessible via the following MatrixBase method:
- * - MatrixBase::bdcSvd()
- *
- * \code
- * #include <Eigen/BDCSVD>
- * \endcode
- */
-
-#include "src/BDCSVD/BDCSVD.h"
-
-#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_BDCSVD_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
index 7e448f7c0..a96d705a4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
@@ -89,6 +89,9 @@ class TensorLayoutSwapOp : public TensorBase<TensorLayoutSwapOp<XprType>, WriteA
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const OtherDerived& other)
{
+
+std::cout << "In assignment operator " << std::endl;
+
typedef TensorAssignOp<TensorLayoutSwapOp, const OtherDerived> Assign;
Assign assign(*this, other);
internal::TensorExecutor<const Assign, DefaultDevice, false>::run(assign, DefaultDevice());
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
index 022d20360..a844a4d68 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -157,50 +157,50 @@ struct eval<const TensorRef<PlainObjectType>, Eigen::Dense>
template <typename Scalar_, std::size_t NumIndices_, int Options_>
-struct nested<Tensor<Scalar_, NumIndices_, Options_>, 1, typename eval<Tensor<Scalar_, NumIndices_, Options_> >::type>
+struct nested<Tensor<Scalar_, NumIndices_, Options_> >
{
typedef const Tensor<Scalar_, NumIndices_, Options_>& type;
};
template <typename Scalar_, std::size_t NumIndices_, int Options_>
-struct nested<const Tensor<Scalar_, NumIndices_, Options_>, 1, typename eval<const Tensor<Scalar_, NumIndices_, Options_> >::type>
+struct nested<const Tensor<Scalar_, NumIndices_, Options_> >
{
typedef const Tensor<Scalar_, NumIndices_, Options_>& type;
};
template <typename Scalar_, typename Dimensions, int Options>
-struct nested<TensorFixedSize<Scalar_, Dimensions, Options>, 1, typename eval<TensorFixedSize<Scalar_, Dimensions, Options> >::type>
+struct nested<TensorFixedSize<Scalar_, Dimensions, Options> >
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};
template <typename Scalar_, typename Dimensions, int Options>
-struct nested<const TensorFixedSize<Scalar_, Dimensions, Options>, 1, typename eval<const TensorFixedSize<Scalar_, Dimensions, Options> >::type>
+struct nested<const TensorFixedSize<Scalar_, Dimensions, Options> >
{
typedef const TensorFixedSize<Scalar_, Dimensions, Options>& type;
};
template <typename PlainObjectType, int Options>
-struct nested<TensorMap<PlainObjectType, Options>, 1, typename eval<TensorMap<PlainObjectType, Options> >::type>
+struct nested<TensorMap<PlainObjectType, Options> >
{
typedef const TensorMap<PlainObjectType, Options>& type;
};
template <typename PlainObjectType, int Options>
-struct nested<const TensorMap<PlainObjectType, Options>, 1, typename eval<TensorMap<PlainObjectType, Options> >::type>
+struct nested<const TensorMap<PlainObjectType, Options> >
{
typedef const TensorMap<PlainObjectType, Options>& type;
};
template <typename PlainObjectType>
-struct nested<TensorRef<PlainObjectType>, 1, typename eval<TensorRef<PlainObjectType> >::type>
+struct nested<TensorRef<PlainObjectType> >
{
typedef const TensorRef<PlainObjectType>& type;
};
template <typename PlainObjectType>
-struct nested<const TensorRef<PlainObjectType>, 1, typename eval<TensorRef<PlainObjectType> >::type>
+struct nested<const TensorRef<PlainObjectType> >
{
typedef const TensorRef<PlainObjectType>& type;
};
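The specializations above drop the extra template parameters so that plain tensors keep being nested by const reference under the refactored expression machinery. A minimal illustration (a sketch assuming the CXX11 Tensor module headers; the static_assert only checks the trait and generates no code):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <type_traits>

    typedef Eigen::Tensor<float, 3> Tensor3f;
    // With the one-argument specialization, nesting a concrete Tensor inside a
    // larger expression stores a const reference instead of evaluating a copy.
    static_assert(std::is_same<Eigen::internal::nested<Tensor3f>::type,
                               const Tensor3f&>::value,
                  "plain tensors are nested by const reference");
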
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
index aa15403db..ff0d59b6e 100644
--- a/unsupported/Eigen/IterativeSolvers
+++ b/unsupported/Eigen/IterativeSolvers
@@ -24,9 +24,6 @@
*/
//@{
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/misc/SparseSolve.h"
-
#ifndef EIGEN_MPL2_ONLY
#include "src/IterativeSolvers/IterationController.h"
#include "src/IterativeSolvers/ConstrainedConjGrad.h"
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index 632de3854..8e42965a3 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -159,10 +159,10 @@ int main()
{
if(rows==0 || cols==0 || depth==0)
return;
-
+
mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())),
tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr()));
-
+
if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth;
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
index c4090ab11..6ca1b1217 100644
--- a/unsupported/Eigen/OpenGLSupport
+++ b/unsupported/Eigen/OpenGLSupport
@@ -51,7 +51,7 @@ namespace internal {
typename Scalar = typename XprType::Scalar, \
int Rows = XprType::RowsAtCompileTime, \
int Cols = XprType::ColsAtCompileTime, \
- bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ bool IsGLCompatible = bool(internal::evaluator<XprType>::Flags&LinearAccessBit) \
&& bool(XprType::Flags&DirectAccessBit) \
&& (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
@@ -178,11 +178,11 @@ template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Affine>& t)
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Projective>& t) { glLoadMatrix(t.matrix()); }
template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,AffineCompact>& t) { glLoadMatrix(Transform<Scalar,3,Affine>(t).matrix()); }
-static void glRotate(const Rotation2D<float>& rot)
+inline void glRotate(const Rotation2D<float>& rot)
{
glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f);
}
-static void glRotate(const Rotation2D<double>& rot)
+inline void glRotate(const Rotation2D<double>& rot)
{
glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0);
}
@@ -203,7 +203,7 @@ namespace internal {
typename Scalar = typename XprType::Scalar, \
int Rows = XprType::RowsAtCompileTime, \
int Cols = XprType::ColsAtCompileTime, \
- bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ bool IsGLCompatible = bool(internal::evaluator<XprType>::Flags&LinearAccessBit) \
&& bool(XprType::Flags&DirectAccessBit) \
&& (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
@@ -246,18 +246,18 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev)
#ifdef GL_VERSION_2_0
-static void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
-static void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
+inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
+inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
-static void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
-static void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
+inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
+inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
-static void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
-static void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
+inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
+inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
-static void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
-static void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
-static void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
+inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
+inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
+inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const)
@@ -294,9 +294,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix
#ifdef GL_VERSION_3_0
-static void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
-static void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
-static void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
+inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
+inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
+inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei)
@@ -305,9 +305,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei)
#endif
#ifdef GL_ARB_gpu_shader_fp64
-static void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
-static void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
-static void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
+inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
+inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
+inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei)
EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei)
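The helpers in this header are defined in the header itself; switching them from static to inline keeps a single shared definition when several translation units include OpenGLSupport, rather than one internal-linkage copy per unit (and the associated unused-function warnings). A hedged usage sketch, assuming the platform's OpenGL headers declare the GL API beforehand:

    #include <GL/gl.h>                          // assumed to provide the raw GL entry points
    #include <unsupported/Eigen/OpenGLSupport>

    void drawFrame(const Eigen::Affine3f& pose)
    {
      // Overload resolution picks the Transform and Rotation2D overloads shown
      // in the hunks above; as 'inline' functions they can be used from any TU.
      Eigen::glLoadMatrix(pose);
      Eigen::glRotate(Eigen::Rotation2D<float>(0.5f));
    }
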
diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra
index b5597902a..819cffa27 100644
--- a/unsupported/Eigen/SparseExtra
+++ b/unsupported/Eigen/SparseExtra
@@ -37,9 +37,6 @@
*/
-#include "../../Eigen/src/misc/Solve.h"
-#include "../../Eigen/src/misc/SparseSolve.h"
-
#include "src/SparseExtra/DynamicSparseMatrix.h"
#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h"
#include "src/SparseExtra/RandomSetter.h"
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index 590797973..8336c2644 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -593,7 +593,6 @@ inline const AutoDiffScalar<Matrix<typename internal::traits<DerTypeA>::Scalar,D
atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
{
using std::atan2;
- using std::max;
typedef typename internal::traits<DerTypeA>::Scalar Scalar;
typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
PlainADS ret;
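The hunk above only drops an unused using-declaration from the AutoDiffScalar atan2 overload; for reference, a small sketch of the function it touches (a hypothetical example of the usual AutoDiff module usage):

    #include <unsupported/Eigen/AutoDiff>
    #include <iostream>

    int main()
    {
      typedef Eigen::AutoDiffScalar<Eigen::VectorXd> AD;
      AD y(3.0, 2, 0), x(4.0, 2, 1);        // values with unit derivative seeds
      AD r = atan2(y, x);                   // found via ADL; this is the overload above
      std::cout << r.value() << " : " << r.derivatives().transpose() << "\n";
      return 0;
    }
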
diff --git a/unsupported/Eigen/src/BDCSVD/BDCSVD.h b/unsupported/Eigen/src/BDCSVD/BDCSVD.h
deleted file mode 100644
index a7c369633..000000000
--- a/unsupported/Eigen/src/BDCSVD/BDCSVD.h
+++ /dev/null
@@ -1,949 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
-// research report written by Ming Gu and Stanley C.Eisenstat
-// The code variable names correspond to the names they used in their
-// report
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
-//
-// Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_BDCSVD_H
-#define EIGEN_BDCSVD_H
-
-#define EPSILON 0.0000000000000001
-
-#define ALGOSWAP 16
-
-namespace Eigen {
-
-template<typename _MatrixType> class BDCSVD;
-
-namespace internal {
-
-template<typename _MatrixType>
-struct traits<BDCSVD<_MatrixType> >
-{
- typedef _MatrixType MatrixType;
-};
-
-} // end namespace internal
-
-
-/** \ingroup SVD_Module
- *
- *
- * \class BDCSVD
- *
- * \brief class Bidiagonal Divide and Conquer SVD
- *
- * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
- * We plan to have a very similar interface to JacobiSVD on this class.
- * It should be used to speed up the calcul of SVD for big matrices.
- */
-template<typename _MatrixType>
-class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
-{
- typedef SVDBase<BDCSVD> Base;
-
-public:
- using Base::rows;
- using Base::cols;
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::Index Index;
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
- MatrixOptions = MatrixType::Options
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
- MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
- MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
- MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
- MatrixVType;
- typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
- typedef typename internal::plain_row_type<MatrixType>::type RowType;
- typedef typename internal::plain_col_type<MatrixType>::type ColType;
- typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
- typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
- typedef Matrix<RealScalar, Dynamic, 1> VectorType;
- typedef Array<RealScalar, Dynamic, 1> ArrayXr;
-
- /** \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via BDCSVD::compute(const MatrixType&).
- */
- BDCSVD() : algoswap(ALGOSWAP), m_numIters(0)
- {}
-
-
- /** \brief Default Constructor with memory preallocation
- *
- * Like the default constructor but with preallocation of the internal data
- * according to the specified problem size.
- * \sa BDCSVD()
- */
- BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
- : algoswap(ALGOSWAP), m_numIters(0)
- {
- allocate(rows, cols, computationOptions);
- }
-
- /** \brief Constructor performing the decomposition of given matrix.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non - default) FullPivHouseholderQR preconditioner.
- */
- BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
- : algoswap(ALGOSWAP), m_numIters(0)
- {
- compute(matrix, computationOptions);
- }
-
- ~BDCSVD()
- {
- }
-
- /** \brief Method performing the decomposition of given matrix using custom options.
- *
- * \param matrix the matrix to decompose
- * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
- * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU,
- * #ComputeFullV, #ComputeThinV.
- *
- * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
- * available with the (non - default) FullPivHouseholderQR preconditioner.
- */
- BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
-
- /** \brief Method performing the decomposition of given matrix using current options.
- *
- * \param matrix the matrix to decompose
- *
- * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
- */
- BDCSVD& compute(const MatrixType& matrix)
- {
- return compute(matrix, this->m_computationOptions);
- }
-
- void setSwitchSize(int s)
- {
- eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
- algoswap = s;
- }
-
-
- /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
- *
- * \param b the right - hand - side of the equation to solve.
- *
- * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
- *
- * \note SVD solving is implicitly least - squares. Thus, this method serves both purposes of exact solving and least - squares solving.
- * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
- */
- template<typename Rhs>
- inline const internal::solve_retval<BDCSVD, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
- eigen_assert(computeU() && computeV() &&
- "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
- }
-
-
- const MatrixUType& matrixU() const
- {
- eigen_assert(this->m_isInitialized && "SVD is not initialized.");
- if (isTranspose){
- eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
- return this->m_matrixV;
- }
- else
- {
- eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
- return this->m_matrixU;
- }
-
- }
-
-
- const MatrixVType& matrixV() const
- {
- eigen_assert(this->m_isInitialized && "SVD is not initialized.");
- if (isTranspose){
- eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
- return this->m_matrixU;
- }
- else
- {
- eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
- return this->m_matrixV;
- }
- }
-
- using Base::computeU;
- using Base::computeV;
-
-private:
- void allocate(Index rows, Index cols, unsigned int computationOptions);
- void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
- void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
- void computeSingVals(const ArrayXr& col0, const ArrayXr& diag, VectorType& singVals,
- ArrayXr& shifts, ArrayXr& mus);
- void perturbCol0(const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals,
- const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat);
- void computeSingVecs(const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals,
- const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V);
- void deflation43(Index firstCol, Index shift, Index i, Index size);
- void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
- void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
- void copyUV(const typename internal::UpperBidiagonalization<MatrixX>::HouseholderUSequenceType& householderU,
- const typename internal::UpperBidiagonalization<MatrixX>::HouseholderVSequenceType& householderV);
-
-protected:
- MatrixXr m_naiveU, m_naiveV;
- MatrixXr m_computed;
- Index nRec;
- int algoswap;
- bool isTranspose, compU, compV;
-
-public:
- int m_numIters;
-}; //end class BDCSVD
-
-
-// Methode to allocate ans initialize matrix and attributs
-template<typename MatrixType>
-void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
-{
- isTranspose = (cols > rows);
- if (Base::allocate(rows, cols, computationOptions)) return;
- m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
- if (isTranspose){
- compU = this->computeU();
- compV = this->computeV();
- }
- else
- {
- compV = this->computeU();
- compU = this->computeV();
- }
- if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
- else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
-
- if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
-
-
- //should be changed for a cleaner implementation
- if (isTranspose){
- bool aux;
- if (this->computeU()||this->computeV()){
- aux = this->m_computeFullU;
- this->m_computeFullU = this->m_computeFullV;
- this->m_computeFullV = aux;
- aux = this->m_computeThinU;
- this->m_computeThinU = this->m_computeThinV;
- this->m_computeThinV = aux;
- }
- }
-}// end allocate
-
-// Methode which compute the BDCSVD for the int
-template<>
-BDCSVD<Matrix<int, Dynamic, Dynamic> >& BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
- allocate(matrix.rows(), matrix.cols(), computationOptions);
- this->m_nonzeroSingularValues = 0;
- m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
- for (int i=0; i<this->m_diagSize; i++) {
- this->m_singularValues.coeffRef(i) = 0;
- }
- if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
- if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
- this->m_isInitialized = true;
- return *this;
-}
-
-
-// Methode which compute the BDCSVD
-template<typename MatrixType>
-BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
-{
- allocate(matrix.rows(), matrix.cols(), computationOptions);
- using std::abs;
-
- //**** step 1 Bidiagonalization isTranspose = (matrix.cols()>matrix.rows()) ;
- MatrixType copy;
- if (isTranspose) copy = matrix.adjoint();
- else copy = matrix;
-
- internal::UpperBidiagonalization<MatrixX> bid(copy);
-
- //**** step 2 Divide
- m_computed.topRows(this->m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
- m_computed.template bottomRows<1>().setZero();
- divide(0, this->m_diagSize - 1, 0, 0, 0);
-
- //**** step 3 copy
- for (int i=0; i<this->m_diagSize; i++) {
- RealScalar a = abs(m_computed.coeff(i, i));
- this->m_singularValues.coeffRef(i) = a;
- if (a == 0){
- this->m_nonzeroSingularValues = i;
- this->m_singularValues.tail(this->m_diagSize - i - 1).setZero();
- break;
- }
- else if (i == this->m_diagSize - 1)
- {
- this->m_nonzeroSingularValues = i + 1;
- break;
- }
- }
- copyUV(bid.householderU(), bid.householderV());
- this->m_isInitialized = true;
- return *this;
-}// end compute
-
-
-template<typename MatrixType>
-void BDCSVD<MatrixType>::copyUV(const typename internal::UpperBidiagonalization<MatrixX>::HouseholderUSequenceType& householderU,
- const typename internal::UpperBidiagonalization<MatrixX>::HouseholderVSequenceType& householderV)
-{
- // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
- if (this->computeU()){
- Index Ucols = this->m_computeThinU ? this->m_nonzeroSingularValues : householderU.cols();
- this->m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
- Index blockCols = this->m_computeThinU ? this->m_nonzeroSingularValues : this->m_diagSize;
- this->m_matrixU.block(0, 0, this->m_diagSize, blockCols) =
- m_naiveV.template cast<Scalar>().block(0, 0, this->m_diagSize, blockCols);
- this->m_matrixU = householderU * this->m_matrixU;
- }
- if (this->computeV()){
- Index Vcols = this->m_computeThinV ? this->m_nonzeroSingularValues : householderV.cols();
- this->m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
- Index blockCols = this->m_computeThinV ? this->m_nonzeroSingularValues : this->m_diagSize;
- this->m_matrixV.block(0, 0, this->m_diagSize, blockCols) =
- m_naiveU.template cast<Scalar>().block(0, 0, this->m_diagSize, blockCols);
- this->m_matrixV = householderV * this->m_matrixV;
- }
-}
-
-// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the
-// place of the submatrix we are currently working on.
-
-//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
-//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
-// lastCol + 1 - firstCol is the size of the submatrix.
-//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
-//@param firstRowW : Same as firstRowW with the column.
-//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
-// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.
-template<typename MatrixType>
-void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
- Index firstColW, Index shift)
-{
- // requires nbRows = nbCols + 1;
- using std::pow;
- using std::sqrt;
- using std::abs;
- const Index n = lastCol - firstCol + 1;
- const Index k = n/2;
- RealScalar alphaK;
- RealScalar betaK;
- RealScalar r0;
- RealScalar lambda, phi, c0, s0;
- MatrixXr l, f;
- // We use the other algorithm which is more efficient for small
- // matrices.
- if (n < algoswap){
- JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
- ComputeFullU | (ComputeFullV * compV)) ;
- if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
- else
- {
- m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
- m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
- }
- if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
- m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
- for (int i=0; i<n; i++)
- {
- m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
- }
- return;
- }
- // We use the divide and conquer algorithm
- alphaK = m_computed(firstCol + k, firstCol + k);
- betaK = m_computed(firstCol + k + 1, firstCol + k);
- // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices
- // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the
- // right submatrix before the left one.
- divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
- divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
- if (compU)
- {
- lambda = m_naiveU(firstCol + k, firstCol + k);
- phi = m_naiveU(firstCol + k + 1, lastCol + 1);
- }
- else
- {
- lambda = m_naiveU(1, firstCol + k);
- phi = m_naiveU(0, lastCol + 1);
- }
- r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
- + abs(betaK * phi) * abs(betaK * phi));
- if (compU)
- {
- l = m_naiveU.row(firstCol + k).segment(firstCol, k);
- f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
- }
- else
- {
- l = m_naiveU.row(1).segment(firstCol, k);
- f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
- }
- if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
- if (r0 == 0)
- {
- c0 = 1;
- s0 = 0;
- }
- else
- {
- c0 = alphaK * lambda / r0;
- s0 = betaK * phi / r0;
- }
- if (compU)
- {
- MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
- // we shiftW Q1 to the right
- for (Index i = firstCol + k - 1; i >= firstCol; i--)
- {
- m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
- }
- // we shift q1 at the left with a factor c0
- m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
- // last column = q1 * - s0
- m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
- // first column = q2 * s0
- m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
- m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
- // q2 *= c0
- m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
- }
- else
- {
- RealScalar q1 = (m_naiveU(0, firstCol + k));
- // we shift Q1 to the right
- for (Index i = firstCol + k - 1; i >= firstCol; i--)
- {
- m_naiveU(0, i + 1) = m_naiveU(0, i);
- }
- // we shift q1 at the left with a factor c0
- m_naiveU(0, firstCol) = (q1 * c0);
- // last column = q1 * - s0
- m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
- // first column = q2 * s0
- m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
- // q2 *= c0
- m_naiveU(1, lastCol + 1) *= c0;
- m_naiveU.row(1).segment(firstCol + 1, k).setZero();
- m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
- }
- m_computed(firstCol + shift, firstCol + shift) = r0;
- m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
- m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
-
-
- // Second part: try to deflate singular values in combined matrix
- deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
-
- // Third part: compute SVD of combined matrix
- MatrixXr UofSVD, VofSVD;
- VectorType singVals;
- computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
- if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= UofSVD;
- else m_naiveU.block(0, firstCol, 2, n + 1) *= UofSVD;
- if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= VofSVD;
- m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
- m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
-}// end divide
-
-// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
-// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
-// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except
-// that if compV is false, then V is not computed. Singular values are sorted in decreasing order.
-//
-// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
-// handling of round-off errors, be consistent in ordering
-template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
-{
- // TODO Get rid of these copies (?)
- ArrayXr col0 = m_computed.block(firstCol, firstCol, n, 1);
- ArrayXr diag = m_computed.block(firstCol, firstCol, n, n).diagonal();
- diag(0) = 0;
-
- // compute singular values and vectors (in decreasing order)
- singVals.resize(n);
- U.resize(n+1, n+1);
- if (compV) V.resize(n, n);
-
- if (col0.hasNaN() || diag.hasNaN()) return;
-
- ArrayXr shifts(n), mus(n), zhat(n);
- computeSingVals(col0, diag, singVals, shifts, mus);
- perturbCol0(col0, diag, singVals, shifts, mus, zhat);
- computeSingVecs(zhat, diag, singVals, shifts, mus, U, V);
-
- // Reverse order so that singular values in increased order
- singVals.reverseInPlace();
- U.leftCols(n) = U.leftCols(n).rowwise().reverse().eval();
- if (compV) V = V.rowwise().reverse().eval();
-}
-
-template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSingVals(const ArrayXr& col0, const ArrayXr& diag,
- VectorType& singVals, ArrayXr& shifts, ArrayXr& mus)
-{
- using std::abs;
- using std::swap;
-
- Index n = col0.size();
- for (Index k = 0; k < n; ++k) {
- if (col0(k) == 0) {
- // entry is deflated, so singular value is on diagonal
- singVals(k) = diag(k);
- mus(k) = 0;
- shifts(k) = diag(k);
- continue;
- }
-
- // otherwise, use secular equation to find singular value
- RealScalar left = diag(k);
- RealScalar right = (k != n-1) ? diag(k+1) : (diag(n-1) + col0.matrix().norm());
-
- // first decide whether it's closer to the left end or the right end
- RealScalar mid = left + (right-left) / 2;
- RealScalar fMid = 1 + (col0.square() / ((diag + mid) * (diag - mid))).sum();
-
- RealScalar shift;
- if (k == n-1 || fMid > 0) shift = left;
- else shift = right;
-
- // measure everything relative to shift
- ArrayXr diagShifted = diag - shift;
-
- // initial guess
- RealScalar muPrev, muCur;
- if (shift == left) {
- muPrev = (right - left) * 0.1;
- if (k == n-1) muCur = right - left;
- else muCur = (right - left) * 0.5;
- } else {
- muPrev = -(right - left) * 0.1;
- muCur = -(right - left) * 0.5;
- }
-
- RealScalar fPrev = 1 + (col0.square() / ((diagShifted - muPrev) * (diag + shift + muPrev))).sum();
- RealScalar fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum();
- if (abs(fPrev) < abs(fCur)) {
- swap(fPrev, fCur);
- swap(muPrev, muCur);
- }
-
- // rational interpolation: fit a function of the form a / mu + b through the two previous
- // iterates and use its zero to compute the next iterate
- bool useBisection = false;
- while (abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * (std::max)(abs(muCur), abs(muPrev)) && fCur != fPrev && !useBisection) {
- ++m_numIters;
-
- RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev);
- RealScalar b = fCur - a / muCur;
-
- muPrev = muCur;
- fPrev = fCur;
- muCur = -a / b;
- fCur = 1 + (col0.square() / ((diagShifted - muCur) * (diag + shift + muCur))).sum();
-
- if (shift == left && (muCur < 0 || muCur > right - left)) useBisection = true;
- if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true;
- }
-
- // fall back on bisection method if rational interpolation did not work
- if (useBisection) {
- RealScalar leftShifted, rightShifted;
- if (shift == left) {
- leftShifted = 1e-30;
- if (k == 0) rightShifted = right - left;
- else rightShifted = (right - left) * 0.6; // theoretically we can take 0.5, but let's be safe
- } else {
- leftShifted = -(right - left) * 0.6;
- rightShifted = -1e-30;
- }
-
- RealScalar fLeft = 1 + (col0.square() / ((diagShifted - leftShifted) * (diag + shift + leftShifted))).sum();
- RealScalar fRight = 1 + (col0.square() / ((diagShifted - rightShifted) * (diag + shift + rightShifted))).sum();
- assert(fLeft * fRight < 0);
-
- while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * (std::max)(abs(leftShifted), abs(rightShifted))) {
- RealScalar midShifted = (leftShifted + rightShifted) / 2;
- RealScalar fMid = 1 + (col0.square() / ((diagShifted - midShifted) * (diag + shift + midShifted))).sum();
- if (fLeft * fMid < 0) {
- rightShifted = midShifted;
- fRight = fMid;
- } else {
- leftShifted = midShifted;
- fLeft = fMid;
- }
- }
-
- muCur = (leftShifted + rightShifted) / 2;
- }
-
- singVals[k] = shift + muCur;
- shifts[k] = shift;
- mus[k] = muCur;
-
- // perturb singular value slightly if it equals diagonal entry to avoid division by zero later
- // (deflation is supposed to avoid this from happening)
- if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
- if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
- }
-}
-
-
-// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
-template <typename MatrixType>
-void BDCSVD<MatrixType>::perturbCol0
- (const ArrayXr& col0, const ArrayXr& diag, const VectorType& singVals,
- const ArrayXr& shifts, const ArrayXr& mus, ArrayXr& zhat)
-{
- Index n = col0.size();
- for (Index k = 0; k < n; ++k) {
- if (col0(k) == 0)
- zhat(k) = 0;
- else {
- // see equation (3.6)
- using std::sqrt;
- RealScalar tmp =
- sqrt(
- (singVals(n-1) + diag(k)) * (mus(n-1) + (shifts(n-1) - diag(k)))
- * (
- ((singVals.head(k).array() + diag(k)) * (mus.head(k) + (shifts.head(k) - diag(k))))
- / ((diag.head(k).array() + diag(k)) * (diag.head(k).array() - diag(k)))
- ).prod()
- * (
- ((singVals.segment(k, n-k-1).array() + diag(k)) * (mus.segment(k, n-k-1) + (shifts.segment(k, n-k-1) - diag(k))))
- / ((diag.tail(n-k-1) + diag(k)) * (diag.tail(n-k-1) - diag(k)))
- ).prod()
- );
- if (col0(k) > 0) zhat(k) = tmp;
- else zhat(k) = -tmp;
- }
- }
-}
-
-// compute singular vectors
-template <typename MatrixType>
-void BDCSVD<MatrixType>::computeSingVecs
- (const ArrayXr& zhat, const ArrayXr& diag, const VectorType& singVals,
- const ArrayXr& shifts, const ArrayXr& mus, MatrixXr& U, MatrixXr& V)
-{
- Index n = zhat.size();
- for (Index k = 0; k < n; ++k) {
- if (zhat(k) == 0) {
- U.col(k) = VectorType::Unit(n+1, k);
- if (compV) V.col(k) = VectorType::Unit(n, k);
- } else {
- U.col(k).head(n) = zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]));
- U(n,k) = 0;
- U.col(k).normalize();
-
- if (compV) {
- V.col(k).tail(n-1) = (diag * zhat / (((diag - shifts(k)) - mus(k)) * (diag + singVals[k]))).tail(n-1);
- V(0,k) = -1;
- V.col(k).normalize();
- }
- }
- }
- U.col(n) = VectorType::Unit(n+1, n);
-}
-
-
-// page 12_13
-// i >= 1, di almost null and zi non null.
-// We use a rotation to zero out zi applied to the left of M
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
- using std::abs;
- using std::sqrt;
- using std::pow;
- RealScalar c = m_computed(firstCol + shift, firstCol + shift);
- RealScalar s = m_computed(i, firstCol + shift);
- RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
- if (r == 0){
- m_computed(i, i)=0;
- return;
- }
- c/=r;
- s/=r;
- m_computed(firstCol + shift, firstCol + shift) = r;
- m_computed(i, firstCol + shift) = 0;
- m_computed(i, i) = 0;
- if (compU){
- m_naiveU.col(firstCol).segment(firstCol,size) =
- c * m_naiveU.col(firstCol).segment(firstCol, size) -
- s * m_naiveU.col(i).segment(firstCol, size) ;
-
- m_naiveU.col(i).segment(firstCol, size) =
- (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
- (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
- }
-}// end deflation 43
-
-
-// page 13
-// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M)
-// We apply two rotations to have zj = 0;
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
- using std::abs;
- using std::sqrt;
- using std::conj;
- using std::pow;
- RealScalar c = m_computed(firstColm, firstColm + j - 1);
- RealScalar s = m_computed(firstColm, firstColm + i - 1);
- RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
- if (r==0){
- m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
- return;
- }
- c/=r;
- s/=r;
- m_computed(firstColm + i, firstColm) = r;
- m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
- m_computed(firstColm + j, firstColm) = 0;
- if (compU){
- m_naiveU.col(firstColu + i).segment(firstColu, size) =
- c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
- s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
-
- m_naiveU.col(firstColu + j).segment(firstColu, size) =
- (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
- (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
- }
- if (compV){
- m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
- c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
- s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
-
- m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
- (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
- (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
- }
-}// end deflation 44
-
-
-// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
-template <typename MatrixType>
-void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
- //condition 4.1
- using std::sqrt;
- const Index length = lastCol + 1 - firstCol;
- RealScalar norm1 = m_computed.block(firstCol+shift, firstCol+shift, length, 1).squaredNorm();
- RealScalar norm2 = m_computed.block(firstCol+shift, firstCol+shift, length, length).diagonal().squaredNorm();
- RealScalar EPS = 10 * NumTraits<RealScalar>::epsilon() * sqrt(norm1 + norm2);
- if (m_computed(firstCol + shift, firstCol + shift) < EPS){
- m_computed(firstCol + shift, firstCol + shift) = EPS;
- }
-
- //condition 4.2
- for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
- if (std::abs(m_computed(i, firstCol + shift)) < EPS){
- m_computed(i, firstCol + shift) = 0;
- }
- }
-
- //condition 4.3
- for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
- if (m_computed(i, i) < EPS){
- deflation43(firstCol, shift, i, length);
- }
- }
-
- //condition 4.4
-
- Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
- //we stock the final place of each line
- Index *permutation = new Index[length];
-
- for (Index p =1; p < length; p++) {
- if (i> firstCol + shift + k){
- permutation[p] = j;
- j++;
- } else if (j> lastCol + shift)
- {
- permutation[p] = i;
- i++;
- }
- else
- {
- if (m_computed(i, i) < m_computed(j, j)){
- permutation[p] = j;
- j++;
- }
- else
- {
- permutation[p] = i;
- i++;
- }
- }
- }
- //we do the permutation
- RealScalar aux;
- //we stock the current index of each col
- //and the column of each index
- Index *realInd = new Index[length];
- Index *realCol = new Index[length];
- for (int pos = 0; pos< length; pos++){
- realCol[pos] = pos + firstCol + shift;
- realInd[pos] = pos;
- }
- const Index Zero = firstCol + shift;
- VectorType temp;
- for (int i = 1; i < length - 1; i++){
- const Index I = i + Zero;
- const Index realI = realInd[i];
- const Index j = permutation[length - i] - Zero;
- const Index J = realCol[j];
-
- //diag displace
- aux = m_computed(I, I);
- m_computed(I, I) = m_computed(J, J);
- m_computed(J, J) = aux;
-
- //firstrow displace
- aux = m_computed(I, Zero);
- m_computed(I, Zero) = m_computed(J, Zero);
- m_computed(J, Zero) = aux;
-
- // change columns
- if (compU) {
- temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
- m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
- m_naiveU.col(J - shift).segment(firstCol, length + 1);
- m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
- }
- else
- {
- temp = m_naiveU.col(I - shift).segment(0, 2);
- m_naiveU.col(I - shift).segment(0, 2) <<
- m_naiveU.col(J - shift).segment(0, 2);
- m_naiveU.col(J - shift).segment(0, 2) << temp;
- }
- if (compV) {
- const Index CWI = I + firstColW - Zero;
- const Index CWJ = J + firstColW - Zero;
- temp = m_naiveV.col(CWI).segment(firstRowW, length);
- m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
- m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
- }
-
- //update real pos
- realCol[realI] = J;
- realCol[j] = I;
- realInd[J - Zero] = realI;
- realInd[I - Zero] = j;
- }
- for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
- if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
- deflation44(firstCol ,
- firstCol + shift,
- firstRowW,
- firstColW,
- i - Zero,
- i + 1 - Zero,
- length);
- }
- }
- delete [] permutation;
- delete [] realInd;
- delete [] realCol;
-}//end deflation
-
-
-namespace internal{
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<BDCSVD<_MatrixType>, Rhs>
- : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
-{
- typedef BDCSVD<_MatrixType> BDCSVDType;
- EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- eigen_assert(rhs().rows() == dec().rows());
- // A = U S V^*
- // So A^{ - 1} = V S^{ - 1} U^*
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
- Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
- return;
- }
-};
-
-} //end namespace internal
-
- /** \svd_module
- *
- * \return the singular value decomposition of \c *this computed by
- * BDC Algorithm
- *
- * \sa class BDCSVD
- */
-/*
-template<typename Derived>
-BDCSVD<typename MatrixBase<Derived>::PlainObject>
-MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
-{
- return BDCSVD<PlainObject>(*this, computationOptions);
-}
-*/
-
-} // end namespace Eigen
-
-#endif
diff --git a/unsupported/Eigen/src/BDCSVD/CMakeLists.txt b/unsupported/Eigen/src/BDCSVD/CMakeLists.txt
deleted file mode 100644
index 73b89ea18..000000000
--- a/unsupported/Eigen/src/BDCSVD/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_BDCSVD_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_BDCSVD_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/BDCSVD COMPONENT Devel
- )
diff --git a/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt b/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
deleted file mode 100644
index 0bc9a46e6..000000000
--- a/unsupported/Eigen/src/BDCSVD/TODOBdcsvd.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-TO DO LIST
-
-
-
-(optional optimization) - do all the allocations in the allocate part
- - support static matrices
- - return a error at compilation time when using integer matrices (int, long, std::complex<int>, ...)
-
-to finish the algorithm :
- -implement the last part of the algorithm as described on the reference paper.
- You may find more information on that part on this paper
-
- -to replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
- deflation.
-
-(suggested step by step resolution)
- 0) comment the call to Jacobi in the last part of the divide method and everything right after
- until the end of the method. What is commented can be a guideline to steps 3) 4) and 6)
- 1) solve the secular equation (Characteristic equation) on the values that are not null (zi!=0 and di!=0), after the deflation
- wich should be uncommented in the divide method
- 2) remember the values of the singular values that are already computed (zi=0)
- 3) assign the singular values found in m_computed at the right places (with the ones found in step 2) )
- in decreasing order
- 4) set the firstcol to zero (except the first element) in m_computed
- 5) compute all the singular vectors when CompV is set to true and only the left vectors when
- CompV is set to false
- 6) multiply naiveU and naiveV to the right by the matrices found, only naiveU when CompV is set to
- false, /!\ if CompU is false NaiveU has only 2 rows
- 7) delete everything commented in step 0)
diff --git a/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt b/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
deleted file mode 100644
index 8563ddab8..000000000
--- a/unsupported/Eigen/src/BDCSVD/doneInBDCSVD.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-This unsupported package is about a divide and conquer algorithm to compute SVD.
-
-The implementation follows as closely as possible the following reference paper :
-http://www.cs.yale.edu/publications/techreports/tr933.pdf
-
-The code documentation uses the same names for variables as the reference paper. The code, deflation included, is
-working but there are a few things that could be optimised as explained in the TODOBdsvd.
-
-In the code comments were put at the line where would be the third step of the algorithm so one could simply add the call
-of a function doing the last part of the algorithm and that would not require any knowledge of the part we implemented.
-
-In the TODOBdcsvd we explain what is the main difficulty of the last part and suggest a reference paper to help solve it.
-
-The implemented has trouble with fixed size matrices.
-
-In the actual implementation, it returns matrices of zero when ask to do a svd on an int matrix.
-
-
-Paper for the third part:
-http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
-
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
index 654a2327f..8eb2808e3 100644
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -12,4 +12,3 @@ ADD_SUBDIRECTORY(Skyline)
ADD_SUBDIRECTORY(SparseExtra)
ADD_SUBDIRECTORY(KroneckerProduct)
ADD_SUBDIRECTORY(Splines)
-ADD_SUBDIRECTORY(BDCSVD)
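The BDCSVD files removed above appear to have been superseded by the BDCSVD shipped in the main SVD module (an inference from the removal; the diff itself does not show the new location). Under that assumption, a minimal sketch of the equivalent call through the supported headers:

    #include <Eigen/SVD>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 60);
      // Divide-and-conquer SVD via the supported class; options mirror JacobiSVD.
      Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << svd.singularValues().head(5).transpose() << "\n";
      return 0;
    }
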
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
index 9fcc8a8d9..0e1b7d977 100644
--- a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -108,6 +108,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
using Base::m_isInitialized;
using Base::m_tolerance;
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -138,25 +139,9 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
~DGMRES() {}
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<DGMRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "DGMRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "DGMRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <DGMRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(int j=0; j<b.cols(); ++j)
@@ -175,10 +160,10 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest>& x) const
{
x = b;
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
/**
* Get the restart value
@@ -522,21 +507,5 @@ int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x,
return 0;
}
-namespace internal {
-
- template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
- : solve_retval_base<DGMRES<_MatrixType, _Preconditioner>, Rhs>
-{
- typedef DGMRES<_MatrixType, _Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-} // end namespace internal
-
} // end namespace Eigen
#endif
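DGMRES now routes through the _solve_impl / _solve_with_guess_impl hooks and leaves the public entry points to the common base class, making the hand-written solveWithGuess and solve_retval plumbing above redundant. A usage sketch under that assumption (the base class exposing solve() and solveWithGuess() is inferred from this refactoring, not shown in the diff):

    #include <unsupported/Eigen/IterativeSolvers>
    #include <Eigen/Sparse>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(100, 100);
      A.setIdentity();                                  // placeholder system
      Eigen::VectorXd b  = Eigen::VectorXd::Ones(100);
      Eigen::VectorXd x0 = b;

      Eigen::DGMRES<SpMat> solver(A);
      // solve()/solveWithGuess() are inherited; they dispatch to the
      // _solve_impl / _solve_with_guess_impl members kept in this file.
      Eigen::VectorXd x  = solver.solve(b);
      Eigen::VectorXd x2 = solver.solveWithGuess(b, x0);
      return 0;
    }
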
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index 67498705b..cd15ce0bf 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -281,6 +281,7 @@ private:
int m_restart;
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -315,25 +316,9 @@ public:
*/
void set_restart(const int restart) { m_restart=restart; }
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<GMRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "GMRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "GMRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <GMRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(int j=0; j<b.cols(); ++j)
@@ -353,35 +338,17 @@ public:
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
{
x = b;
if(x.squaredNorm() == 0) return; // Check Zero right hand side
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
protected:
};
-
-namespace internal {
-
- template<typename _MatrixType, typename _Preconditioner, typename Rhs>
-struct solve_retval<GMRES<_MatrixType, _Preconditioner>, Rhs>
- : solve_retval_base<GMRES<_MatrixType, _Preconditioner>, Rhs>
-{
- typedef GMRES<_MatrixType, _Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
-
} // end namespace Eigen
#endif // EIGEN_GMRES_H
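The same base-class plumbing applies to GMRES; what stays solver-specific is the restart length. A small sketch, assuming the usual IterativeSolverBase accessors (function name is a placeholder):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>

    // Restarted GMRES(m): keep at most m Krylov basis vectors before restarting.
    Eigen::VectorXd gmres_restarted(const Eigen::SparseMatrix<double>& A,
                                    const Eigen::VectorXd& b)
    {
      Eigen::GMRES<Eigen::SparseMatrix<double> > gmres(A);
      gmres.set_restart(30);          // bounds memory at the cost of extra iterations
      gmres.setMaxIterations(1000);
      gmres.setTolerance(1e-8);
      return gmres.solve(b);
    }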
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
index 661c1f2e0..35cfa315d 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -27,8 +27,11 @@ namespace Eigen {
*/
template <typename Scalar, int _UpLo = Lower, typename _OrderingType = NaturalOrdering<int> >
-class IncompleteCholesky : internal::noncopyable
+class IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >
{
+ protected:
+ typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;
+ using Base::m_isInitialized;
public:
typedef SparseMatrix<Scalar,ColMajor> MatrixType;
typedef _OrderingType OrderingType;
@@ -89,7 +92,7 @@ class IncompleteCholesky : internal::noncopyable
}
template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, Dest& x) const
{
eigen_assert(m_factorizationIsOk && "factorize() should be called first");
if (m_perm.rows() == b.rows())
@@ -103,22 +106,13 @@ class IncompleteCholesky : internal::noncopyable
x = m_perm * x;
x = m_scal.asDiagonal() * x;
}
- template<typename Rhs> inline const internal::solve_retval<IncompleteCholesky, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed");
- eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
- eigen_assert(cols()==b.rows()
- && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<IncompleteCholesky, Rhs>(*this, b.derived());
- }
+
protected:
SparseMatrix<Scalar,ColMajor> m_L; // The lower part stored in CSC
ScalarType m_scal; // The vector for scaling the matrix
Scalar m_shift; //The initial shift parameter
bool m_analysisIsOk;
bool m_factorizationIsOk;
- bool m_isInitialized;
ComputationInfo m_info;
PermutationType m_perm;
@@ -132,7 +126,6 @@ template<typename _MatrixType>
void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
{
using std::sqrt;
- using std::min;
eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
// Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
@@ -166,7 +159,7 @@ void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType
for (int j = 0; j < n; j++){
for (int k = colPtr[j]; k < colPtr[j+1]; k++)
vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
- mindiag = (min)(vals[colPtr[j]], mindiag);
+ mindiag = numext::mini(vals[colPtr[j]], mindiag);
}
if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
@@ -256,22 +249,6 @@ inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(const Idx
listCol[rowIdx(jk)].push_back(col);
}
}
-namespace internal {
-
-template<typename _Scalar, int _UpLo, typename OrderingType, typename Rhs>
-struct solve_retval<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
- : solve_retval_base<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
-{
- typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
-};
-
-} // end namespace internal
} // end namespace Eigen
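Deriving IncompleteCholesky from SparseSolverBase gives it the standard solve() entry point, which is what an iterative solver expects from its preconditioner. A hypothetical pairing with conjugate gradient (assuming the IterativeSolvers module header pulls in IncompleteCholesky.h and that CG's preconditioner protocol matches the analyzePattern()/factorize()/solve() interface here):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <unsupported/Eigen/IterativeSolvers>

    // CG preconditioned with the incomplete Cholesky factor built above.
    Eigen::VectorXd pcg_ic(const Eigen::SparseMatrix<double>& A,
                           const Eigen::VectorXd& b)
    {
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower,
                               Eigen::IncompleteCholesky<double> > cg;
      cg.compute(A);
      return cg.solve(b);
    }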
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
index 67e780181..7d08c3515 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
@@ -13,8 +13,12 @@
namespace Eigen {
template <typename _Scalar>
-class IncompleteLU
+class IncompleteLU : public SparseSolverBase<IncompleteLU<_Scalar> >
{
+ protected:
+ typedef SparseSolverBase<IncompleteLU<_Scalar> > Base;
+ using Base::m_isInitialized;
+
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef typename Vector::Index Index;
@@ -23,10 +27,10 @@ class IncompleteLU
public:
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
- IncompleteLU() : m_isInitialized(false) {}
+ IncompleteLU() {}
template<typename MatrixType>
- IncompleteLU(const MatrixType& mat) : m_isInitialized(false)
+ IncompleteLU(const MatrixType& mat)
{
compute(mat);
}
@@ -71,43 +75,16 @@ class IncompleteLU
}
template<typename Rhs, typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_lu.template triangularView<UnitLower>().solve(b);
x = m_lu.template triangularView<Upper>().solve(x);
}
- template<typename Rhs> inline const internal::solve_retval<IncompleteLU, Rhs>
- solve(const MatrixBase<Rhs>& b) const
- {
- eigen_assert(m_isInitialized && "IncompleteLU is not initialized.");
- eigen_assert(cols()==b.rows()
- && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval<IncompleteLU, Rhs>(*this, b.derived());
- }
-
protected:
FactorType m_lu;
- bool m_isInitialized;
-};
-
-namespace internal {
-
-template<typename _MatrixType, typename Rhs>
-struct solve_retval<IncompleteLU<_MatrixType>, Rhs>
- : solve_retval_base<IncompleteLU<_MatrixType>, Rhs>
-{
- typedef IncompleteLU<_MatrixType> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
};
-} // end namespace internal
-
} // end namespace Eigen
#endif // EIGEN_INCOMPLETE_LU_H
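With the hand-written solve() removed, SparseSolverBase provides it and routes to the two triangular solves in _solve_impl(). A minimal sketch of applying the factorization directly (helper name is a placeholder):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>

    // Apply the incomplete LU factors to a residual vector r.
    Eigen::VectorXd apply_ilu(const Eigen::SparseMatrix<double>& A,
                              const Eigen::VectorXd& r)
    {
      Eigen::IncompleteLU<double> ilu;
      ilu.compute(A);       // build the truncated L and U factors
      return ilu.solve(r);  // unit-lower solve followed by upper solve
    }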
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 98f9ecc17..aaf42c78a 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -217,6 +217,7 @@ namespace Eigen {
using Base::m_info;
using Base::m_isInitialized;
public:
+ using Base::_solve_impl;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -244,26 +245,10 @@ namespace Eigen {
/** Destructor. */
~MINRES(){}
-
- /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
- * \a x0 as an initial solution.
- *
- * \sa compute()
- */
- template<typename Rhs,typename Guess>
- inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
- solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
- {
- eigen_assert(m_isInitialized && "MINRES is not initialized.");
- eigen_assert(Base::rows()==b.rows()
- && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
- return internal::solve_retval_with_guess
- <MINRES, Rhs, Guess>(*this, b.derived(), x0);
- }
-
+
/** \internal */
template<typename Rhs,typename Dest>
- void _solveWithGuess(const Rhs& b, Dest& x) const
+ void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -284,33 +269,16 @@ namespace Eigen {
/** \internal */
template<typename Rhs,typename Dest>
- void _solve(const Rhs& b, Dest& x) const
+ void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
{
x.setZero();
- _solveWithGuess(b,x);
+ _solve_with_guess_impl(b,x.derived());
}
protected:
};
-
- namespace internal {
-
- template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
- struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
- : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
- {
- typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
- EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
-
- template<typename Dest> void evalTo(Dest& dst) const
- {
- dec()._solve(rhs(),dst);
- }
- };
-
- } // end namespace internal
-
+
} // end namespace Eigen
#endif // EIGEN_MINRES_H
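MINRES follows the same pattern as DGMRES and GMRES; the solver itself targets symmetric, possibly indefinite systems, with the referenced triangle as a template parameter. A brief sketch:

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>

    // Only the lower triangle of the symmetric matrix A is referenced.
    Eigen::VectorXd minres_lower(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
    {
      Eigen::MINRES<Eigen::SparseMatrix<double>, Eigen::Lower> solver(A);
      return solver.solve(b);
    }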
diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
index b8f2cba17..446fcac16 100644
--- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
@@ -48,8 +48,8 @@ class KroneckerProductBase : public ReturnByValue<Derived>
*/
Scalar coeff(Index row, Index col) const
{
- return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
- m_B.coeff(row % m_B.rows(), col % m_B.cols());
+ return m_A.coeff(typename Lhs::Index(row / m_B.rows()), typename Lhs::Index(col / m_B.cols())) *
+ m_B.coeff(typename Rhs::Index(row % m_B.rows()), typename Rhs::Index(col % m_B.cols()));
}
/*!
@@ -59,7 +59,7 @@ class KroneckerProductBase : public ReturnByValue<Derived>
Scalar coeff(Index i) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
- return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size());
+ return m_A.coeff(typename Lhs::Index(i / m_A.size())) * m_B.coeff(typename Rhs::Index(i % m_A.size()));
}
protected:
@@ -148,38 +148,53 @@ template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
{
- typedef typename Base::Index Index;
- const Index Br = m_B.rows(),
- Bc = m_B.cols();
- dst.resize(this->rows(), this->cols());
+ typedef typename Dest::Index DestIndex;
+ const typename Rhs::Index Br = m_B.rows(),
+ Bc = m_B.cols();
+ eigen_assert(this->rows() <= NumTraits<DestIndex>::highest());
+ eigen_assert(this->cols() <= NumTraits<DestIndex>::highest());
+ dst.resize(DestIndex(this->rows()), DestIndex(this->cols()));
dst.resizeNonZeros(0);
+ // 1 - evaluate the operands if needed:
+ typedef typename internal::nested_eval<Lhs,Dynamic>::type Lhs1;
+ typedef typename internal::remove_all<Lhs1>::type Lhs1Cleaned;
+ const Lhs1 lhs1(m_A);
+ typedef typename internal::nested_eval<Rhs,Dynamic>::type Rhs1;
+ typedef typename internal::remove_all<Rhs1>::type Rhs1Cleaned;
+ const Rhs1 rhs1(m_B);
+
+ // 2 - construct respective iterators
+ typedef Eigen::InnerIterator<Lhs1Cleaned> LhsInnerIterator;
+ typedef Eigen::InnerIterator<Rhs1Cleaned> RhsInnerIterator;
+
// compute number of non-zeros per innervectors of dst
{
VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols());
- for (Index kA=0; kA < m_A.outerSize(); ++kA)
- for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
+ for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA)
+ for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++;
VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols());
- for (Index kB=0; kB < m_B.outerSize(); ++kB)
- for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
+ for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB)
+ for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++;
Matrix<int,Dynamic,Dynamic,ColMajor> nnzAB = nnzB * nnzA.transpose();
dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size()));
}
- for (Index kA=0; kA < m_A.outerSize(); ++kA)
+ for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA)
{
- for (Index kB=0; kB < m_B.outerSize(); ++kB)
+ for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB)
{
- for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
+ for (LhsInnerIterator itA(lhs1,kA); itA; ++itA)
{
- for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
+ for (RhsInnerIterator itB(rhs1,kB); itB; ++itB)
{
- const Index i = itA.row() * Br + itB.row(),
- j = itA.col() * Bc + itB.col();
+ const DestIndex
+ i = DestIndex(itA.row() * Br + itB.row()),
+ j = DestIndex(itA.col() * Bc + itB.col());
dst.insert(i,j) = itA.value() * itB.value();
}
}
@@ -201,8 +216,7 @@ struct traits<KroneckerProduct<_Lhs,_Rhs> >
Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
- MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
- CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits<Scalar>::MulCost
+ MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret
};
typedef Matrix<Scalar,Rows,Cols> ReturnType;
@@ -215,7 +229,7 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
typedef typename remove_all<_Lhs>::type Lhs;
typedef typename remove_all<_Rhs>::type Rhs;
typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
- typedef typename promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, scalar_product_op<typename Lhs::Scalar, typename Rhs::Scalar> >::ret StorageKind;
typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
enum {
@@ -235,7 +249,7 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
CoeffReadCost = Dynamic
};
- typedef SparseMatrix<Scalar> ReturnType;
+ typedef SparseMatrix<Scalar, 0, Index> ReturnType;
};
} // end namespace internal
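The nested_eval step above means the operands of a sparse Kronecker product are evaluated once, so mixed dense/sparse arguments work, as the updated kronecker_product test further down exercises. A short sketch:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <unsupported/Eigen/KroneckerProduct>

    // Dense times sparse: entry (i*Br+r, j*Bc+c) of the result is A(i,j)*B(r,c).
    Eigen::SparseMatrix<double> kron(const Eigen::MatrixXd& A,
                                     const Eigen::SparseMatrix<double>& B)
    {
      Eigen::SparseMatrix<double> C;
      C = Eigen::kroneckerProduct(A, B);
      return C;
    }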
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
index f5290dee4..db3a0ef2c 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -19,18 +19,19 @@ namespace Eigen {
namespace internal {
-template <typename Scalar,int Rows, int Cols, typename Index>
+template <typename Scalar,int Rows, int Cols, typename PermIndex>
void lmqrsolv(
Matrix<Scalar,Rows,Cols> &s,
- const PermutationMatrix<Dynamic,Dynamic,Index> &iPerm,
+ const PermutationMatrix<Dynamic,Dynamic,PermIndex> &iPerm,
const Matrix<Scalar,Dynamic,1> &diag,
const Matrix<Scalar,Dynamic,1> &qtb,
Matrix<Scalar,Dynamic,1> &x,
Matrix<Scalar,Dynamic,1> &sdiag)
{
+ typedef typename Matrix<Scalar,Rows,Cols>::Index Index;
/* Local variables */
- Index i, j, k, l;
+ Index i, j, k;
Scalar temp;
Index n = s.cols();
Matrix<Scalar,Dynamic,1> wa(n);
@@ -52,7 +53,7 @@ void lmqrsolv(
/* prepare the row of d to be eliminated, locating the */
/* diagonal element using p from the qr factorization. */
- l = iPerm.indices()(j);
+ const PermIndex l = iPerm.indices()(j);
if (diag[l] == 0.)
break;
sdiag.tail(n-j).setZero();
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 160120d03..9e0545660 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -392,14 +392,15 @@ template<typename Derived> struct MatrixExponentialReturnValue
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- internal::matrix_exp_compute(m_src, result);
+ const typename internal::nested_eval<Derived, 10>::type tmp(m_src);
+ internal::matrix_exp_compute(tmp, result);
}
Index rows() const { return m_src.rows(); }
Index cols() const { return m_src.cols(); }
protected:
- const typename internal::nested<Derived, 10>::type m_src;
+ const typename internal::nested<Derived>::type m_src;
};
namespace internal {
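Moving the nested_eval into evalTo() means the argument of exp() is evaluated exactly once there, so compound expressions are handled cleanly. A tiny sketch:

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    // (A + B) is evaluated once inside evalTo() before matrix_exp_compute() runs.
    Eigen::MatrixXd expm_of_sum(const Eigen::MatrixXd& A, const Eigen::MatrixXd& B)
    {
      return (A + B).exp();
    }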
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index a35c11be5..b68aae5e8 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -485,7 +485,7 @@ template<typename Derived> class MatrixFunctionReturnValue
typedef typename internal::stem_function<Scalar>::type StemFunction;
protected:
- typedef typename internal::nested<Derived, 10>::type DerivedNested;
+ typedef typename internal::nested<Derived>::type DerivedNested;
public:
@@ -503,18 +503,19 @@ template<typename Derived> class MatrixFunctionReturnValue
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- typedef typename internal::remove_all<DerivedNested>::type DerivedNestedClean;
- typedef internal::traits<DerivedNestedClean> Traits;
+ typedef typename internal::nested_eval<Derived, 10>::type NestedEvalType;
+ typedef typename internal::remove_all<NestedEvalType>::type NestedEvalTypeClean;
+ typedef internal::traits<NestedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
- static const int Options = DerivedNestedClean::Options;
+ static const int Options = NestedEvalTypeClean::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixFunctionAtomic<DynMatrixType> AtomicType;
AtomicType atomic(m_f);
- internal::matrix_function_compute<DerivedNestedClean>::run(m_A, atomic, result);
+ internal::matrix_function_compute<NestedEvalTypeClean>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
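matrix_function_compute now runs on the evaluated operand type. For reference, the user-facing entry point is MatrixBase::matrixFunction() with a scalar "stem function"; the classic example evaluates the exponential:

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>
    #include <complex>

    // Stem function: for exp, every derivative is exp itself, so the
    // derivative-order argument can be ignored.
    std::complex<double> expfn(std::complex<double> x, int)
    {
      return std::exp(x);
    }

    Eigen::MatrixXd expm_via_matrixFunction(const Eigen::MatrixXd& A)
    {
      return A.matrixFunction(expfn);
    }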
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index d46ccc145..22bfdc4ac 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -53,15 +53,20 @@ void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result)
result(1,0) = Scalar(0);
result(1,1) = logA11;
- if (A(0,0) == A(1,1)) {
+ Scalar y = A(1,1) - A(0,0);
+ if (y==Scalar(0))
+ {
result(0,1) = A(0,1) / A(0,0);
- } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
- result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
- } else {
+ }
+ else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1))))
+ {
+ result(0,1) = A(0,1) * (logA11 - logA00) / y;
+ }
+ else
+ {
// computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
- Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
- result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
+ result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,2*M_PI*unwindingNumber)) / y;
}
}
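The switch from atanh2 to log1p in the off-diagonal entry rests on the identity below (taking atanh2(y,x) to mean atanh(y/x)); the same rewrite appears in computeSuperDiag() in MatrixPower.h further down. In LaTeX:

    2\,\mathrm{atanh}\!\left(\tfrac{y}{x}\right)
      = \log\frac{x+y}{x-y}
      = \log\frac{A_{11}}{A_{00}}
      = \log\!\left(1+\frac{A_{11}-A_{00}}{A_{00}}\right)
      = \mathrm{log1p}\!\left(\tfrac{y}{A_{00}}\right),
    \qquad y = A_{11}-A_{00},\quad x = A_{11}+A_{00}.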
@@ -310,7 +315,7 @@ public:
typedef typename Derived::Index Index;
protected:
- typedef typename internal::nested<Derived, 10>::type DerivedNested;
+ typedef typename internal::nested<Derived>::type DerivedNested;
public:
@@ -327,17 +332,18 @@ public:
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- typedef typename internal::remove_all<DerivedNested>::type DerivedNestedClean;
- typedef internal::traits<DerivedNestedClean> Traits;
+ typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
+ typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
+ typedef internal::traits<DerivedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
- static const int Options = DerivedNestedClean::Options;
+ static const int Options = DerivedEvalTypeClean::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixLogarithmAtomic<DynMatrixType> AtomicType;
AtomicType atomic;
- internal::matrix_function_compute<DerivedNestedClean>::run(m_A, atomic, result);
+ internal::matrix_function_compute<DerivedEvalTypeClean>::run(m_A, atomic, result);
}
Index rows() const { return m_A.rows(); }
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index ee665c18e..1e5a59c55 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -299,7 +299,7 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const
ComplexScalar logCurr = log(curr);
ComplexScalar logPrev = log(prev);
int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
- ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber);
+ ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, M_PI*unwindingNumber);
return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
}
@@ -311,7 +311,7 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev
using std::log;
using std::sinh;
- RealScalar w = numext::atanh2(curr - prev, curr + prev);
+ RealScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2);
return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev);
}
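On the user side this change is invisible; fractional powers are still requested through MatrixBase::pow(). A one-line sketch:

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    // Principal fractional power; computeSuperDiag() above handles the 2x2
    // diagonal blocks of the quasi-triangular factor.
    Eigen::MatrixXd half_power(const Eigen::MatrixXd& A)
    {
      return A.pow(0.5);
    }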
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
index 8ca4f4864..3a4d6eb3f 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -320,7 +320,7 @@ template<typename Derived> class MatrixSquareRootReturnValue
{
protected:
typedef typename Derived::Index Index;
- typedef typename internal::nested<Derived, 10>::type DerivedNested;
+ typedef typename internal::nested<Derived>::type DerivedNested;
public:
/** \brief Constructor.
@@ -338,8 +338,10 @@ template<typename Derived> class MatrixSquareRootReturnValue
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
- typedef typename internal::remove_all<DerivedNested>::type DerivedNestedClean;
- internal::matrix_sqrt_compute<DerivedNestedClean>::run(m_src, result);
+ typedef typename internal::nested_eval<Derived, 10>::type DerivedEvalType;
+ typedef typename internal::remove_all<DerivedEvalType>::type DerivedEvalTypeClean;
+ DerivedEvalType tmp(m_src);
+ internal::matrix_sqrt_compute<DerivedEvalTypeClean>::run(tmp, result);
}
Index rows() const { return m_src.rows(); }
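As with the exponential, the operand is now evaluated into a temporary inside evalTo() before matrix_sqrt_compute() runs. Sketch of the entry point:

    #include <Eigen/Dense>
    #include <unsupported/Eigen/MatrixFunctions>

    // Principal matrix square root (not the coefficient-wise Array sqrt).
    Eigen::MatrixXd principal_sqrt(const Eigen::MatrixXd& A)
    {
      return A.sqrt();
    }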
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
index 2bb8bc84a..40ba65b7e 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -56,7 +56,7 @@ T poly_eval( const Polynomials& poly, const T& x )
for( DenseIndex i=1; i<poly.size(); ++i ){
val = val*inv_x + poly[i]; }
- return std::pow(x,(T)(poly.size()-1)) * val;
+ return numext::pow(x,(T)(poly.size()-1)) * val;
}
}
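poly_eval() takes coefficients in increasing degree order and, as this hunk shows, switches to an inverse-power scheme for arguments with |x| > 1; numext::pow keeps that tail generic over scalar types. A small check (values chosen purely for illustration):

    #include <Eigen/Dense>
    #include <unsupported/Eigen/Polynomials>

    // p(x) = 1 - 3x + 2x^2 evaluated at x = 5: 2*25 - 15 + 1 = 36.
    double poly_eval_example()
    {
      Eigen::Vector3d coeffs(1.0, -3.0, 2.0);
      return Eigen::poly_eval(coeffs, 5.0);
    }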
diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
index dec16df28..976f9f270 100644
--- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
+++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -331,6 +331,7 @@ class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public Sparse
inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+ inline Index outer() const { return m_outer; }
protected:
const Index m_outer;
@@ -347,11 +348,42 @@ class DynamicSparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator : public
inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+ inline Index outer() const { return m_outer; }
protected:
const Index m_outer;
};
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _Index>
+struct evaluator<DynamicSparseMatrix<_Scalar,_Options,_Index> >
+ : evaluator_base<DynamicSparseMatrix<_Scalar,_Options,_Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef DynamicSparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
+ typedef typename SparseMatrixType::InnerIterator InnerIterator;
+ typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = SparseMatrixType::Flags
+ };
+
+ evaluator() : m_matrix(0) {}
+ evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}
+
+ operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
+ operator const SparseMatrixType&() const { return *m_matrix; }
+
+ Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); }
+
+ const SparseMatrixType *m_matrix;
+};
+
+}
+
} // end namespace Eigen
#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
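The new evaluator specialization lets DynamicSparseMatrix participate in the evaluator-based expression and assignment paths. A minimal sketch (assuming the conversion constructor goes through that machinery):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/SparseExtra>

    // Compress a DynamicSparseMatrix into a plain SparseMatrix.
    Eigen::SparseMatrix<double> snapshot(const Eigen::DynamicSparseMatrix<double>& D)
    {
      return Eigen::SparseMatrix<double>(D);
    }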
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 9f44e47f9..7b6751f00 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -5,6 +5,7 @@ add_custom_target(BuildUnsupported)
include_directories(../../test ../../unsupported ../../Eigen
${CMAKE_CURRENT_BINARY_DIR}/../../test)
+
find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
@@ -40,6 +41,7 @@ ei_add_test(matrix_function)
ei_add_test(matrix_power)
ei_add_test(matrix_square_root)
ei_add_test(alignedvector3)
+
ei_add_test(FFT)
find_package(MPFR 2.3.0)
@@ -74,8 +76,9 @@ if(NOT EIGEN_TEST_NO_OPENGL)
find_package(GLUT)
find_package(GLEW)
if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
+ include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS})
ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
- set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES})
+ set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES})
ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" )
else()
ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
@@ -86,12 +89,11 @@ endif()
ei_add_test(polynomialsolver)
ei_add_test(polynomialutils)
-ei_add_test(kronecker_product)
ei_add_test(splines)
ei_add_test(gmres)
ei_add_test(minres)
ei_add_test(levenberg_marquardt)
-ei_add_test(bdcsvd)
+ei_add_test(kronecker_product)
option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." ON)
if(EIGEN_TEST_CXX11)
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index 75974f84f..724ea7b5b 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -246,9 +246,9 @@ struct hybrj_functor : Functor<double>
int operator()(const VectorXd &x, VectorXd &fvec)
{
double temp, temp1, temp2;
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fvec.size()==n);
- for (int k = 0; k < n; k++)
+ for (VectorXd::Index k = 0; k < n; k++)
{
temp = (3. - 2.*x[k])*x[k];
temp1 = 0.;
@@ -261,12 +261,12 @@ struct hybrj_functor : Functor<double>
}
int df(const VectorXd &x, MatrixXd &fjac)
{
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fjac.rows()==n);
assert(fjac.cols()==n);
- for (int k = 0; k < n; k++)
+ for (VectorXd::Index k = 0; k < n; k++)
{
- for (int j = 0; j < n; j++)
+ for (VectorXd::Index j = 0; j < n; j++)
fjac(k,j) = 0.;
fjac(k,k) = 3.- 4.*x[k];
if (k) fjac(k,k-1) = -1.;
@@ -351,10 +351,10 @@ struct hybrd_functor : Functor<double>
int operator()(const VectorXd &x, VectorXd &fvec) const
{
double temp, temp1, temp2;
- const int n = x.size();
+ const VectorXd::Index n = x.size();
assert(fvec.size()==n);
- for (int k=0; k < n; k++)
+ for (VectorXd::Index k=0; k < n; k++)
{
temp = (3. - 2.*x[k])*x[k];
temp1 = 0.;
@@ -455,7 +455,7 @@ struct lmstr_functor : Functor<double>
assert(jac_row.size()==x.size());
double tmp1, tmp2, tmp3, tmp4;
- int i = rownb-2;
+ VectorXd::Index i = rownb-2;
tmp1 = i+1;
tmp2 = 16 - i - 1;
tmp3 = (i>=8)? tmp2 : tmp1;
diff --git a/unsupported/test/bdcsvd.cpp b/unsupported/test/bdcsvd.cpp
deleted file mode 100644
index 115a649b0..000000000
--- a/unsupported/test/bdcsvd.cpp
+++ /dev/null
@@ -1,213 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
-
-#include "svd_common.h"
-#include <iostream>
-#include <Eigen/LU>
-
-// check if "svd" is the good image of "m"
-template<typename MatrixType>
-void bdcsvd_check_full(const MatrixType& m, const BDCSVD<MatrixType>& svd)
-{
- svd_check_full< MatrixType, BDCSVD< MatrixType > >(m, svd);
-}
-
-// Compare to a reference value
-template<typename MatrixType>
-void bdcsvd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const BDCSVD<MatrixType>& referenceSvd)
-{
- svd_compare_to_full< MatrixType, BDCSVD< MatrixType > >(m, computationOptions, referenceSvd);
-} // end bdcsvd_compare_to_full
-
-
-template<typename MatrixType>
-void bdcsvd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- svd_solve< MatrixType, BDCSVD< MatrixType > >(m, computationOptions);
-} // end template bdcsvd_solve
-
-
-// test the computations options
-template<typename MatrixType>
-void bdcsvd_test_all_computation_options(const MatrixType& m)
-{
- BDCSVD<MatrixType> fullSvd(m, ComputeFullU|ComputeFullV);
- svd_test_computation_options_1< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
- svd_test_computation_options_2< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
-} // end bdcsvd_test_all_computation_options
-
-
-// Call a test with all the computations options
-template<typename MatrixType>
-void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
-{
- MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
- bdcsvd_test_all_computation_options<MatrixType>(m);
-} // end template bdcsvd
-
-
-// verify assert
-template<typename MatrixType>
-void bdcsvd_verify_assert(const MatrixType& m)
-{
- svd_verify_assert< MatrixType, BDCSVD< MatrixType > >(m);
-}// end template bdcsvd_verify_assert
-
-
-// test weird values
-template<typename MatrixType>
-void bdcsvd_inf_nan()
-{
- svd_inf_nan< MatrixType, BDCSVD< MatrixType > >();
-}// end template bdcsvd_inf_nan
-
-
-
-void bdcsvd_preallocate()
-{
- svd_preallocate< BDCSVD< MatrixXf > >();
-} // end bdcsvd_preallocate
-
-
-// compare the Singular values returned with Jacobi and Bdc
-template<typename MatrixType>
-void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0)
-{
- std::cout << "debut compare" << std::endl;
- MatrixType m = MatrixType::Random(a.rows(), a.cols());
- BDCSVD<MatrixType> bdc_svd(m);
- JacobiSVD<MatrixType> jacobi_svd(m);
- VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues());
- if(computationOptions & ComputeFullU)
- VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
- if(computationOptions & ComputeThinU)
- VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
- if(computationOptions & ComputeFullV)
- VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
- if(computationOptions & ComputeThinV)
- VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
- std::cout << "fin compare" << std::endl;
-} // end template compare_bdc_jacobi
-
-
-// call the tests
-void test_bdcsvd()
-{
- // test of Dynamic defined Matrix (42, 42) of float
- CALL_SUBTEST_11(( bdcsvd_verify_assert<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42)) ));
- CALL_SUBTEST_11(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42), 0) ));
- CALL_SUBTEST_11(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(42,42)) ));
-
- // test of Dynamic defined Matrix (50, 50) of double
- CALL_SUBTEST_13(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50,50)) ));
- CALL_SUBTEST_13(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50,50), 0) ));
- CALL_SUBTEST_13(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(50, 50)) ));
-
- // test of Dynamic defined Matrix (22, 22) of complex double
- CALL_SUBTEST_14(( bdcsvd_verify_assert<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>,Dynamic,Dynamic>(22,22)) ));
- CALL_SUBTEST_14(( compare_bdc_jacobi<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>, Dynamic, Dynamic> (22,22), 0) ));
- CALL_SUBTEST_14(( bdcsvd<Matrix<std::complex<double>,Dynamic,Dynamic> >
- (Matrix<std::complex<double>,Dynamic,Dynamic>(22, 22)) ));
-
- // test of Dynamic defined Matrix (10, 10) of int
- //CALL_SUBTEST_15(( bdcsvd_verify_assert<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10,10)) ));
- //CALL_SUBTEST_15(( compare_bdc_jacobi<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10,10), 0) ));
- //CALL_SUBTEST_15(( bdcsvd<Matrix<int,Dynamic,Dynamic> >
- // (Matrix<int,Dynamic,Dynamic>(10, 10)) ));
-
-
- // test of Dynamic defined Matrix (8, 6) of double
-
- CALL_SUBTEST_16(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8,6)) ));
- CALL_SUBTEST_16(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8, 6), 0) ));
- CALL_SUBTEST_16(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(8, 6)) ));
-
-
-
- // test of Dynamic defined Matrix (36, 12) of float
- CALL_SUBTEST_17(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(36, 12), 0) ));
- CALL_SUBTEST_17(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
- (Matrix<float,Dynamic,Dynamic>(36, 12)) ));
-
- // test of Dynamic defined Matrix (5, 8) of double
- CALL_SUBTEST_18(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(5, 8), 0) ));
- CALL_SUBTEST_18(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(5, 8)) ));
-
-
- // non regression tests
- CALL_SUBTEST_3(( bdcsvd_verify_assert(Matrix3f()) ));
- CALL_SUBTEST_4(( bdcsvd_verify_assert(Matrix4d()) ));
- CALL_SUBTEST_7(( bdcsvd_verify_assert(MatrixXf(10,12)) ));
- CALL_SUBTEST_8(( bdcsvd_verify_assert(MatrixXcd(7,5)) ));
-
- // SUBTESTS 1 and 2 on specifics matrix
- for(int i = 0; i < g_repeat; i++) {
- Matrix2cd m;
- m << 0, 1,
- 0, 1;
- CALL_SUBTEST_1(( bdcsvd(m, false) ));
- m << 1, 0,
- 1, 0;
- CALL_SUBTEST_1(( bdcsvd(m, false) ));
-
- Matrix2d n;
- n << 0, 0,
- 0, 0;
- CALL_SUBTEST_2(( bdcsvd(n, false) ));
- n << 0, 0,
- 0, 1;
- CALL_SUBTEST_2(( bdcsvd(n, false) ));
-
- // Statics matrix don't work with BDSVD yet
- // bdc algo on a random 3x3 float matrix
- // CALL_SUBTEST_3(( bdcsvd<Matrix3f>() ));
- // bdc algo on a random 4x4 double matrix
- // CALL_SUBTEST_4(( bdcsvd<Matrix4d>() ));
- // bdc algo on a random 3x5 float matrix
- // CALL_SUBTEST_5(( bdcsvd<Matrix<float,3,5> >() ));
-
- int r = internal::random<int>(1, 30),
- c = internal::random<int>(1, 30);
- CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(r,c)) ));
- CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(r,c)) ));
- (void) r;
- (void) c;
-
- // Test on inf/nan matrix
- CALL_SUBTEST_7( bdcsvd_inf_nan<MatrixXf>() );
- }
-
- CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
- CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
-
- // Test problem size constructors
- CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
-
-} // end test_bdcsvd
diff --git a/unsupported/test/jacobisvd.cpp b/unsupported/test/jacobisvd.cpp
deleted file mode 100644
index b4e884eee..000000000
--- a/unsupported/test/jacobisvd.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#include "svd_common.h"
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
-{
- svd_check_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner > >(m, svd);
-}
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
-{
- svd_compare_to_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner> >(m, computationOptions, referenceSvd);
-}
-
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- svd_solve< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, computationOptions);
-}
-
-
-
-template<typename MatrixType, int QRPreconditioner>
-void jacobisvd_test_all_computation_options(const MatrixType& m)
-{
-
- if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
- return;
-
- JacobiSVD< MatrixType, QRPreconditioner > fullSvd(m, ComputeFullU|ComputeFullV);
- svd_test_computation_options_1< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
-
- if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
- return;
- svd_test_computation_options_2< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
-
-}
-
-template<typename MatrixType>
-void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
-{
- MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
-
- jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
- jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
-}
-
-
-template<typename MatrixType>
-void jacobisvd_verify_assert(const MatrixType& m)
-{
-
- svd_verify_assert<MatrixType, JacobiSVD< MatrixType > >(m);
-
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- MatrixType a = MatrixType::Zero(rows, cols);
- a.setZero();
-
- if (ColsAtCompileTime == Dynamic)
- {
- JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
- VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
- }
-}
-
-template<typename MatrixType>
-void jacobisvd_method()
-{
- enum { Size = MatrixType::RowsAtCompileTime };
- typedef typename MatrixType::RealScalar RealScalar;
- typedef Matrix<RealScalar, Size, 1> RealVecType;
- MatrixType m = MatrixType::Identity();
- VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones());
- VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
- VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
- VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
-}
-
-
-
-template<typename MatrixType>
-void jacobisvd_inf_nan()
-{
- svd_inf_nan<MatrixType, JacobiSVD< MatrixType > >();
-}
-
-
-// Regression test for bug 286: JacobiSVD loops indefinitely with some
-// matrices containing denormal numbers.
-void jacobisvd_bug286()
-{
-#if defined __INTEL_COMPILER
-// shut up warning #239: floating point underflow
-#pragma warning push
-#pragma warning disable 239
-#endif
- Matrix2d M;
- M << -7.90884e-313, -4.94e-324,
- 0, 5.60844e-313;
-#if defined __INTEL_COMPILER
-#pragma warning pop
-#endif
- JacobiSVD<Matrix2d> svd;
- svd.compute(M); // just check we don't loop indefinitely
-}
-
-
-void jacobisvd_preallocate()
-{
- svd_preallocate< JacobiSVD <MatrixXf> >();
-}
-
-void test_jacobisvd()
-{
- CALL_SUBTEST_11(( jacobisvd<Matrix<double,Dynamic,Dynamic> >
- (Matrix<double,Dynamic,Dynamic>(16, 6)) ));
-
- CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
- CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
- CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
- CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
-
- for(int i = 0; i < g_repeat; i++) {
- Matrix2cd m;
- m << 0, 1,
- 0, 1;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
- m << 1, 0,
- 1, 0;
- CALL_SUBTEST_1(( jacobisvd(m, false) ));
-
- Matrix2d n;
- n << 0, 0,
- 0, 0;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
- n << 0, 0,
- 0, 1;
- CALL_SUBTEST_2(( jacobisvd(n, false) ));
-
- CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
- CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
- CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
- CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
-
- int r = internal::random<int>(1, 30),
- c = internal::random<int>(1, 30);
- CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
- CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
- (void) r;
- (void) c;
-
- // Test on inf/nan matrix
- CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
- }
-
- CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
- CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
-
-
- // test matrixbase method
- CALL_SUBTEST_1(( jacobisvd_method<Matrix2cd>() ));
- CALL_SUBTEST_3(( jacobisvd_method<Matrix3f>() ));
-
-
- // Test problem size constructors
- CALL_SUBTEST_7( JacobiSVD<MatrixXf>(10,10) );
-
- // Check that preallocation avoids subsequent mallocs
- CALL_SUBTEST_9( jacobisvd_preallocate() );
-
- // Regression check for bug 286
- CALL_SUBTEST_2( jacobisvd_bug286() );
-}
diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp
index 753a2d417..02411a262 100644
--- a/unsupported/test/kronecker_product.cpp
+++ b/unsupported/test/kronecker_product.cpp
@@ -216,5 +216,17 @@ void test_kronecker_product()
sC2 = kroneckerProduct(sA,sB);
dC = kroneckerProduct(dA,dB);
VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(dA,sB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(sA,dB);
+ dC = kroneckerProduct(dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
+
+ sC2 = kroneckerProduct(2*sA,sB);
+ dC = kroneckerProduct(2*dA,dB);
+ VERIFY_IS_APPROX(MatrixXf(sC2),dC);
}
}
diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h
deleted file mode 100644
index 6a96e7c74..000000000
--- a/unsupported/test/svd_common.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
-// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
-// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
-// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// discard stack allocation as that too bypasses malloc
-#define EIGEN_STACK_ALLOCATION_LIMIT 0
-#define EIGEN_RUNTIME_NO_MALLOC
-
-#include "main.h"
-#include <unsupported/Eigen/BDCSVD>
-#include <Eigen/LU>
-
-
-// check if "svd" is the good image of "m"
-template<typename MatrixType, typename SVD>
-void svd_check_full(const MatrixType& m, const SVD& svd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef typename MatrixType::Scalar Scalar;
- typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
- typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
-
-
- MatrixType sigma = MatrixType::Zero(rows, cols);
- sigma.diagonal() = svd.singularValues().template cast<Scalar>();
- MatrixUType u = svd.matrixU();
- MatrixVType v = svd.matrixV();
- VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
- VERIFY_IS_UNITARY(u);
- VERIFY_IS_UNITARY(v);
-} // end svd_check_full
-
-
-
-// Compare to a reference value
-template<typename MatrixType, typename SVD>
-void svd_compare_to_full(const MatrixType& m,
- unsigned int computationOptions,
- const SVD& referenceSvd)
-{
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
- Index diagSize = (std::min)(rows, cols);
-
- SVD svd(m, computationOptions);
-
- VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
- if(computationOptions & ComputeFullU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
- if(computationOptions & ComputeThinU)
- VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
- if(computationOptions & ComputeFullV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
- if(computationOptions & ComputeThinV)
- VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
-} // end svd_compare_to_full
-
-
-
-template<typename MatrixType, typename SVD>
-void svd_solve(const MatrixType& m, unsigned int computationOptions)
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
- typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
-
- RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
- SVD svd(m, computationOptions);
- SolutionType x = svd.solve(rhs);
- // evaluate normal equation which works also for least-squares solutions
- VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs);
-} // end svd_solve
-
-
-// test computations options
-// 2 functions because Jacobisvd can return before the second function
-template<typename MatrixType, typename SVD>
-void svd_test_computation_options_1(const MatrixType& m, const SVD& fullSvd)
-{
- svd_check_full< MatrixType, SVD >(m, fullSvd);
- svd_solve< MatrixType, SVD >(m, ComputeFullU | ComputeFullV);
-}
-
-
-template<typename MatrixType, typename SVD>
-void svd_test_computation_options_2(const MatrixType& m, const SVD& fullSvd)
-{
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, 0, fullSvd);
-
- if (MatrixType::ColsAtCompileTime == Dynamic) {
- // thin U/V are only available with dynamic number of columns
-
- svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU|ComputeThinV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeFullV, fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU , fullSvd);
- svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeThinV, fullSvd);
- svd_solve<MatrixType, SVD>(m, ComputeFullU | ComputeThinV);
- svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeFullV);
- svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeThinV);
-
- typedef typename MatrixType::Index Index;
- Index diagSize = (std::min)(m.rows(), m.cols());
- SVD svd(m, ComputeThinU | ComputeThinV);
- VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
- }
-}
-
-template<typename MatrixType, typename SVD>
-void svd_verify_assert(const MatrixType& m)
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::Index Index;
- Index rows = m.rows();
- Index cols = m.cols();
-
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime
- };
-
- typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
- RhsType rhs(rows);
- SVD svd;
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.singularValues())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- MatrixType a = MatrixType::Zero(rows, cols);
- a.setZero();
- svd.compute(a, 0);
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.matrixV())
- svd.singularValues();
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
-
- if (ColsAtCompileTime == Dynamic)
- {
- svd.compute(a, ComputeThinU);
- svd.matrixU();
- VERIFY_RAISES_ASSERT(svd.matrixV())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- svd.compute(a, ComputeThinV);
- svd.matrixV();
- VERIFY_RAISES_ASSERT(svd.matrixU())
- VERIFY_RAISES_ASSERT(svd.solve(rhs))
- }
- else
- {
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
- VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
- }
-}
-
-// work around stupid msvc error when constructing at compile time an expression that involves
-// a division by zero, even if the numeric type has floating point
-template<typename Scalar>
-EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
-
-// workaround aggressive optimization in ICC
-template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
-
-
-template<typename MatrixType, typename SVD>
-void svd_inf_nan()
-{
- // all this function does is verify we don't iterate infinitely on nan/inf values
-
- SVD svd;
- typedef typename MatrixType::Scalar Scalar;
- Scalar some_inf = Scalar(1) / zero<Scalar>();
- VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
- svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
-
- Scalar some_nan = zero<Scalar> () / zero<Scalar> ();
- VERIFY(some_nan != some_nan);
- svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
-
- MatrixType m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
- svd.compute(m, ComputeFullU | ComputeFullV);
-
- m = MatrixType::Zero(10,10);
- m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
- svd.compute(m, ComputeFullU | ComputeFullV);
-}
-
-
-template<typename SVD>
-void svd_preallocate()
-{
- Vector3f v(3.f, 2.f, 1.f);
- MatrixXf m = v.asDiagonal();
-
- internal::set_is_malloc_allowed(false);
- VERIFY_RAISES_ASSERT(VectorXf v(10);)
- SVD svd;
- internal::set_is_malloc_allowed(true);
- svd.compute(m);
- VERIFY_IS_APPROX(svd.singularValues(), v);
-
- SVD svd2(3,3);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_RAISES_ASSERT(svd2.matrixU());
- VERIFY_RAISES_ASSERT(svd2.matrixV());
- svd2.compute(m, ComputeFullU | ComputeFullV);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
-
- SVD svd3(3,3,ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(false);
- svd2.compute(m);
- internal::set_is_malloc_allowed(true);
- VERIFY_IS_APPROX(svd2.singularValues(), v);
- VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
- VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
- internal::set_is_malloc_allowed(false);
- svd2.compute(m, ComputeFullU|ComputeFullV);
- internal::set_is_malloc_allowed(true);
-}
-
-
-
-
-