author    Mark Borgerding <mark@borgerding.net>  2010-02-16 21:41:04 -0500
committer Mark Borgerding <mark@borgerding.net>  2010-02-16 21:41:04 -0500
commit    f200c84d9f9ba3e148e9f170d425ed80dd0f1bc5 (patch)
tree      f5d7ea27ddcb42d7c8c47af9f079bd9c4d2f70f3 /unsupported
parent    8f51a4ac9005c29fe99d3c1f70b99853be2a9f15 (diff)
parent    319bf3130b1257856408b0481401f7c353f97470 (diff)
merge
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/AlignedVector3                                    |   5
-rw-r--r--  unsupported/Eigen/AutoDiff                                          |   2
-rw-r--r--  unsupported/Eigen/FFT                                               |   4
-rw-r--r--  unsupported/Eigen/NonLinearOptimization                             |  22
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h           | 230
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h              | 321
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h        |  14
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h | 362
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h    | 357
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/chkder.h                |  27
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/covar.h                 |  41
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h                |  70
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h                |  30
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h                 | 143
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qform.h                 |  89
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrfac.h                 | 131
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/qrsolv.h                |  55
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h                |  66
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1updt.h                | 225
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/rwupdt.h                | 103
-rw-r--r--  unsupported/Eigen/src/NumericalDiff/NumericalDiff.h                 |   2
-rw-r--r--  unsupported/doc/examples/MatrixExponential.cpp                      |   3
-rw-r--r--  unsupported/doc/examples/MatrixFunction.cpp                         |   7
-rw-r--r--  unsupported/doc/examples/MatrixSine.cpp                             |   6
-rw-r--r--  unsupported/doc/examples/MatrixSinh.cpp                             |   6
-rw-r--r--  unsupported/test/FFT.cpp                                            | 243
-rw-r--r--  unsupported/test/FFTW.cpp                                           |   9
-rw-r--r--  unsupported/test/NonLinearOptimization.cpp                          |  84
-rw-r--r--  unsupported/test/matrix_exponential.cpp                             |  13
-rw-r--r--  unsupported/test/matrix_function.cpp                                | 126
30 files changed, 1116 insertions(+), 1680 deletions(-)
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 37018bfc6..6e9772cf5 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -64,7 +64,8 @@ template<typename _Scalar> class AlignedVector3
CoeffType m_coeffs;
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(AlignedVector3)
+ typedef MatrixBase<AlignedVector3<_Scalar> > Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3)
using Base::operator*;
inline int rows() const { return 3; }
@@ -188,7 +189,7 @@ template<typename _Scalar> class AlignedVector3
}
template<typename Derived>
- inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=dummy_precision<Scalar>()) const
+ inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=NumTraits<Scalar>::dummy_precision()) const
{
return m_coeffs.template head<3>().isApprox(other,eps);
}
diff --git a/unsupported/Eigen/AutoDiff b/unsupported/Eigen/AutoDiff
index a8b3ea90a..97164525c 100644
--- a/unsupported/Eigen/AutoDiff
+++ b/unsupported/Eigen/AutoDiff
@@ -25,8 +25,6 @@
#ifndef EIGEN_AUTODIFF_MODULE
#define EIGEN_AUTODIFF_MODULE
-#include <Eigen/Array>
-
namespace Eigen {
/** \ingroup Unsupported_modules
diff --git a/unsupported/Eigen/FFT b/unsupported/Eigen/FFT
index 454308fb7..87c31749b 100644
--- a/unsupported/Eigen/FFT
+++ b/unsupported/Eigen/FFT
@@ -201,7 +201,7 @@ class FFT
{
m_impl.inv( dst,src,nfft );
if ( HasFlag( Unscaled ) == false)
- scale(dst,1./nfft,nfft); // scale the time series
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
}
inline
@@ -209,7 +209,7 @@ class FFT
{
m_impl.inv( dst,src,nfft );
if ( HasFlag( Unscaled ) == false)
- scale(dst,1./nfft,nfft); // scale the time series
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
}
template<typename OutputDerived, typename ComplexDerived>
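For context, the Scalar(1./nfft) cast keeps the rescaling well-typed for non-double scalar types. A minimal round-trip sketch of the behaviour these hunks touch, assuming the unsupported FFT header with its default backend (not part of this patch):

#include <unsupported/Eigen/FFT>
#include <complex>
#include <vector>

int main()
{
  Eigen::FFT<float> fft;
  std::vector<float> timevec(8, 1.f);                 // simple real signal
  std::vector<std::complex<float> > freqvec;

  fft.fwd(freqvec, timevec);                          // forward transform
  std::vector<float> roundtrip;
  fft.inv(roundtrip, freqvec);                        // inv() applies the 1/nfft factor
  // Unless the Unscaled flag is set, roundtrip now matches timevec up to
  // round-off, because the inverse rescales by Scalar(1./nfft).
  return 0;
}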
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
index f15e09386..149b658f8 100644
--- a/unsupported/Eigen/NonLinearOptimization
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -25,6 +25,8 @@
#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE
#define EIGEN_NONLINEAROPTIMIZATION_MODULE
+#include <vector>
+
#include <Eigen/Core>
#include <Eigen/Jacobi>
#include <Eigen/QR>
@@ -54,7 +56,7 @@ namespace Eigen {
* fortran. Those implementations have been carefully tuned, tested, and used
* for several decades.
*
- * The original fortran code was automatically translated (using f2c) in C and
+ * The original fortran code was automatically translated to C using f2c (http://en.wikipedia.org/wiki/F2c),
* then c++, and then cleaned by several different authors.
 * The last of those cleanings is our starting point:
* http://devernay.free.fr/hacks/cminpack.html
@@ -108,19 +110,23 @@ namespace Eigen {
* handle the loop: init + loop until a stop condition is met. Those are provided for
* convenience.
*
- * As an example, the method LevenbergMarquardt.minimizeNumericalDiff() is
+ * As an example, the method LevenbergMarquardt::minimize() is
 * implemented as follows:
* \code
- * LevenbergMarquardt.minimizeNumericalDiff(Matrix< Scalar, Dynamic, 1 > &x,
- * const int mode )
+ * Status LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x, const int mode)
* {
- * Status status = minimizeNumericalDiffInit(x, mode);
- * while (status==Running)
- * status = minimizeNumericalDiffOneStep(x, mode);
+ * Status status = minimizeInit(x, mode);
+ * do {
+ * status = minimizeOneStep(x, mode);
+ * } while (status==Running);
* return status;
* }
* \endcode
*
+ * \section examples Examples
+ *
+ * The easiest way to understand how to use this module is by looking at the many examples in the file
+ * unsupported/test/NonLinearOptimization.cpp.
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -129,9 +135,7 @@ namespace Eigen {
#include "src/NonLinearOptimization/r1updt.h"
#include "src/NonLinearOptimization/r1mpyq.h"
#include "src/NonLinearOptimization/rwupdt.h"
-#include "src/NonLinearOptimization/qrfac.h"
#include "src/NonLinearOptimization/fdjac1.h"
-#include "src/NonLinearOptimization/qform.h"
#include "src/NonLinearOptimization/lmpar.h"
#include "src/NonLinearOptimization/dogleg.h"
#include "src/NonLinearOptimization/covar.h"
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index fd1938a87..39c23cdc5 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -29,74 +29,32 @@
template <typename Scalar> Scalar log2(Scalar v) { return std::log(v)/std::log(Scalar(2)); }
#endif
-/** \ingroup MatrixFunctions_Module
- *
- * \brief Compute the matrix exponential.
- *
- * \param[in] M matrix whose exponential is to be computed.
- * \param[out] result pointer to the matrix in which to store the result.
- *
- * The matrix exponential of \f$ M \f$ is defined by
- * \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
- * The matrix exponential can be used to solve linear ordinary
- * differential equations: the solution of \f$ y' = My \f$ with the
- * initial condition \f$ y(0) = y_0 \f$ is given by
- * \f$ y(t) = \exp(M) y_0 \f$.
- *
- * The cost of the computation is approximately \f$ 20 n^3 \f$ for
- * matrices of size \f$ n \f$. The number 20 depends weakly on the
- * norm of the matrix.
- *
- * The matrix exponential is computed using the scaling-and-squaring
- * method combined with Pad&eacute; approximation. The matrix is first
- * rescaled, then the exponential of the reduced matrix is computed,
- * and then the rescaling is undone by repeated
- * squaring. The degree of the Pad&eacute; approximant is chosen such
- * that the approximation error is less than the round-off
- * error. However, errors may accumulate during the squaring phase.
- *
- * Details of the algorithm can be found in: Nicholas J. Higham, "The
- * scaling and squaring method for the matrix exponential revisited,"
- * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
- * 2005.
- *
- * Example: The following program checks that
- * \f[ \exp \left[ \begin{array}{ccc}
- * 0 & \frac14\pi & 0 \\
- * -\frac14\pi & 0 & 0 \\
- * 0 & 0 & 0
- * \end{array} \right] = \left[ \begin{array}{ccc}
- * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
- * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
- * 0 & 0 & 1
- * \end{array} \right]. \f]
- * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
- * the z-axis.
- *
- * \include MatrixExponential.cpp
- * Output: \verbinclude MatrixExponential.out
- *
- * \note \p M has to be a matrix of \c float, \c double,
- * \c complex<float> or \c complex<double> .
- */
-template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
- typename MatrixBase<Derived>::PlainMatrixType* result);
/** \ingroup MatrixFunctions_Module
* \brief Class for computing the matrix exponential.
+ * \tparam MatrixType type of the argument of the exponential,
+ * expected to be an instantiation of the Matrix class template.
*/
template <typename MatrixType>
class MatrixExponential {
public:
- /** \brief Compute the matrix exponential.
- *
- * \param M matrix whose exponential is to be computed.
- * \param result pointer to the matrix in which to store the result.
- */
- MatrixExponential(const MatrixType &M, MatrixType *result);
+ /** \brief Constructor.
+ *
+ * The class stores a reference to \p M, so it should not be
+ * changed (or destroyed) before compute() is called.
+ *
+ * \param[in] M matrix whose exponential is to be computed.
+ */
+ MatrixExponential(const MatrixType &M);
+
+ /** \brief Computes the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p M in the constructor.
+ */
+ template <typename ResultType>
+ void compute(ResultType &result);
private:
@@ -109,7 +67,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade3(const MatrixType &A);
@@ -118,7 +76,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade5(const MatrixType &A);
@@ -127,7 +85,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade7(const MatrixType &A);
@@ -136,7 +94,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade9(const MatrixType &A);
@@ -145,7 +103,7 @@ class MatrixExponential {
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
* approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
*
- * \param A Argument of matrix exponential
+ * \param[in] A Argument of matrix exponential
*/
void pade13(const MatrixType &A);
@@ -171,10 +129,10 @@ class MatrixExponential {
void computeUV(float);
typedef typename ei_traits<MatrixType>::Scalar Scalar;
- typedef typename NumTraits<typename ei_traits<MatrixType>::Scalar>::Real RealScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
- /** \brief Pointer to matrix whose exponential is to be computed. */
- const MatrixType* m_M;
+ /** \brief Reference to matrix whose exponential is to be computed. */
+ const MatrixType& m_M;
/** \brief Even-degree terms in numerator of Pad&eacute; approximant. */
MatrixType m_U;
@@ -199,8 +157,8 @@ class MatrixExponential {
};
template <typename MatrixType>
-MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType *result) :
- m_M(&M),
+MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
+ m_M(M),
m_U(M.rows(),M.cols()),
m_V(M.rows(),M.cols()),
m_tmp1(M.rows(),M.cols()),
@@ -209,12 +167,19 @@ MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType
m_squarings(0),
m_l1norm(static_cast<float>(M.cwiseAbs().colwise().sum().maxCoeff()))
{
+ /* empty body */
+}
+
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixExponential<MatrixType>::compute(ResultType &result)
+{
computeUV(RealScalar());
m_tmp1 = m_U + m_V; // numerator of Pade approximant
m_tmp2 = -m_U + m_V; // denominator of Pade approximant
- *result = m_tmp2.partialPivLu().solve(m_tmp1);
+ result = m_tmp2.partialPivLu().solve(m_tmp1);
for (int i=0; i<m_squarings; i++)
- *result *= *result; // undo scaling by repeated squaring
+ result *= result; // undo scaling by repeated squaring
}
template <typename MatrixType>
@@ -286,13 +251,13 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(float)
{
if (m_l1norm < 4.258730016922831e-001) {
- pade3(*m_M);
+ pade3(m_M);
} else if (m_l1norm < 1.880152677804762e+000) {
- pade5(*m_M);
+ pade5(m_M);
} else {
const float maxnorm = 3.925724783138660f;
m_squarings = std::max(0, (int)ceil(log2(m_l1norm / maxnorm)));
- MatrixType A = *m_M / std::pow(Scalar(2), Scalar(static_cast<RealScalar>(m_squarings)));
+ MatrixType A = m_M / std::pow(Scalar(2), Scalar(static_cast<RealScalar>(m_squarings)));
pade7(A);
}
}
@@ -301,27 +266,126 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(double)
{
if (m_l1norm < 1.495585217958292e-002) {
- pade3(*m_M);
+ pade3(m_M);
} else if (m_l1norm < 2.539398330063230e-001) {
- pade5(*m_M);
+ pade5(m_M);
} else if (m_l1norm < 9.504178996162932e-001) {
- pade7(*m_M);
+ pade7(m_M);
} else if (m_l1norm < 2.097847961257068e+000) {
- pade9(*m_M);
+ pade9(m_M);
} else {
const double maxnorm = 5.371920351148152;
m_squarings = std::max(0, (int)ceil(log2(m_l1norm / maxnorm)));
- MatrixType A = *m_M / std::pow(Scalar(2), Scalar(m_squarings));
+ MatrixType A = m_M / std::pow(Scalar(2), Scalar(m_squarings));
pade13(A);
}
}
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix exponential of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix exponential.
+ *
+ * This class holds the argument to the matrix exponential until it
+ * is assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * ei_matrix_exponential() and most of the time this is the only way
+ * it is used.
+ */
+template<typename Derived> struct MatrixExponentialReturnValue
+: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
+{
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] src %Matrix (expression) forming the argument of the
+ * matrix exponential.
+ */
+ MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
+
+ /** \brief Compute the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p src in the
+ * constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename ei_eval<Derived>::type srcEvaluated = m_src.eval();
+ MatrixExponential<typename Derived::PlainMatrixType> me(srcEvaluated);
+ me.compute(result);
+ }
+
+ int rows() const { return m_src.rows(); }
+ int cols() const { return m_src.cols(); }
+
+ protected:
+ const Derived& m_src;
+};
+
+template<typename Derived>
+struct ei_traits<MatrixExponentialReturnValue<Derived> >
+{
+ typedef typename Derived::PlainMatrixType ReturnMatrixType;
+};
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Compute the matrix exponential.
+ *
+ * \param[in] M matrix whose exponential is to be computed.
+ * \returns expression representing the matrix exponential of \p M.
+ *
+ * The matrix exponential of \f$ M \f$ is defined by
+ * \f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
+ * The matrix exponential can be used to solve linear ordinary
+ * differential equations: the solution of \f$ y' = My \f$ with the
+ * initial condition \f$ y(0) = y_0 \f$ is given by
+ * \f$ y(t) = \exp(M) y_0 \f$.
+ *
+ * The cost of the computation is approximately \f$ 20 n^3 \f$ for
+ * matrices of size \f$ n \f$. The number 20 depends weakly on the
+ * norm of the matrix.
+ *
+ * The matrix exponential is computed using the scaling-and-squaring
+ * method combined with Pad&eacute; approximation. The matrix is first
+ * rescaled, then the exponential of the reduced matrix is computed,
+ * and then the rescaling is undone by repeated
+ * squaring. The degree of the Pad&eacute; approximant is chosen such
+ * that the approximation error is less than the round-off
+ * error. However, errors may accumulate during the squaring phase.
+ *
+ * Details of the algorithm can be found in: Nicholas J. Higham, "The
+ * scaling and squaring method for the matrix exponential revisited,"
+ * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179&ndash;1193,
+ * 2005.
+ *
+ * Example: The following program checks that
+ * \f[ \exp \left[ \begin{array}{ccc}
+ * 0 & \frac14\pi & 0 \\
+ * -\frac14\pi & 0 & 0 \\
+ * 0 & 0 & 0
+ * \end{array} \right] = \left[ \begin{array}{ccc}
+ * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ * 0 & 0 & 1
+ * \end{array} \right]. \f]
+ * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+ * the z-axis.
+ *
+ * \include MatrixExponential.cpp
+ * Output: \verbinclude MatrixExponential.out
+ *
+ * \note \p M has to be a matrix of \c float, \c double,
+ * \c complex<float> or \c complex<double> .
+ */
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase<Derived> &M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixExponentialReturnValue<Derived>
+ei_matrix_exponential(const MatrixBase<Derived> &M)
{
ei_assert(M.rows() == M.cols());
- MatrixExponential<typename MatrixBase<Derived>::PlainMatrixType>(M, result);
+ return MatrixExponentialReturnValue<Derived>(M.derived());
}
#endif // EIGEN_MATRIX_EXPONENTIAL
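Since ei_matrix_exponential() now returns a MatrixExponentialReturnValue proxy, it composes directly with assignment. A minimal sketch of the rotation example from the documentation above, assuming the MatrixFunctions module header (not part of this patch):

#include <unsupported/Eigen/MatrixFunctions>
#include <cmath>
#include <iostream>
using namespace Eigen;

int main()
{
  const double pi = std::acos(-1.0);

  // Generator of a rotation by pi/4 around the z-axis, as in the docs above.
  MatrixXd A(3,3);
  A <<  0.0,   pi/4, 0.0,
       -pi/4,  0.0,  0.0,
        0.0,   0.0,  0.0;

  // The proxy evaluates lazily; the computation happens on assignment.
  MatrixXd expA = ei_matrix_exponential(A);
  std::cout << "exp(A) =\n" << expA << std::endl;   // a rotation matrix
  return 0;
}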
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index fbc55507f..d63bcbce9 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -28,66 +28,11 @@
#include "StemFunction.h"
#include "MatrixFunctionAtomic.h"
-/** \ingroup MatrixFunctions_Module
- *
- * \brief Compute a matrix function.
- *
- * \param[in] M argument of matrix function, should be a square matrix.
- * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(M) \f$.
- *
- * This function computes \f$ f(A) \f$ and stores the result in the
- * matrix pointed to by \p result.
- *
- * Suppose that \p M is a matrix whose entries have type \c Scalar.
- * Then, the second argument, \p f, should be a function with prototype
- * \code
- * ComplexScalar f(ComplexScalar, int)
- * \endcode
- * where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
- * real (e.g., \c float or \c double) and \c ComplexScalar =
- * \c Scalar if \c Scalar is complex. The return value of \c f(x,n)
- * should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
- *
- * This routine uses the algorithm described in:
- * Philip Davies and Nicholas J. Higham,
- * "A Schur-Parlett algorithm for computing matrix functions",
- * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
- *
- * The actual work is done by the MatrixFunction class.
- *
- * Example: The following program checks that
- * \f[ \exp \left[ \begin{array}{ccc}
- * 0 & \frac14\pi & 0 \\
- * -\frac14\pi & 0 & 0 \\
- * 0 & 0 & 0
- * \end{array} \right] = \left[ \begin{array}{ccc}
- * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
- * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
- * 0 & 0 & 1
- * \end{array} \right]. \f]
- * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
- * the z-axis. This is the same example as used in the documentation
- * of ei_matrix_exponential().
- *
- * \include MatrixFunction.cpp
- * Output: \verbinclude MatrixFunction.out
- *
- * Note that the function \c expfn is defined for complex numbers
- * \c x, even though the matrix \c A is over the reals. Instead of
- * \c expfn, we could also have used StdStemFunctions::exp:
- * \code
- * ei_matrix_function(A, StdStemFunctions<std::complex<double> >::exp, &B);
- * \endcode
- */
-template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
- typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f,
- typename MatrixBase<Derived>::PlainMatrixType* result);
-
-/** \ingroup MatrixFunctions_Module
- * \brief Helper class for computing matrix functions.
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix functions.
+ * \tparam MatrixType type of the argument of the matrix function,
+ * expected to be an instantiation of the Matrix class template.
*/
template <typename MatrixType, int IsComplex = NumTraits<typename ei_traits<MatrixType>::Scalar>::IsComplex>
class MatrixFunction
@@ -99,18 +44,26 @@ class MatrixFunction
public:
- /** \brief Constructor. Computes matrix function.
+ /** \brief Constructor.
*
* \param[in] A argument of matrix function, should be a square matrix.
* \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
*
- * This function computes \f$ f(A) \f$ and stores the result in
- * the matrix pointed to by \p result.
+ * The class stores a reference to \p A, so it should not be
+ * changed (or destroyed) before compute() is called.
+ */
+ MatrixFunction(const MatrixType& A, StemFunction f);
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
*
- * See ei_matrix_function() for details.
+ * See ei_matrix_function() for details on how this computation
+ * is implemented.
*/
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result);
+ template <typename ResultType>
+ void compute(ResultType &result);
};
@@ -136,23 +89,36 @@ class MatrixFunction<MatrixType, 0>
public:
- /** \brief Constructor. Computes matrix function.
+ /** \brief Constructor.
*
* \param[in] A argument of matrix function, should be a square matrix.
* \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
+ */
+ MatrixFunction(const MatrixType& A, StemFunction f) : m_A(A), m_f(f) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
*
* This function converts the real matrix \c A to a complex matrix,
* uses MatrixFunction<MatrixType,1> and then converts the result back to
* a real matrix.
*/
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result)
+ template <typename ResultType>
+ void compute(ResultType& result)
{
- ComplexMatrix CA = A.template cast<ComplexScalar>();
+ ComplexMatrix CA = m_A.template cast<ComplexScalar>();
ComplexMatrix Cresult;
- MatrixFunction<ComplexMatrix>(CA, f, &Cresult);
- *result = Cresult.real();
+ MatrixFunction<ComplexMatrix> mf(CA, m_f);
+ mf.compute(Cresult);
+ result = Cresult.real();
}
+
+ private:
+
+ const MatrixType& m_A; /**< \brief Reference to argument of matrix function. */
+ StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */
};
@@ -179,17 +145,12 @@ class MatrixFunction<MatrixType, 1>
public:
- /** \brief Constructor. Computes matrix function.
- *
- * \param[in] A argument of matrix function, should be a square matrix.
- * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
- * \param[out] result pointer to the matrix in which to store the result, \f$ f(A) \f$.
- */
- MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result);
+ MatrixFunction(const MatrixType& A, StemFunction f);
+ template <typename ResultType> void compute(ResultType& result);
private:
- void computeSchurDecomposition(const MatrixType& A);
+ void computeSchurDecomposition();
void partitionEigenvalues();
typename ListOfClusters::iterator findCluster(Scalar key);
void computeClusterSize();
@@ -202,6 +163,7 @@ class MatrixFunction<MatrixType, 1>
void computeOffDiagonal();
DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
+ const MatrixType& m_A; /**< \brief Reference to argument of matrix function. */
StemFunction *m_f; /**< \brief Stem function for matrix function under consideration */
MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
@@ -221,11 +183,28 @@ class MatrixFunction<MatrixType, 1>
static const RealScalar separation() { return static_cast<RealScalar>(0.01); }
};
+/** \brief Constructor.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] f an entire function; \c f(x,n) should compute the n-th derivative of f at x.
+ */
template <typename MatrixType>
-MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f, MatrixType* result) :
- m_f(f)
+MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f) :
+ m_A(A), m_f(f)
{
- computeSchurDecomposition(A);
+ /* empty body */
+}
+
+/** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
+ */
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixFunction<MatrixType,1>::compute(ResultType& result)
+{
+ computeSchurDecomposition();
partitionEigenvalues();
computeClusterSize();
computeBlockStart();
@@ -233,14 +212,14 @@ MatrixFunction<MatrixType,1>::MatrixFunction(const MatrixType& A, StemFunction f
permuteSchur();
computeBlockAtomic();
computeOffDiagonal();
- *result = m_U * m_fT * m_U.adjoint();
+ result = m_U * m_fT * m_U.adjoint();
}
-/** \brief Store the Schur decomposition of \p A in #m_T and #m_U */
+/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
template <typename MatrixType>
-void MatrixFunction<MatrixType,1>::computeSchurDecomposition(const MatrixType& A)
+void MatrixFunction<MatrixType,1>::computeSchurDecomposition()
{
- const ComplexSchur<MatrixType> schurOfA(A);
+ const ComplexSchur<MatrixType> schurOfA(m_A);
m_T = schurOfA.matrixT();
m_U = schurOfA.matrixU();
}
@@ -498,23 +477,129 @@ typename MatrixFunction<MatrixType,1>::DynMatrixType MatrixFunction<MatrixType,1
return X;
}
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix function of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix function.
+ *
+ * This class holds the argument to the matrix function until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * ei_matrix_function() and related functions and most of the time
+ * this is the only way it is used.
+ */
+template<typename Derived> class MatrixFunctionReturnValue
+: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
+{
+ private:
+
+ typedef typename ei_traits<Derived>::Scalar Scalar;
+ typedef typename ei_stem_function<Scalar>::type StemFunction;
+
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression) forming the argument of the
+ * matrix function.
+ * \param[in] f Stem function for matrix function under consideration.
+ */
+ MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result \p f applied to \p A, where \p f and \p A
+ * are as in the constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename ei_eval<Derived>::type Aevaluated = m_A.eval();
+ MatrixFunction<typename Derived::PlainMatrixType> mf(Aevaluated, m_f);
+ mf.compute(result);
+ }
+
+ int rows() const { return m_A.rows(); }
+ int cols() const { return m_A.cols(); }
+ private:
+ const Derived& m_A;
+ StemFunction *m_f;
+};
+
+template<typename Derived>
+struct ei_traits<MatrixFunctionReturnValue<Derived> >
+{
+ typedef typename Derived::PlainMatrixType ReturnMatrixType;
+};
+
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Compute a matrix function.
+ *
+ * \param[in] M argument of matrix function, should be a square matrix.
+ * \param[in] f an entire function; \c f(x,n) should compute the n-th
+ * derivative of f at x.
+ * \returns expression representing \p f applied to \p M.
+ *
+ * Suppose that \p M is a matrix whose entries have type \c Scalar.
+ * Then, the second argument, \p f, should be a function with prototype
+ * \code
+ * ComplexScalar f(ComplexScalar, int)
+ * \endcode
+ * where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
+ * real (e.g., \c float or \c double) and \c ComplexScalar =
+ * \c Scalar if \c Scalar is complex. The return value of \c f(x,n)
+ * should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
+ *
+ * This routine uses the algorithm described in:
+ * Philip Davies and Nicholas J. Higham,
+ * "A Schur-Parlett algorithm for computing matrix functions",
+ * <em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464&ndash;485, 2003.
+ *
+ * The actual work is done by the MatrixFunction class.
+ *
+ * Example: The following program checks that
+ * \f[ \exp \left[ \begin{array}{ccc}
+ * 0 & \frac14\pi & 0 \\
+ * -\frac14\pi & 0 & 0 \\
+ * 0 & 0 & 0
+ * \end{array} \right] = \left[ \begin{array}{ccc}
+ * \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ * \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ * 0 & 0 & 1
+ * \end{array} \right]. \f]
+ * This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+ * the z-axis. This is the same example as used in the documentation
+ * of ei_matrix_exponential().
+ *
+ * \include MatrixFunction.cpp
+ * Output: \verbinclude MatrixFunction.out
+ *
+ * Note that the function \c expfn is defined for complex numbers
+ * \c x, even though the matrix \c A is over the reals. Instead of
+ * \c expfn, we could also have used StdStemFunctions::exp:
+ * \code
+ * ei_matrix_function(A, StdStemFunctions<std::complex<double> >::exp, &B);
+ * \endcode
+ */
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
- typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_function(const MatrixBase<Derived> &M,
+ typename ei_stem_function<typename ei_traits<Derived>::Scalar>::type f)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- MatrixFunction<PlainMatrixType>(M, f, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), f);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix sine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \sin(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \sin(M) \f$.
*
* This function calls ei_matrix_function() with StdStemFunctions::sin().
*
@@ -522,44 +607,42 @@ EIGEN_STRONG_INLINE void ei_matrix_function(const MatrixBase<Derived>& M,
* Output: \verbinclude MatrixSine.out
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_sin(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_sin(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::sin, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::sin);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix cosine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \cos(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \cos(M) \f$.
*
* This function calls ei_matrix_function() with StdStemFunctions::cos().
*
* \sa ei_matrix_sin() for an example.
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_cos(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_cos(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::cos, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::cos);
}
/** \ingroup MatrixFunctions_Module
*
* \brief Compute the matrix hyperbolic sine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \sinh(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \sinh(M) \f$
*
* This function calls ei_matrix_function() with StdStemFunctions::sinh().
*
@@ -567,36 +650,34 @@ EIGEN_STRONG_INLINE void ei_matrix_cos(const MatrixBase<Derived>& M,
* Output: \verbinclude MatrixSinh.out
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_sinh(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_sinh(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::sinh, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::sinh);
}
/** \ingroup MatrixFunctions_Module
*
- * \brief Compute the matrix hyberpolic cosine.
+ * \brief Compute the matrix hyperbolic cosine.
*
- * \param[in] M a square matrix.
- * \param[out] result pointer to matrix in which to store the result, \f$ \cosh(M) \f$
+ * \param[in] M a square matrix.
+ * \returns expression representing \f$ \cosh(M) \f$
*
* This function calls ei_matrix_function() with StdStemFunctions::cosh().
*
* \sa ei_matrix_sinh() for an example.
*/
template <typename Derived>
-EIGEN_STRONG_INLINE void ei_matrix_cosh(const MatrixBase<Derived>& M,
- typename MatrixBase<Derived>::PlainMatrixType* result)
+MatrixFunctionReturnValue<Derived>
+ei_matrix_cosh(const MatrixBase<Derived>& M)
{
ei_assert(M.rows() == M.cols());
- typedef typename MatrixBase<Derived>::PlainMatrixType PlainMatrixType;
- typedef typename ei_traits<PlainMatrixType>::Scalar Scalar;
+ typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::ComplexScalar ComplexScalar;
- MatrixFunction<PlainMatrixType>(M, StdStemFunctions<ComplexScalar>::cosh, result);
+ return MatrixFunctionReturnValue<Derived>(M.derived(), StdStemFunctions<ComplexScalar>::cosh);
}
#endif // EIGEN_MATRIX_FUNCTION
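The trigonometric wrappers now compose like ordinary expressions as well. A hedged sketch, assuming the MatrixFunctions module header (not part of this patch), that checks the matrix identity sin^2(A) + cos^2(A) = I:

#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(3,3);
  MatrixXd sinA = ei_matrix_sin(A);   // proxies evaluate on assignment
  MatrixXd cosA = ei_matrix_cos(A);
  // The power-series identity sin^2 + cos^2 = I carries over to matrices.
  std::cout << (sinA*sinA + cosA*cosA) << std::endl;  // ~ identity, up to round-off
  return 0;
}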
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
index d7409371b..4bcae47c0 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -85,7 +85,7 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
{
// TODO: Use that A is upper triangular
m_Arows = A.rows();
- m_avgEival = A.trace() / Scalar(m_Arows);
+ m_avgEival = A.trace() / Scalar(RealScalar(m_Arows));
m_Ashifted = A - m_avgEival * MatrixType::Identity(m_Arows, m_Arows);
computeMu();
MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows);
@@ -94,7 +94,7 @@ MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
for (int s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
Fincr = m_f(m_avgEival, s) * P;
F += Fincr;
- P = (1/(s + 1.0)) * P * m_Ashifted;
+ P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted;
if (taylorConverged(s, F, Fincr, P)) {
return F;
}
@@ -121,19 +121,19 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType&
const int n = F.rows();
const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
- if (Fincr_norm < epsilon<Scalar>() * F_norm) {
+ if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
RealScalar delta = 0;
RealScalar rfactorial = 1;
for (int r = 0; r < n; r++) {
RealScalar mx = 0;
for (int i = 0; i < n; i++)
- mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
- if (r != 0)
- rfactorial *= r;
+ mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, s+r)));
+ if (r != 0)
+ rfactorial *= RealScalar(r);
delta = std::max(delta, mx / rfactorial);
}
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
- if (m_mu * delta * P_norm < epsilon<Scalar>() * F_norm)
+ if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
return true;
}
return false;
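For reference, a summary of the recurrence these hunks touch (an editorial note, not part of the patch): the atomic block is evaluated from the Taylor series of f about the mean eigenvalue \f$ \mu = \mathrm{trace}(A)/n \f$,

  \f[ f(A) = \sum_{s=0}^\infty \frac{f^{(s)}(\mu)}{s!} \, (A - \mu I)^s, \f]

where the scaled powers \f$ P_s = (A - \mu I)^s / s! \f$ are accumulated incrementally via \f$ P_{s+1} = \frac{1}{s+1} P_s (A - \mu I) \f$, so each increment is \f$ F_{\mathrm{incr}} = f^{(s)}(\mu) P_s \f$. The Scalar(RealScalar(...)) casts introduced above keep these divisions well-typed when Scalar is complex.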
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index 3bf3d12ea..0754a426b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -28,6 +28,19 @@
#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H
#define EIGEN_HYBRIDNONLINEARSOLVER_H
+namespace HybridNonLinearSolverSpace {
+ enum Status {
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeErrorTooSmall = 1,
+ TooManyFunctionEvaluation = 2,
+ TolTooSmall = 3,
+ NotMakingProgressJacobian = 4,
+ NotMakingProgressIterations = 5,
+ UserAksed = 6
+ };
+}
+
/**
* \ingroup NonLinearOptimization_Module
* \brief Finds a zero of a system of n
@@ -46,22 +59,11 @@ public:
HybridNonLinearSolver(FunctorType &_functor)
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; }
- enum Status {
- Running = -1,
- ImproperInputParameters = 0,
- RelativeErrorTooSmall = 1,
- TooManyFunctionEvaluation = 2,
- TolTooSmall = 3,
- NotMakingProgressJacobian = 4,
- NotMakingProgressIterations = 5,
- UserAksed = 6
- };
-
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(1000)
- , xtol(ei_sqrt(epsilon<Scalar>()))
+ , xtol(ei_sqrt(NumTraits<Scalar>::epsilon()))
, nb_of_subdiagonals(-1)
, nb_of_superdiagonals(-1)
, epsfcn(Scalar(0.)) {}
@@ -74,47 +76,50 @@ public:
};
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
+ /* TODO: if eigen provides a triangular storage, use it here */
+ typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType;
- Status hybrj1(
+ HybridNonLinearSolverSpace::Status hybrj1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status solveInit(
+ HybridNonLinearSolverSpace::Status solveInit(
FVectorType &x,
const int mode=1
);
- Status solveOneStep(
+ HybridNonLinearSolverSpace::Status solveOneStep(
FVectorType &x,
const int mode=1
);
- Status solve(
+ HybridNonLinearSolverSpace::Status solve(
FVectorType &x,
const int mode=1
);
- Status hybrd1(
+ HybridNonLinearSolverSpace::Status hybrd1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status solveNumericalDiffInit(
+ HybridNonLinearSolverSpace::Status solveNumericalDiffInit(
FVectorType &x,
const int mode=1
);
- Status solveNumericalDiffOneStep(
+ HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(
FVectorType &x,
const int mode=1
);
- Status solveNumericalDiff(
+ HybridNonLinearSolverSpace::Status solveNumericalDiff(
FVectorType &x,
const int mode=1
);
void resetParameters(void) { parameters = Parameters(); }
Parameters parameters;
- FVectorType fvec, R, qtf, diag;
+ FVectorType fvec, qtf, diag;
JacobianType fjac;
+ UpperTriangularType R;
int nfev;
int njev;
int iter;
@@ -139,7 +144,7 @@ private:
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
FVectorType &x,
const Scalar tol
@@ -149,7 +154,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
/* check the input parameters for errors. */
if (n <= 0 || tol < 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
resetParameters();
parameters.maxfev = 100*(n+1);
@@ -162,7 +167,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
FVectorType &x,
const int mode
@@ -173,7 +178,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
fvec.resize(n);
qtf.resize(n);
- R.resize( (n*(n+1))/2);
fjac.resize(n, n);
if (mode != 2)
diag.resize(n);
@@ -184,47 +188,45 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
fnorm = fvec.stableNorm();
/* initialize iteration counter and monitors. */
-
iter = 1;
ncsuc = 0;
ncfail = 0;
nslow1 = 0;
nslow2 = 0;
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l, iwa[1];
+ int j;
+ std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
+
jeval = true;
/* calculate the jacobian matrix. */
-
if ( functor.df(x, fjac) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++njev;
wa2 = fjac.colwise().blueNorm();
@@ -233,118 +235,71 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the qr factorization of the jacobian. */
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, wa1.data());
-
- /* form (q transpose)*fvec and store in qtf. */
-
- qtf = fvec;
- for (j = 0; j < n; ++j)
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < n; ++i)
- sum += fjac(i,j) * qtf[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < n; ++i)
- qtf[i] += fjac(i,j) * temp;
- }
+ wa2 = fjac.colwise().blueNorm();
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
/* copy the triangular factor of the qr factorization into r. */
-
- sing = false;
- for (j = 0; j < n; ++j) {
- l = j;
- for (i = 0; i < j; ++i) {
- R[l] = fjac(i,j);
- l = l + n - i -1;
- }
- R[l] = wa1[j];
- if (wa1[j] == 0.)
- sing = true;
- }
+ R = qrfac.matrixQR();
/* accumulate the orthogonal factor in fjac. */
- ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
+ fjac = qrfac.householderQ();
- /* rescale if necessary. */
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
- /* Computing MAX */
+ /* rescale if necessary. */
if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
-
while (true) {
-
/* determine the direction p. */
-
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
-
- l = 0;
- for (i = 0; i < n; ++i) {
- sum = 0.;
- for (j = i; j < n; ++j) {
- sum += R[l] * wa1[j];
- ++l;
- }
- wa3[i] = qtf[i] + sum;
- }
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
- /* compute the ratio of the actual to the predicted */
- /* reduction. */
-
+ /* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
@@ -352,7 +307,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
} else {
ncfail = 0;
++ncsuc;
- if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
+ if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
@@ -360,7 +315,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -372,7 +326,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
}
/* determine the progress of the iteration. */
-
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
@@ -382,62 +335,50 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
nslow2 = 0;
/* test for convergence. */
-
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
- return RelativeErrorTooSmall;
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
- return TolTooSmall;
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
- return NotMakingProgressJacobian;
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
if (nslow1 == 10)
- return NotMakingProgressIterations;
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
/* criterion for recalculating jacobian. */
-
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
-
- for (j = 0; j < n; ++j) {
- sum = wa4.dot(fjac.col(j));
- wa2[j] = (sum - wa3[j]) / pnorm;
- wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
- if (ratio >= Scalar(1e-4))
- qtf[j] = sum;
- }
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
/* compute the qr factorization of the updated jacobian. */
-
- ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
- ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
- ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
-
- /* end of the inner loop. */
+ ei_r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ ei_r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ ei_r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
jeval = false;
}
- /* end of the outer loop. */
-
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
-
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solve(
FVectorType &x,
const int mode
)
{
- Status status = solveInit(x, mode);
- while (status==Running)
+ HybridNonLinearSolverSpace::Status status = solveInit(x, mode);
+ while (status==HybridNonLinearSolverSpace::Running)
status = solveOneStep(x, mode);
return status;
}
@@ -445,7 +386,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solve(
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
FVectorType &x,
const Scalar tol
@@ -455,7 +396,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
/* check the input parameters for errors. */
if (n <= 0 || tol < 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
resetParameters();
parameters.maxfev = 200*(n+1);
@@ -469,7 +410,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
FVectorType &x,
const int mode
@@ -482,7 +423,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
qtf.resize(n);
- R.resize( (n*(n+1))/2);
fjac.resize(n, n);
fvec.resize(n);
if (mode != 2)
@@ -491,54 +431,51 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
/* Function Body */
-
nfev = 0;
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
fnorm = fvec.stableNorm();
/* initialize iteration counter and monitors. */
-
iter = 1;
ncsuc = 0;
ncfail = 0;
nslow1 = 0;
nslow2 = 0;
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l, iwa[1];
+ int j;
+ std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
+
jeval = true;
if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1;
if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1;
/* calculate the jacobian matrix. */
-
if (ei_fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
wa2 = fjac.colwise().blueNorm();
@@ -547,118 +484,71 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the qr factorization of the jacobian. */
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, wa1.data());
-
- /* form (q transpose)*fvec and store in qtf. */
-
- qtf = fvec;
- for (j = 0; j < n; ++j)
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < n; ++i)
- sum += fjac(i,j) * qtf[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < n; ++i)
- qtf[i] += fjac(i,j) * temp;
- }
+ wa2 = fjac.colwise().blueNorm();
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
/* copy the triangular factor of the qr factorization into r. */
-
- sing = false;
- for (j = 0; j < n; ++j) {
- l = j;
- for (i = 0; i < j; ++i) {
- R[l] = fjac(i,j);
- l = l + n - i -1;
- }
- R[l] = wa1[j];
- if (wa1[j] == 0.)
- sing = true;
- }
+ R = qrfac.matrixQR();
/* accumulate the orthogonal factor in fjac. */
- ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
+ fjac = qrfac.householderQ();
- /* rescale if necessary. */
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
- /* Computing MAX */
+ /* rescale if necessary. */
if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
-
while (true) {
-
/* determine the direction p. */
-
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAksed;
+ return HybridNonLinearSolverSpace::UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
-
- l = 0;
- for (i = 0; i < n; ++i) {
- sum = 0.;
- for (j = i; j < n; ++j) {
- sum += R[l] * wa1[j];
- ++l;
- }
- wa3[i] = qtf[i] + sum;
- }
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
- /* compute the ratio of the actual to the predicted */
- /* reduction. */
-
+ /* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
@@ -666,7 +556,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
} else {
ncfail = 0;
++ncsuc;
- if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
+ if (ratio >= Scalar(.5) || ncsuc > 1)
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
@@ -674,7 +564,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -686,7 +575,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
}
/* determine the progress of the iteration. */
-
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
@@ -696,62 +584,50 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
nslow2 = 0;
/* test for convergence. */
-
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
- return RelativeErrorTooSmall;
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
- return TolTooSmall;
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
if (nslow2 == 5)
- return NotMakingProgressJacobian;
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
if (nslow1 == 10)
- return NotMakingProgressIterations;
-
- /* criterion for recalculating jacobian approximation */
- /* by forward differences. */
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
+ /* criterion for recalculating jacobian. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
-
- for (j = 0; j < n; ++j) {
- sum = wa4.dot(fjac.col(j));
- wa2[j] = (sum - wa3[j]) / pnorm;
- wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
- if (ratio >= Scalar(1e-4))
- qtf[j] = sum;
- }
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
/* compute the qr factorization of the updated jacobian. */
-
- ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
- ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
- ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
-
- /* end of the inner loop. */
+ ei_r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ ei_r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ ei_r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
jeval = false;
}
- /* end of the outer loop. */
-
- return Running;
+ return HybridNonLinearSolverSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
FVectorType &x,
const int mode
)
{
- Status status = solveNumericalDiffInit(x, mode);
- while (status==Running)
+ HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x, mode);
+ while (status==HybridNonLinearSolverSpace::Running)
status = solveNumericalDiffOneStep(x, mode);
return status;
}
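The HybridNonLinearSolverSpace:: qualifications above come from hoisting the Status enum out of the class template into a plain namespace, so that out-of-line member definitions can spell the return type without a dependent typename. A minimal standalone sketch of the pattern, with illustrative names rather than the actual Eigen types:

    namespace SolverSpace {
        enum Status { Running = -1, Done = 0 };
    }

    template <typename Scalar>
    class Solver {
    public:
        SolverSpace::Status init()    { return SolverSpace::Running; }
        SolverSpace::Status oneStep() { return SolverSpace::Done; }
        SolverSpace::Status solve();
    };

    // With a nested enum, this return type would have to be written as
    // 'typename Solver<Scalar>::Status'. The namespace avoids that.
    template <typename Scalar>
    SolverSpace::Status Solver<Scalar>::solve()
    {
        SolverSpace::Status status = init();
        while (status == SolverSpace::Running)
            status = oneStep();
        return status;
    }

The same hoisting is applied to LevenbergMarquardt in the next file.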
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 8df48d2ab..f21331e3e 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -28,21 +28,8 @@
#ifndef EIGEN_LEVENBERGMARQUARDT__H
#define EIGEN_LEVENBERGMARQUARDT__H
-/**
- * \ingroup NonLinearOptimization_Module
- * \brief Performs non linear optimization over a non-linear function,
- * using a variant of the Levenberg Marquardt algorithm.
- *
- * Check wikipedia for more information.
- * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
- */
-template<typename FunctorType, typename Scalar=double>
-class LevenbergMarquardt
-{
-public:
- LevenbergMarquardt(FunctorType &_functor)
- : functor(_functor) { nfev = njev = iter = 0; fnorm=gnorm = 0.; }
+namespace LevenbergMarquardtSpace {
enum Status {
NotStarted = -2,
Running = -1,
@@ -57,13 +44,31 @@ public:
GtolTooSmall = 8,
UserAsked = 9
};
+}
+
+
+
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Performs non-linear optimization over a non-linear function,
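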
+ * using a variant of the Levenberg Marquardt algorithm.
+ *
+ * Check wikipedia for more information.
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
+ */
+template<typename FunctorType, typename Scalar=double>
+class LevenbergMarquardt
+{
+public:
+ LevenbergMarquardt(FunctorType &_functor)
+ : functor(_functor) { nfev = njev = iter = 0; fnorm=gnorm = 0.; }
struct Parameters {
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(ei_sqrt(epsilon<Scalar>()))
- , xtol(ei_sqrt(epsilon<Scalar>()))
+ , ftol(ei_sqrt(NumTraits<Scalar>::epsilon()))
+ , xtol(ei_sqrt(NumTraits<Scalar>::epsilon()))
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -77,45 +82,45 @@ public:
typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
- Status lmder1(
+ LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status minimize(
+ LevenbergMarquardtSpace::Status minimize(
FVectorType &x,
const int mode=1
);
- Status minimizeInit(
+ LevenbergMarquardtSpace::Status minimizeInit(
FVectorType &x,
const int mode=1
);
- Status minimizeOneStep(
+ LevenbergMarquardtSpace::Status minimizeOneStep(
FVectorType &x,
const int mode=1
);
- static Status lmdif1(
+ static LevenbergMarquardtSpace::Status lmdif1(
FunctorType &functor,
FVectorType &x,
int *nfev,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status lmstr1(
+ LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = ei_sqrt(epsilon<Scalar>())
+ const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);
- Status minimizeOptimumStorage(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorage(
FVectorType &x,
const int mode=1
);
- Status minimizeOptimumStorageInit(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(
FVectorType &x,
const int mode=1
);
- Status minimizeOptimumStorageOneStep(
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode=1
);
@@ -125,7 +130,7 @@ public:
Parameters parameters;
FVectorType fvec, qtf, diag;
JacobianType fjac;
- VectorXi ipvt;
+ PermutationMatrix<Dynamic,Dynamic> permutation;
int nfev;
int njev;
int iter;
@@ -146,7 +151,7 @@ private:
};
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmder1(
FVectorType &x,
const Scalar tol
@@ -157,7 +162,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmder1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
resetParameters();
parameters.ftol = tol;
@@ -169,21 +174,21 @@ LevenbergMarquardt<FunctorType,Scalar>::lmder1(
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimize(
FVectorType &x,
const int mode
)
{
- Status status = minimizeInit(x, mode);
+ LevenbergMarquardtSpace::Status status = minimizeInit(x, mode);
do {
status = minimizeOneStep(x, mode);
- } while (status==Running);
+ } while (status==LevenbergMarquardtSpace::Running);
return status;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
FVectorType &x,
const int mode
@@ -195,7 +200,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n);
wa4.resize(m);
fvec.resize(m);
- ipvt.resize(n);
fjac.resize(m, n);
if (mode != 2)
diag.resize(n);
@@ -207,72 +211,62 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
-
par = 0.;
iter = 1;
- return NotStarted;
+ return LevenbergMarquardtSpace::NotStarted;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l;
+ int j;
/* calculate the jacobian matrix. */
-
int df_ret = functor.df(x, fjac);
if (df_ret<0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
if (df_ret>0)
// numerical diff, we evaluated the function df_ret times
nfev += df_ret;
else njev++;
/* compute the qr factorization of the jacobian. */
-
wa2 = fjac.colwise().blueNorm();
- ei_qrfac<Scalar>(m, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
- ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ permutation = qrfac.colsPermutation();
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
-
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
-
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
@@ -280,103 +274,65 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* form (q transpose)*fvec and store the first n components in */
/* qtf. */
-
wa4 = fvec;
- for (j = 0; j < n; ++j) {
- if (fjac(j,j) != 0.) {
- sum = 0.;
- for (i = j; i < m; ++i)
- sum += fjac(i,j) * wa4[i];
- temp = -sum / fjac(j,j);
- for (i = j; i < m; ++i)
- wa4[i] += fjac(i,j) * temp;
- }
- fjac(j,j) = wa1[j];
- qtf[j] = wa4[j];
- }
+ wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
+ qtf = wa4.head(n);
/* compute the norm of the scaled gradient. */
-
gnorm = 0.;
if (fnorm != 0.)
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- if (wa2[l] != 0.) {
- sum = 0.;
- for (i = 0; i <= j; ++i)
- sum += fjac(i,j) * (qtf[i] / fnorm);
- /* Computing MAX */
- gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
- }
- }
+ for (j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
-
if (gnorm <= parameters.gtol)
- return CosinusTooSmall;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
/* rescale if necessary. */
-
- if (mode != 2) /* Computing MAX */
+ if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
do {
/* determine the levenberg-marquardt parameter. */
-
- ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
+ ei_lmpar2<Scalar>(qrfac, diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
- if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+ if (Scalar(.1) * fnorm1 < fnorm)
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
-
- wa3.fill(0.);
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- temp = wa1[l];
- for (i = 0; i <= j; ++i)
- wa3[i] += fjac(i,j) * temp;
- }
+ wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() *wa1);
temp1 = ei_abs2(wa3.stableNorm() / fnorm);
temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
- /* Computing 2nd power */
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
/* compute the ratio of the actual to the predicted */
/* reduction. */
-
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -393,7 +349,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -405,32 +360,30 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* tests for convergence. */
-
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
- return RelativeErrorAndReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
- return RelativeReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
- return RelativeErrorTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
- return FtolTooSmall;
- if (delta <= epsilon<Scalar>() * xnorm)
- return XtolTooSmall;
- if (gnorm <= epsilon<Scalar>())
- return GtolTooSmall;
- /* end of the inner loop. repeat if iteration unsuccessful. */
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (ei_abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
} while (ratio < Scalar(1e-4));
- /* end of the outer loop. */
- return Running;
+
+ return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
FVectorType &x,
const Scalar tol
@@ -441,7 +394,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
resetParameters();
parameters.ftol = tol;
@@ -452,7 +405,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
FVectorType &x,
const int mode
@@ -464,8 +417,12 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
wa1.resize(n); wa2.resize(n); wa3.resize(n);
wa4.resize(m);
fvec.resize(m);
- ipvt.resize(n);
- fjac.resize(m, n);
+    // Only R is stored in fjac. Q is only used to compute 'qtf', which is
+    // Q.transpose()*rhs. qtf will be updated using Givens rotations,
+    // instead of storing them in Q.
+    // The purpose is to use only an n-by-n matrix here, instead of m-by-n,
+    // so that we can handle cases where m>>n :
+ fjac.resize(n, n);
if (mode != 2)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
@@ -476,74 +433,74 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
njev = 0;
/* check the input parameters for errors. */
-
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
if (mode == 2)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
/* evaluate the function at the starting point */
/* and calculate its norm. */
-
nfev = 1;
if ( functor(x, fvec) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
-
par = 0.;
iter = 1;
- return NotStarted;
+ return LevenbergMarquardtSpace::NotStarted;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode
)
{
- int i, j, l;
+ int i, j;
bool sing;
/* compute the qr factorization of the jacobian matrix */
/* calculated one row at a time, while simultaneously */
/* forming (q transpose)*fvec and storing the first */
/* n components in qtf. */
-
qtf.fill(0.);
fjac.fill(0.);
int rownb = 2;
for (i = 0; i < m; ++i) {
- if (functor.df(x, wa3, rownb) < 0) return UserAsked;
- temp = fvec[i];
- ei_rwupdt<Scalar>(n, fjac.data(), fjac.rows(), wa3.data(), qtf.data(), &temp, wa1.data(), wa2.data());
+ if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked;
+ ei_rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
++rownb;
}
++njev;
/* if the jacobian is rank deficient, call qrfac to */
/* reorder its columns and update the components of qtf. */
-
sing = false;
for (j = 0; j < n; ++j) {
- if (fjac(j,j) == 0.) {
+ if (fjac(j,j) == 0.)
sing = true;
- }
- ipvt[j] = j;
wa2[j] = fjac.col(j).head(j).stableNorm();
}
+ permutation.setIdentity(n);
if (sing) {
- ipvt.array() += 1;
wa2 = fjac.colwise().blueNorm();
- ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
- ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ // TODO We have no unit test covering this code path, do not modify
+ // until it is carefully tested
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ wa1 = fjac.diagonal();
+ fjac.diagonal() = qrfac.hCoeffs();
+ permutation = qrfac.colsPermutation();
+ // TODO : avoid this:
+ for(int ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
+
for (j = 0; j < n; ++j) {
if (fjac(j,j) != 0.) {
sum = 0.;
@@ -559,107 +516,74 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
-
if (iter == 1) {
if (mode != 2)
- for (j = 0; j < n; ++j) {
- diag[j] = wa2[j];
- if (wa2[j] == 0.)
- diag[j] = 1.;
- }
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
-
- wa3 = diag.cwiseProduct(x);
- xnorm = wa3.stableNorm();
+ xnorm = diag.cwiseProduct(x).stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* compute the norm of the scaled gradient. */
-
gnorm = 0.;
if (fnorm != 0.)
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- if (wa2[l] != 0.) {
- sum = 0.;
- for (i = 0; i <= j; ++i)
- sum += fjac(i,j) * (qtf[i] / fnorm);
- /* Computing MAX */
- gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
- }
- }
+ for (j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
-
if (gnorm <= parameters.gtol)
- return CosinusTooSmall;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
/* rescale if necessary. */
-
- if (mode != 2) /* Computing MAX */
+ if (mode != 2)
diag = diag.cwiseMax(wa2);
- /* beginning of the inner loop. */
do {
/* determine the levenberg-marquardt parameter. */
-
- ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
+ ei_lmpar<Scalar>(fjac, permutation.indices(), diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
-
wa1 = -wa1;
wa2 = x + wa1;
- wa3 = diag.cwiseProduct(wa1);
- pnorm = wa3.stableNorm();
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
/* on the first iteration, adjust the initial step bound. */
-
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
-
if ( functor(wa2, wa4) < 0)
- return UserAsked;
+ return LevenbergMarquardtSpace::UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
-
actred = -1.;
- if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+ if (Scalar(.1) * fnorm1 < fnorm)
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
-
- wa3.fill(0.);
- for (j = 0; j < n; ++j) {
- l = ipvt[j];
- temp = wa1[l];
- for (i = 0; i <= j; ++i)
- wa3[i] += fjac(i,j) * temp;
- }
+ wa3 = fjac.corner(TopLeft,n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
temp1 = ei_abs2(wa3.stableNorm() / fnorm);
temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
- /* Computing 2nd power */
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
/* compute the ratio of the actual to the predicted */
/* reduction. */
-
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
-
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -676,7 +600,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* test for successful iteration. */
-
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -688,46 +611,44 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* tests for convergence. */
-
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
- return RelativeErrorAndReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
- return RelativeReductionTooSmall;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
- return RelativeErrorTooSmall;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
-
if (nfev >= parameters.maxfev)
- return TooManyFunctionEvaluation;
- if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
- return FtolTooSmall;
- if (delta <= epsilon<Scalar>() * xnorm)
- return XtolTooSmall;
- if (gnorm <= epsilon<Scalar>())
- return GtolTooSmall;
- /* end of the inner loop. repeat if iteration unsuccessful. */
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (ei_abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
} while (ratio < Scalar(1e-4));
- /* end of the outer loop. */
- return Running;
+
+ return LevenbergMarquardtSpace::Running;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
FVectorType &x,
const int mode
)
{
- Status status = minimizeOptimumStorageInit(x, mode);
+ LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x, mode);
do {
status = minimizeOptimumStorageOneStep(x, mode);
- } while (status==Running);
+ } while (status==LevenbergMarquardtSpace::Running);
return status;
}
template<typename FunctorType, typename Scalar>
-typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
FunctorType &functor,
FVectorType &x,
@@ -740,7 +661,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
/* check the input parameters for errors. */
if (n <= 0 || m < n || tol < 0.)
- return ImproperInputParameters;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
NumericalDiff<FunctorType> numDiff(functor);
// embedded LevenbergMarquardt
@@ -749,7 +670,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
lm.parameters.xtol = tol;
lm.parameters.maxfev = 200*(n+1);
- Status info = Status(lm.minimize(x));
+ LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
if (nfev)
* nfev = lm.nfev;
return info;
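The recurring substitution in this file replaces the f2c-style ei_qrfac with ColPivHouseholderQR, which provides the packed R factor, the column permutation, and the Q^T*rhs product in one object. A self-contained sketch of that triple, with illustrative sizes:

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        const int m = 5, n = 3;
        MatrixXd fjac = MatrixXd::Random(m, n);
        VectorXd fvec = VectorXd::Random(m);

        ColPivHouseholderQR<MatrixXd> qrfac(fjac);
        MatrixXd R = qrfac.matrixQR().triangularView<Upper>();

        // form (q transpose)*fvec and keep the first n components,
        // as minimizeOneStep() does above
        VectorXd wa4 = fvec;
        wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
        VectorXd qtf = wa4.head(n);

        // sanity check: fjac * P == Q * R up to roundoff
        std::cout << (fjac * qrfac.colsPermutation()
                      - qrfac.householderQ() * R).norm() << std::endl;
    }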
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
index d5fca9c62..591e8bef7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/chkder.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -4,27 +4,26 @@
template<typename Scalar>
void ei_chkder(
- Matrix< Scalar, Dynamic, 1 > &x,
- Matrix< Scalar, Dynamic, 1 > &fvec,
- Matrix< Scalar, Dynamic, Dynamic > &fjac,
+ const Matrix< Scalar, Dynamic, 1 > &x,
+ const Matrix< Scalar, Dynamic, 1 > &fvec,
+ const Matrix< Scalar, Dynamic, Dynamic > &fjac,
Matrix< Scalar, Dynamic, 1 > &xp,
- Matrix< Scalar, Dynamic, 1 > &fvecp,
+ const Matrix< Scalar, Dynamic, 1 > &fvecp,
int mode,
Matrix< Scalar, Dynamic, 1 > &err
)
{
- const Scalar eps = ei_sqrt(epsilon<Scalar>());
- const Scalar epsf = chkder_factor * epsilon<Scalar>();
+ const Scalar eps = ei_sqrt(NumTraits<Scalar>::epsilon());
+ const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
const Scalar epslog = chkder_log10e * ei_log(eps);
Scalar temp;
- int i,j;
const int m = fvec.size(), n = x.size();
if (mode != 2) {
+ /* mode = 1. */
xp.resize(n);
- /* mode = 1. */
- for (j = 0; j < n; ++j) {
+ for (int j = 0; j < n; ++j) {
temp = eps * ei_abs(x[j]);
if (temp == 0.)
temp = eps;
@@ -32,24 +31,24 @@ void ei_chkder(
}
}
else {
- /* mode = 2. */
+ /* mode = 2. */
err.setZero(m);
- for (j = 0; j < n; ++j) {
+ for (int j = 0; j < n; ++j) {
temp = ei_abs(x[j]);
if (temp == 0.)
temp = 1.;
err += temp * fjac.col(j);
}
- for (i = 0; i < m; ++i) {
+ for (int i = 0; i < m; ++i) {
temp = 1.;
if (fvec[i] != 0. && fvecp[i] != 0. && ei_abs(fvecp[i] - fvec[i]) >= epsf * ei_abs(fvec[i]))
temp = eps * ei_abs((fvecp[i] - fvec[i]) / eps - err[i]) / (ei_abs(fvec[i]) + ei_abs(fvecp[i]));
err[i] = 1.;
- if (temp > epsilon<Scalar>() && temp < eps)
+ if (temp > NumTraits<Scalar>::epsilon() && temp < eps)
err[i] = (chkder_log10e * ei_log(temp) - epslog) / epslog;
if (temp >= eps)
err[i] = 0.;
}
}
-} /* chkder_ */
+}
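ei_chkder keeps MINPACK's two-call protocol: a mode 1 call builds a perturbed point xp from x, the caller evaluates the function at both points and fills in its hand-coded Jacobian, and a mode 2 call scores each gradient component in err (values near 1 look correct, values near 0 look wrong). A rough usage sketch, assuming ei_chkder from this header is in scope; my_f and my_df are hypothetical user routines:

    #include <Eigen/Dense>
    using namespace Eigen;

    void check_jacobian()
    {
        VectorXd x = VectorXd::Random(3), xp(3), fvec(3), fvecp(3), err(3);
        MatrixXd fjac(3, 3);

        ei_chkder(x, fvec, fjac, xp, fvecp, 1, err);  // mode 1: build xp only
        // fvec = my_f(x); fvecp = my_f(xp); fjac = my_df(x);  // user side
        ei_chkder(x, fvec, fjac, xp, fvecp, 2, err);  // mode 2: fill err
    }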
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index 2e495cd7f..2f983a958 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -1,9 +1,9 @@
- template <typename Scalar>
+template <typename Scalar>
void ei_covar(
Matrix< Scalar, Dynamic, Dynamic > &r,
const VectorXi &ipvt,
- Scalar tol = ei_sqrt(epsilon<Scalar>()) )
+ Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon()) )
{
/* Local variables */
int i, j, k, l, ii, jj;
@@ -16,8 +16,7 @@ void ei_covar(
Matrix< Scalar, Dynamic, 1 > wa(n);
assert(ipvt.size()==n);
- /* form the inverse of r in the full upper triangle of r. */
-
+ /* form the inverse of r in the full upper triangle of r. */
l = -1;
for (k = 0; k < n; ++k)
if (ei_abs(r(k,k)) > tolr) {
@@ -25,29 +24,21 @@ void ei_covar(
for (j = 0; j <= k-1; ++j) {
temp = r(k,k) * r(j,k);
r(j,k) = 0.;
- for (i = 0; i <= j; ++i)
- r(i,k) -= temp * r(i,j);
+ r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
}
l = k;
}
- /* form the full upper triangle of the inverse of (r transpose)*r */
- /* in the full upper triangle of r. */
-
+ /* form the full upper triangle of the inverse of (r transpose)*r */
+ /* in the full upper triangle of r. */
for (k = 0; k <= l; ++k) {
- for (j = 0; j <= k-1; ++j) {
- temp = r(j,k);
- for (i = 0; i <= j; ++i)
- r(i,j) += temp * r(i,k);
- }
- temp = r(k,k);
- for (i = 0; i <= k; ++i)
- r(i,k) = temp * r(i,k);
+ for (j = 0; j <= k-1; ++j)
+ r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
+ r.col(k).head(k+1) *= r(k,k);
}
- /* form the full lower triangle of the covariance matrix */
- /* in the strict lower triangle of r and in wa. */
-
+ /* form the full lower triangle of the covariance matrix */
+ /* in the strict lower triangle of r and in wa. */
for (j = 0; j < n; ++j) {
jj = ipvt[j];
sing = j > l;
@@ -63,12 +54,8 @@ void ei_covar(
wa[jj] = r(j,j);
}
- /* symmetrize the covariance matrix in r. */
-
- for (j = 0; j < n; ++j) {
- for (i = 0; i <= j; ++i)
- r(i,j) = r(j,i);
- r(j,j) = wa[j];
- }
+ /* symmetrize the covariance matrix in r. */
+ r.corner(TopLeft,n,n).template triangularView<StrictlyUpper>() = r.corner(TopLeft,n,n).transpose();
+ r.diagonal() = wa;
}
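The last hunk collapses the symmetrization double loop into a single triangular-view assignment. The idiom on a plain square matrix, sketched in isolation (corner(TopLeft,n,n) is redundant once the matrix is exactly n-by-n, and is spelled topLeftCorner() in later Eigen releases):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
        const int n = 4;
        MatrixXd r = MatrixXd::Random(n, n);
        VectorXd wa = r.diagonal();  // stands in for the saved covariances

        // same effect as the removed loops: r(i,j) = r(j,i) for i < j,
        // then put the diagonal back
        r.triangularView<StrictlyUpper>() = r.transpose();
        r.diagonal() = wa;
    }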
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index dd6b39cb4..9c1d38dea 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -1,76 +1,58 @@
template <typename Scalar>
void ei_dogleg(
- Matrix< Scalar, Dynamic, 1 > &r,
+ const Matrix< Scalar, Dynamic, Dynamic > &qrfac,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Scalar delta,
Matrix< Scalar, Dynamic, 1 > &x)
{
/* Local variables */
- int i, j, k, l, jj;
+ int i, j;
Scalar sum, temp, alpha, bnorm;
Scalar gnorm, qnorm;
Scalar sgnorm;
/* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
- const int n = diag.size();
- Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
+ const int n = qrfac.cols();
assert(n==qtb.size());
assert(n==x.size());
+ assert(n==diag.size());
+ Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
/* first, calculate the gauss-newton direction. */
-
- jj = n * (n + 1) / 2;
- for (k = 0; k < n; ++k) {
- j = n - k - 1;
- jj -= k+1;
- l = jj + 1;
- sum = 0.;
- for (i = j+1; i < n; ++i) {
- sum += r[l] * x[i];
- ++l;
- }
- temp = r[jj];
+ for (j = n-1; j >=0; --j) {
+ temp = qrfac(j,j);
if (temp == 0.) {
- l = j;
- for (i = 0; i <= j; ++i) {
- /* Computing MAX */
- temp = std::max(temp,ei_abs(r[l]));
- l = l + n - i;
- }
- temp = epsmch * temp;
+ temp = epsmch * qrfac.col(j).head(j+1).maxCoeff();
if (temp == 0.)
temp = epsmch;
}
- x[j] = (qtb[j] - sum) / temp;
+ if (j==n-1)
+ x[j] = qtb[j] / temp;
+ else
+ x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp;
}
/* test whether the gauss-newton direction is acceptable. */
-
- wa1.fill(0.);
- wa2 = diag.cwiseProduct(x);
- qnorm = wa2.stableNorm();
+ qnorm = diag.cwiseProduct(x).stableNorm();
if (qnorm <= delta)
return;
+ // TODO : this path is not tested by Eigen unit tests
+
/* the gauss-newton direction is not acceptable. */
/* next, calculate the scaled gradient direction. */
- l = 0;
+ wa1.fill(0.);
for (j = 0; j < n; ++j) {
- temp = qtb[j];
- for (i = j; i < n; ++i) {
- wa1[i] += r[l] * temp;
- ++l;
- }
+ wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j];
wa1[j] /= diag[j];
}
/* calculate the norm of the scaled gradient and test for */
/* the special case in which the scaled gradient is zero. */
-
gnorm = wa1.stableNorm();
sgnorm = 0.;
alpha = delta / qnorm;
@@ -79,24 +61,20 @@ void ei_dogleg(
/* calculate the point along the scaled gradient */
/* at which the quadratic is minimized. */
-
wa1.array() /= (diag*gnorm).array();
- l = 0;
+    // TODO : once unit tests cover this part, use:
+ // wa2 = qrfac.template triangularView<Upper>() * wa1;
for (j = 0; j < n; ++j) {
sum = 0.;
for (i = j; i < n; ++i) {
- sum += r[l] * wa1[i];
- ++l;
- /* L100: */
+ sum += qrfac(j,i) * wa1[i];
}
wa2[j] = sum;
- /* L110: */
}
temp = wa2.stableNorm();
sgnorm = gnorm / temp / temp;
/* test whether the scaled gradient direction is acceptable. */
-
alpha = 0.;
if (sgnorm >= delta)
goto algo_end;
@@ -104,21 +82,15 @@ void ei_dogleg(
/* the scaled gradient direction is not acceptable. */
/* finally, calculate the point along the dogleg */
/* at which the quadratic is minimized. */
-
bnorm = qtb.stableNorm();
temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta);
- /* Computing 2nd power */
temp = temp - delta / qnorm * ei_abs2(sgnorm / delta) + ei_sqrt(ei_abs2(temp - delta / qnorm) + (1.-ei_abs2(delta / qnorm)) * (1.-ei_abs2(sgnorm / delta)));
- /* Computing 2nd power */
alpha = delta / qnorm * (1. - ei_abs2(sgnorm / delta)) / temp;
algo_end:
/* form appropriate convex combination of the gauss-newton */
/* direction and the scaled gradient direction. */
-
temp = (1.-alpha) * std::min(sgnorm,delta);
x = temp * wa1 + alpha * x;
- return;
-
}
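The loop kept under the second TODO computes wa2[j] as row j of the upper triangle times wa1, which is exactly the upper-triangular matrix-vector product the TODO proposes. A sketch checking the equivalence:

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        const int n = 5;
        MatrixXd qrfac = MatrixXd::Random(n, n);
        VectorXd wa1 = VectorXd::Random(n);

        // candidate replacement named in the TODO
        VectorXd wa2 = qrfac.triangularView<Upper>() * wa1;

        // hand-rolled version currently in the code
        VectorXd check(n);
        for (int j = 0; j < n; ++j) {
            double sum = 0.;
            for (int i = j; i < n; ++i)
                sum += qrfac(j, i) * wa1[i];
            check[j] = sum;
        }
        std::cout << (wa2 - check).norm() << std::endl;  // ~0
    }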
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 376df7c40..3dc1e8070 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -10,19 +10,20 @@ int ei_fdjac1(
{
/* Local variables */
Scalar h;
- int i, j, k;
+ int j, k;
Scalar eps, temp;
int msum;
- int iflag = 0;
+ int iflag;
+ int start, length;
/* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
const int n = x.size();
assert(fvec.size()==n);
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
- eps = ei_sqrt((std::max(epsfcn,epsmch)));
+ eps = ei_sqrt(std::max(epsfcn,epsmch));
msum = ml + mu + 1;
if (msum >= n) {
/* computation of dense approximate jacobian. */
@@ -42,29 +43,26 @@ int ei_fdjac1(
}else {
/* computation of banded approximate jacobian. */
for (k = 0; k < msum; ++k) {
- for (j = k; msum< 0 ? j > n: j < n; j += msum) {
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
wa2[j] = x[j];
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
x[j] = wa2[j] + h;
}
iflag = Functor(x, wa1);
- if (iflag < 0) {
+ if (iflag < 0)
return iflag;
- }
- for (j = k; msum< 0 ? j > n: j < n; j += msum) {
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
x[j] = wa2[j];
h = eps * ei_abs(wa2[j]);
if (h == 0.) h = eps;
- for (i = 0; i < n; ++i) {
- fjac(i,j) = 0.;
- if (i >= j - mu && i <= j + ml) {
- fjac(i,j) = (wa1[i] - fvec[i]) / h;
- }
- }
+ fjac.col(j).setZero();
+ start = std::max(0,j-mu);
+ length = std::min(n-1, j+ml) - start + 1;
+ fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
}
}
}
- return iflag;
-} /* fdjac1_ */
+ return 0;
+}
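In the banded branch, each finite-difference column is now written with one segment() assignment: only rows inside the band [j-mu, j+ml] receive the difference quotient, everything else stays zero. The update for a single column, sketched in isolation with illustrative values:

    #include <Eigen/Dense>
    #include <algorithm>
    using namespace Eigen;

    int main()
    {
        const int n = 6, ml = 1, mu = 1, j = 3;
        const double h = 1e-8;
        MatrixXd fjac = MatrixXd::Zero(n, n);
        VectorXd wa1  = VectorXd::Random(n);  // stands in for f(x + h*e_j)
        VectorXd fvec = VectorXd::Random(n);  // stands in for f(x)

        fjac.col(j).setZero();
        int start  = std::max(0, j - mu);
        int length = std::min(n - 1, j + ml) - start + 1;
        fjac.col(j).segment(start, length) =
            (wa1.segment(start, length) - fvec.segment(start, length)) / h;
    }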
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index ab8549f1a..d763bd4e7 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -2,7 +2,7 @@
template <typename Scalar>
void ei_lmpar(
Matrix< Scalar, Dynamic, Dynamic > &r,
- VectorXi &ipvt, // TODO : const once ipvt mess fixed
+ const VectorXi &ipvt,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Scalar delta,
@@ -30,7 +30,6 @@ void ei_lmpar(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
-
int nsing = n-1;
wa1 = qtb;
for (j = 0; j < n; ++j) {
@@ -52,7 +51,6 @@ void ei_lmpar(
/* initialize the iteration counter. */
/* evaluate the function at the origin, and test */
/* for acceptance of the gauss-newton direction. */
-
iter = 0;
wa2 = diag.cwiseProduct(x);
dxnorm = wa2.blueNorm();
@@ -65,7 +63,6 @@ void ei_lmpar(
/* if the jacobian is not rank deficient, the newton */
/* step provides a lower bound, parl, for the zero of */
/* the function. otherwise set this bound to zero. */
-
parl = 0.;
if (nsing >= n-1) {
for (j = 0; j < n; ++j) {
@@ -85,7 +82,6 @@ void ei_lmpar(
}
/* calculate an upper bound, paru, for the zero of the function. */
-
for (j = 0; j < n; ++j)
wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]];
@@ -96,22 +92,18 @@ void ei_lmpar(
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
-
par = std::max(par,parl);
par = std::min(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
/* beginning of an iteration. */
-
while (true) {
++iter;
/* evaluate the function at the current value of par. */
-
if (par == 0.)
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
-
wa1 = ei_sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -125,12 +117,10 @@ void ei_lmpar(
/* if the function is small enough, accept the current value */
/* of par. also test for the exceptional cases where parl */
/* is zero or the number of iterations has reached 10. */
-
if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
break;
/* compute the newton correction. */
-
for (j = 0; j < n; ++j) {
l = ipvt[j];
wa1[j] = diag[l] * (wa2[l] / dxnorm);
@@ -145,23 +135,148 @@ void ei_lmpar(
parc = fp / delta / temp / temp;
/* depending on the sign of the function, update parl or paru. */
-
if (fp > 0.)
parl = std::max(parl,par);
if (fp < 0.)
paru = std::min(paru,par);
/* compute an improved estimate for par. */
-
/* Computing MAX */
par = std::max(parl,par+parc);
/* end of an iteration. */
-
}
/* termination. */
+ if (iter == 0)
+ par = 0.;
+ return;
+}
+
+template <typename Scalar>
+void ei_lmpar2(
+ const ColPivHouseholderQR<Matrix< Scalar, Dynamic, Dynamic> > &qr,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Scalar delta,
+ Scalar &par,
+ Matrix< Scalar, Dynamic, 1 > &x)
+{
+ /* Local variables */
+ int j;
+ Scalar fp;
+ Scalar parc, parl;
+ int iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+
+ /* Function Body */
+ const Scalar dwarf = std::numeric_limits<Scalar>::min();
+ const int n = qr.matrixQR().cols();
+ assert(n==diag.size());
+ assert(n==qtb.size());
+
+ Matrix< Scalar, Dynamic, 1 > wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+
+//    const int rank = qr.nonzeroPivots(); // uses exact comparison to double(0.)
+ const int rank = qr.rank(); // use a threshold
+ wa1 = qtb;
+ wa1.tail(n-rank).setZero();
+ qr.matrixQR().corner(TopLeft, rank, rank).template triangularView<Upper>().solveInPlace(wa1.head(rank));
+
+ x = qr.colsPermutation()*wa1;
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - delta;
+ if (fp <= Scalar(0.1) * delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (rank==n) {
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
+ qr.matrixQR().corner(TopLeft, n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ temp = wa1.blueNorm();
+ parl = fp / delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / delta;
+ if (paru == 0.)
+ paru = dwarf / std::min(delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = std::max(par,parl);
+ par = std::min(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR();
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = ei_sqrt(par)* diag;
+
+ Matrix< Scalar, Dynamic, 1 > sdiag(n);
+ ei_qrsolv<Scalar>(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
+ // we could almost use this here, but the diagonal is outside qr, in sdiag[]
+ // qr.matrixQR().corner(TopLeft, n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (int i = j+1; i < n; ++i)
+ wa1[i] -= s(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = std::max(parl,par);
+ if (fp < 0.)
+ paru = std::min(paru,par);
+
+ /* compute an improved estimate for par. */
+ par = std::max(parl,par+parc);
+ }
if (iter == 0)
par = 0.;
return;
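The opening of ei_lmpar2 is a rank-aware Gauss-Newton solve: truncate Q^T*b at the numerical rank, back-substitute on the leading rank-by-rank block of R, and undo the column permutation. A standalone sketch of those three steps (corner(TopLeft,...) is spelled topLeftCorner() in later Eigen releases):

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
        MatrixXd J = MatrixXd::Random(6, 4);
        VectorXd b = VectorXd::Random(6);
        const int n = J.cols();

        ColPivHouseholderQR<MatrixXd> qr(J);
        const int rank = qr.rank();  // thresholded, as in the code above

        VectorXd qtb = b;
        qtb.applyOnTheLeft(qr.householderQ().adjoint());

        VectorXd wa1 = qtb.head(n);
        wa1.tail(n - rank).setZero();
        qr.matrixQR().topLeftCorner(rank, rank)
          .triangularView<Upper>().solveInPlace(wa1.head(rank));

        VectorXd x = qr.colsPermutation() * wa1;  // gauss-newton direction
    }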
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qform.h b/unsupported/Eigen/src/NonLinearOptimization/qform.h
deleted file mode 100644
index 47bbd1fbe..000000000
--- a/unsupported/Eigen/src/NonLinearOptimization/qform.h
+++ /dev/null
@@ -1,89 +0,0 @@
-
-template <typename Scalar>
-void ei_qform(int m, int n, Scalar *q, int
- ldq, Scalar *wa)
-{
- /* System generated locals */
- int q_dim1, q_offset;
-
- /* Local variables */
- int i, j, k, l, jm1, np1;
- Scalar sum, temp;
- int minmn;
-
- /* Parameter adjustments */
- --wa;
- q_dim1 = ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
-
- /* Function Body */
-
- /* zero out upper triangle of q in the first min(m,n) columns. */
-
- minmn = std::min(m,n);
- if (minmn < 2) {
- goto L30;
- }
- for (j = 2; j <= minmn; ++j) {
- jm1 = j - 1;
- for (i = 1; i <= jm1; ++i) {
- q[i + j * q_dim1] = 0.;
- /* L10: */
- }
- /* L20: */
- }
-L30:
-
- /* initialize remaining columns to those of the identity matrix. */
-
- np1 = n + 1;
- if (m < np1) {
- goto L60;
- }
- for (j = np1; j <= m; ++j) {
- for (i = 1; i <= m; ++i) {
- q[i + j * q_dim1] = 0.;
- /* L40: */
- }
- q[j + j * q_dim1] = 1.;
- /* L50: */
- }
-L60:
-
- /* accumulate q from its factored form. */
-
- for (l = 1; l <= minmn; ++l) {
- k = minmn - l + 1;
- for (i = k; i <= m; ++i) {
- wa[i] = q[i + k * q_dim1];
- q[i + k * q_dim1] = 0.;
- /* L70: */
- }
- q[k + k * q_dim1] = 1.;
- if (wa[k] == 0.) {
- goto L110;
- }
- for (j = k; j <= m; ++j) {
- sum = 0.;
- for (i = k; i <= m; ++i) {
- sum += q[i + j * q_dim1] * wa[i];
- /* L80: */
- }
- temp = sum / wa[k];
- for (i = k; i <= m; ++i) {
- q[i + j * q_dim1] -= temp * wa[i];
- /* L90: */
- }
- /* L100: */
- }
-L110:
- /* L120: */
- ;
- }
- return;
-
- /* last card of subroutine qform. */
-
-} /* qform_ */
-
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrfac.h b/unsupported/Eigen/src/NonLinearOptimization/qrfac.h
deleted file mode 100644
index 0c1ecf394..000000000
--- a/unsupported/Eigen/src/NonLinearOptimization/qrfac.h
+++ /dev/null
@@ -1,131 +0,0 @@
-
-template <typename Scalar>
-void ei_qrfac(int m, int n, Scalar *a, int
- lda, int pivot, int *ipvt, Scalar *rdiag)
-{
- /* System generated locals */
- int a_dim1, a_offset;
-
- /* Local variables */
- int i, j, k, jp1;
- Scalar sum;
- int kmax;
- Scalar temp;
- int minmn;
- Scalar ajnorm;
-
- Matrix< Scalar, Dynamic, 1 > wa(n+1);
-
- /* Parameter adjustments */
- --rdiag;
- a_dim1 = lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipvt;
-
- /* Function Body */
- const Scalar epsmch = epsilon<Scalar>();
-
- /* compute the initial column norms and initialize several arrays. */
-
- for (j = 1; j <= n; ++j) {
- rdiag[j] = Map< Matrix< Scalar, Dynamic, 1 > >(&a[j * a_dim1 + 1],m).blueNorm();
- wa[j] = rdiag[j];
- if (pivot)
- ipvt[j] = j;
- }
-
- /* reduce a to r with householder transformations. */
-
- minmn = std::min(m,n);
- for (j = 1; j <= minmn; ++j) {
- if (! (pivot)) {
- goto L40;
- }
-
- /* bring the column of largest norm into the pivot position. */
-
- kmax = j;
- for (k = j; k <= n; ++k) {
- if (rdiag[k] > rdiag[kmax]) {
- kmax = k;
- }
- /* L20: */
- }
- if (kmax == j) {
- goto L40;
- }
- for (i = 1; i <= m; ++i) {
- temp = a[i + j * a_dim1];
- a[i + j * a_dim1] = a[i + kmax * a_dim1];
- a[i + kmax * a_dim1] = temp;
- /* L30: */
- }
- rdiag[kmax] = rdiag[j];
- wa[kmax] = wa[j];
- k = ipvt[j];
- ipvt[j] = ipvt[kmax];
- ipvt[kmax] = k;
-L40:
-
- /* compute the householder transformation to reduce the */
- /* j-th column of a to a multiple of the j-th unit vector. */
-
- ajnorm = Map< Matrix< Scalar, Dynamic, 1 > >(&a[j + j * a_dim1],m-j+1).blueNorm();
- if (ajnorm == 0.) {
- goto L100;
- }
- if (a[j + j * a_dim1] < 0.) {
- ajnorm = -ajnorm;
- }
- for (i = j; i <= m; ++i) {
- a[i + j * a_dim1] /= ajnorm;
- /* L50: */
- }
- a[j + j * a_dim1] += 1.;
-
- /* apply the transformation to the remaining columns */
- /* and update the norms. */
-
- jp1 = j + 1;
- if (n < jp1) {
- goto L100;
- }
- for (k = jp1; k <= n; ++k) {
- sum = 0.;
- for (i = j; i <= m; ++i) {
- sum += a[i + j * a_dim1] * a[i + k * a_dim1];
- /* L60: */
- }
- temp = sum / a[j + j * a_dim1];
- for (i = j; i <= m; ++i) {
- a[i + k * a_dim1] -= temp * a[i + j * a_dim1];
- /* L70: */
- }
- if (! (pivot) || rdiag[k] == 0.) {
- goto L80;
- }
- temp = a[j + k * a_dim1] / rdiag[k];
- /* Computing MAX */
- /* Computing 2nd power */
- rdiag[k] *= ei_sqrt((std::max(Scalar(0.), Scalar(1.)-ei_abs2(temp))));
- /* Computing 2nd power */
- if (Scalar(.05) * ei_abs2(rdiag[k] / wa[k]) > epsmch) {
- goto L80;
- }
- rdiag[k] = Map< Matrix< Scalar, Dynamic, 1 > >(&a[jp1 + k * a_dim1],m-j).blueNorm();
- wa[k] = rdiag[k];
-L80:
- /* L90: */
- ;
- }
-L100:
- rdiag[j] = -ajnorm;
- /* L110: */
- }
- return;
-
- /* last card of subroutine qrfac. */
-
-} /* qrfac_ */
-
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
index 1ee21d5ed..f89a5f9a8 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -1,8 +1,10 @@
+// TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt
template <typename Scalar>
void ei_qrsolv(
- Matrix< Scalar, Dynamic, Dynamic > &r,
- VectorXi &ipvt, // TODO : const once ipvt mess fixed
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+    // TODO : use a PermutationMatrix once ei_lmpar is removed:
+ const VectorXi &ipvt,
const Matrix< Scalar, Dynamic, 1 > &diag,
const Matrix< Scalar, Dynamic, 1 > &qtb,
Matrix< Scalar, Dynamic, 1 > &x,
@@ -11,80 +13,69 @@ void ei_qrsolv(
{
/* Local variables */
int i, j, k, l;
- Scalar sum, temp;
- int n = r.cols();
+ Scalar temp;
+ int n = s.cols();
Matrix< Scalar, Dynamic, 1 > wa(n);
+ PlanarRotation<Scalar> givens;
/* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
/* copy r and (q transpose)*b to preserve input and initialize s. */
/* in particular, save the diagonal elements of r in x. */
-
- x = r.diagonal();
+ x = s.diagonal();
wa = qtb;
- for (j = 0; j < n; ++j)
- for (i = j+1; i < n; ++i)
- r(i,j) = r(j,i);
+ s.corner(TopLeft,n,n).template triangularView<StrictlyLower>() = s.corner(TopLeft,n,n).transpose();
/* eliminate the diagonal matrix d using a givens rotation. */
for (j = 0; j < n; ++j) {
/* prepare the row of d to be eliminated, locating the */
/* diagonal element using p from the qr factorization. */
-
l = ipvt[j];
if (diag[l] == 0.)
break;
- sdiag.segment(j,n-j).setZero();
+ sdiag.tail(n-j).setZero();
sdiag[j] = diag[l];
/* the transformations to eliminate the row of d */
/* modify only a single element of (q transpose)*b */
/* beyond the first n, which is initially zero. */
-
Scalar qtbpj = 0.;
for (k = j; k < n; ++k) {
/* determine a givens rotation which eliminates the */
/* appropriate element in the current row of d. */
- PlanarRotation<Scalar> givens;
- givens.makeGivens(-r(k,k), sdiag[k]);
+ givens.makeGivens(-s(k,k), sdiag[k]);
/* compute the modified diagonal element of r and */
/* the modified element of ((q transpose)*b,0). */
-
- r(k,k) = givens.c() * r(k,k) + givens.s() * sdiag[k];
+ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
temp = givens.c() * wa[k] + givens.s() * qtbpj;
qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
wa[k] = temp;
            /* accumulate the transformation in the row of s. */
for (i = k+1; i<n; ++i) {
- temp = givens.c() * r(i,k) + givens.s() * sdiag[i];
- sdiag[i] = -givens.s() * r(i,k) + givens.c() * sdiag[i];
- r(i,k) = temp;
+ temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
+ sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
+ s(i,k) = temp;
}
}
}
- // restore
- sdiag = r.diagonal();
- r.diagonal() = x;
-
/* solve the triangular system for z. if the system is */
/* singular, then obtain a least squares solution. */
-
int nsing;
for (nsing=0; nsing<n && sdiag[nsing]!=0; nsing++);
- wa.segment(nsing,n-nsing).setZero();
- nsing--; // nsing is the last nonsingular index
- for (j = nsing; j>=0; j--) {
- sum = 0.;
- for (i = j+1; i <= nsing; ++i)
- sum += r(i,j) * wa[i];
- wa[j] = (wa[j] - sum) / sdiag[j];
- }
+ wa.tail(n-nsing).setZero();
+ s.corner(TopLeft, nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));
+
+ // restore
+ sdiag = s.diagonal();
+ s.diagonal() = x;
/* permute the components of z back to components of x. */
for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j];
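Every elimination step above relies on makeGivens(): given a pair (p, q), it chooses c and s so that the rotated pair becomes (r, 0). A standalone check of that contract; PlanarRotation is the class name at the time of this patch, later renamed JacobiRotation, which is what current Eigen provides:

    #include <Eigen/Dense>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        JacobiRotation<double> givens;
        const double p = -3.0, q = 4.0;

        givens.makeGivens(p, q);
        double r    = givens.c() * p - givens.s() * q;  // rotated leading entry
        double zero = givens.s() * p + givens.c() * q;  // eliminated entry
        std::cout << r << " " << zero << std::endl;     // prints 5 0
    }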
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
index 70a6d30c3..855cb7a1b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
@@ -1,60 +1,22 @@
+// TODO : move this to GivensQR once there's such a thing in Eigen
+
template <typename Scalar>
-void ei_r1mpyq(int m, int n, Scalar *a, int
- lda, const Scalar *v, const Scalar *w)
+void ei_r1mpyq(int m, int n, Scalar *a, const std::vector<PlanarRotation<Scalar> > &v_givens, const std::vector<PlanarRotation<Scalar> > &w_givens)
{
- /* System generated locals */
- int a_dim1, a_offset;
-
- /* Local variables */
- int i, j, nm1, nmj;
- Scalar cos__=0., sin__=0., temp;
-
- /* Parameter adjustments */
- --w;
- --v;
- a_dim1 = lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- nm1 = n - 1;
- if (nm1 < 1)
- return;
-
/* apply the first set of givens rotations to a. */
- for (nmj = 1; nmj <= nm1; ++nmj) {
- j = n - nmj;
- if (ei_abs(v[j]) > 1.) {
- cos__ = 1. / v[j];
- sin__ = ei_sqrt(1. - ei_abs2(cos__));
- } else {
- sin__ = v[j];
- cos__ = ei_sqrt(1. - ei_abs2(sin__));
- }
- for (i = 1; i <= m; ++i) {
- temp = cos__ * a[i + j * a_dim1] - sin__ * a[i + n * a_dim1];
- a[i + n * a_dim1] = sin__ * a[i + j * a_dim1] + cos__ * a[
- i + n * a_dim1];
- a[i + j * a_dim1] = temp;
+ for (int j = n-2; j>=0; --j)
+ for (int i = 0; i<m; ++i) {
+ Scalar temp = v_givens[j].c() * a[i+m*j] - v_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = v_givens[j].s() * a[i+m*j] + v_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
}
- }
/* apply the second set of givens rotations to a. */
- for (j = 1; j <= nm1; ++j) {
- if (ei_abs(w[j]) > 1.) {
- cos__ = 1. / w[j];
- sin__ = ei_sqrt(1. - ei_abs2(cos__));
- } else {
- sin__ = w[j];
- cos__ = ei_sqrt(1. - ei_abs2(sin__));
- }
- for (i = 1; i <= m; ++i) {
- temp = cos__ * a[i + j * a_dim1] + sin__ * a[i + n * a_dim1];
- a[i + n * a_dim1] = -sin__ * a[i + j * a_dim1] + cos__ * a[
- i + n * a_dim1];
- a[i + j * a_dim1] = temp;
+ for (int j = 0; j<n-1; ++j)
+ for (int i = 0; i<m; ++i) {
+ Scalar temp = w_givens[j].c() * a[i+m*j] + w_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = -w_givens[j].s() * a[i+m*j] + w_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
}
- }
- return;
-} /* r1mpyq_ */
+}
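The rewritten ei_r1mpyq still hand-rolls the two-column rotation because it works on a raw pointer. On an actual Eigen matrix, the Jacobi module can apply a plane rotation to a column or row pair directly, as sketched below; the sign conventions would have to be checked against the loops above before substituting this in:

    #include <Eigen/Dense>
    using namespace Eigen;

    int main()
    {
        const int n = 5;
        MatrixXd a = MatrixXd::Random(n, n);

        JacobiRotation<double> g;
        g.makeGivens(2.0, -1.0);                  // illustrative rotation

        a.applyOnTheRight(2, n - 1, g);           // rotate columns 2 and n-1
        a.applyOnTheLeft(2, n - 1, g.adjoint());  // or the rows, transposed
    }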
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index f2ddb189b..3d8978837 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -1,175 +1,88 @@
- template <typename Scalar>
-void ei_r1updt(int m, int n, Scalar *s, int /* ls */, const Scalar *u, Scalar *v, Scalar *w, bool *sing)
+template <typename Scalar>
+void ei_r1updt(
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+ const Matrix< Scalar, Dynamic, 1> &u,
+ std::vector<PlanarRotation<Scalar> > &v_givens,
+ std::vector<PlanarRotation<Scalar> > &w_givens,
+ Matrix< Scalar, Dynamic, 1> &v,
+ Matrix< Scalar, Dynamic, 1> &w,
+ bool *sing)
{
/* Local variables */
- int i, j, l, jj, nm1;
- Scalar tan__;
- int nmj;
- Scalar cos__, sin__, tau, temp, cotan;
-
- /* Parameter adjustments */
- --w;
- --u;
- --v;
- --s;
-
- /* Function Body */
- const Scalar giant = std::numeric_limits<Scalar>::max();
-
- /* initialize the diagonal element pointer. */
-
- jj = n * ((m << 1) - n + 1) / 2 - (m - n);
-
- /* move the nontrivial part of the last column of s into w. */
-
- l = jj;
- for (i = n; i <= m; ++i) {
- w[i] = s[l];
- ++l;
- /* L10: */
- }
-
- /* rotate the vector v into a multiple of the n-th unit vector */
- /* in such a way that a spike is introduced into w. */
-
- nm1 = n - 1;
- if (nm1 < 1) {
- goto L70;
- }
- for (nmj = 1; nmj <= nm1; ++nmj) {
- j = n - nmj;
- jj -= m - j + 1;
+ const int m = s.rows();
+ const int n = s.cols();
+ int i, j=1;
+ Scalar temp;
+ PlanarRotation<Scalar> givens;
+
+    // ei_r1updt had a broader use case, but we don't use it here. And, more
+    // importantly, we cannot test it.
+ assert(m==n);
+ assert(u.size()==m);
+ assert(v.size()==n);
+ assert(w.size()==n);
+
+ /* move the nontrivial part of the last column of s into w. */
+ w[n-1] = s(n-1,n-1);
+
+ /* rotate the vector v into a multiple of the n-th unit vector */
+ /* in such a way that a spike is introduced into w. */
+ for (j=n-2; j>=0; --j) {
w[j] = 0.;
- if (v[j] == 0.) {
- goto L50;
- }
-
- /* determine a givens rotation which eliminates the */
- /* j-th element of v. */
-
- if (ei_abs(v[n]) >= ei_abs(v[j]))
- goto L20;
- cotan = v[n] / v[j];
- /* Computing 2nd power */
- sin__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__ = sin__ * cotan;
- tau = 1.;
- if (ei_abs(cos__) * giant > 1.) {
- tau = 1. / cos__;
- }
- goto L30;
-L20:
- tan__ = v[j] / v[n];
- /* Computing 2nd power */
- cos__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__ = cos__ * tan__;
- tau = sin__;
-L30:
-
- /* apply the transformation to v and store the information */
- /* necessary to recover the givens rotation. */
-
- v[n] = sin__ * v[j] + cos__ * v[n];
- v[j] = tau;
-
- /* apply the transformation to s and extend the spike in w. */
-
- l = jj;
- for (i = j; i <= m; ++i) {
- temp = cos__ * s[l] - sin__ * w[i];
- w[i] = sin__ * s[l] + cos__ * w[i];
- s[l] = temp;
- ++l;
- /* L40: */
+ if (v[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of v. */
+ givens.makeGivens(-v[n-1], v[j]);
+
+ /* apply the transformation to v and store the information */
+ /* necessary to recover the givens rotation. */
+ v[n-1] = givens.s() * v[j] + givens.c() * v[n-1];
+ v_givens[j] = givens;
+
+ /* apply the transformation to s and extend the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) - givens.s() * w[i];
+ w[i] = givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
}
-L50:
- /* L60: */
- ;
}
-L70:
- /* add the spike from the rank 1 update to w. */
-
- for (i = 1; i <= m; ++i) {
- w[i] += v[n] * u[i];
- /* L80: */
- }
-
- /* eliminate the spike. */
+ /* add the spike from the rank 1 update to w. */
+ w += v[n-1] * u;
+ /* eliminate the spike. */
*sing = false;
- if (nm1 < 1) {
- goto L140;
- }
- for (j = 1; j <= nm1; ++j) {
- if (w[j] == 0.) {
- goto L120;
+ for (j = 0; j < n-1; ++j) {
+ if (w[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of the spike. */
+ givens.makeGivens(-s(j,j), w[j]);
+
+ /* apply the transformation to s and reduce the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) + givens.s() * w[i];
+ w[i] = -givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
+
+ /* store the information necessary to recover the */
+ /* givens rotation. */
+ w_givens[j] = givens;
}
- /* determine a givens rotation which eliminates the */
- /* j-th element of the spike. */
-
- if (ei_abs(s[jj]) >= ei_abs(w[j]))
- goto L90;
- cotan = s[jj] / w[j];
- /* Computing 2nd power */
- sin__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__ = sin__ * cotan;
- tau = 1.;
- if (ei_abs(cos__) * giant > 1.) {
- tau = 1. / cos__;
- }
- goto L100;
-L90:
- tan__ = w[j] / s[jj];
- /* Computing 2nd power */
- cos__ = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__ = cos__ * tan__;
- tau = sin__;
-L100:
-
- /* apply the transformation to s and reduce the spike in w. */
-
- l = jj;
- for (i = j; i <= m; ++i) {
- temp = cos__ * s[l] + sin__ * w[i];
- w[i] = -sin__ * s[l] + cos__ * w[i];
- s[l] = temp;
- ++l;
- /* L110: */
- }
-
- /* store the information necessary to recover the */
- /* givens rotation. */
-
- w[j] = tau;
-L120:
-
- /* test for zero diagonal elements in the output s. */
-
- if (s[jj] == 0.) {
+ /* test for zero diagonal elements in the output s. */
+ if (s(j,j) == 0.) {
*sing = true;
}
- jj += m - j + 1;
- /* L130: */
}
-L140:
+ /* move w back into the last column of the output s. */
+ s(n-1,n-1) = w[n-1];
- /* move w back into the last column of the output s. */
-
- l = jj;
- for (i = n; i <= m; ++i) {
- s[l] = w[i];
- ++l;
- /* L150: */
- }
- if (s[jj] == 0.) {
+ if (s(j,j) == 0.) {
*sing = true;
}
return;
-
- /* last card of subroutine r1updt. */
-
-} /* r1updt_ */
+}
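
What this routine maintains: given the triangular factor s of some matrix, it refactors s + u·vᵀ using Givens rotations only, first rotating v onto the last unit vector (which drives a spike into w) and then chasing that spike back out. A tiny driver showing the matrix being refactored (a sketch with hypothetical data, not part of the patch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        // The rank-1 update tracked by ei_r1updt: S_new = S + u * v^T.
        Eigen::Matrix3d R = Eigen::Matrix3d::Random();
        Eigen::Matrix3d S = R.triangularView<Eigen::Upper>();
        Eigen::Vector3d u = Eigen::Vector3d::Random();
        Eigen::Vector3d v = Eigen::Vector3d::Random();
        std::cout << S + u * v.transpose() << "\n";
    }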
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index 6be292f6a..7703ff8de 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -1,80 +1,41 @@
template <typename Scalar>
-void ei_rwupdt(int n, Scalar *r__, int ldr,
- const Scalar *w, Scalar *b, Scalar *alpha, Scalar *cos__,
- Scalar *sin__)
+void ei_rwupdt(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const Matrix< Scalar, Dynamic, 1> &w,
+ Matrix< Scalar, Dynamic, 1> &b,
+ Scalar alpha)
{
- /* System generated locals */
- int r_dim1, r_offset;
+ const int n = r.cols();
+ assert(r.rows()>=n);
+ std::vector<PlanarRotation<Scalar> > givens(n);
/* Local variables */
- int i, j, jm1;
- Scalar tan__, temp, rowj, cotan;
-
- /* Parameter adjustments */
- --sin__;
- --cos__;
- --b;
- --w;
- r_dim1 = ldr;
- r_offset = 1 + r_dim1 * 1;
- r__ -= r_offset;
+ Scalar temp, rowj;
/* Function Body */
-
- for (j = 1; j <= n; ++j) {
- rowj = w[j];
- jm1 = j - 1;
-
-/* apply the previous transformations to */
-/* r(i,j), i=1,2,...,j-1, and to w(j). */
-
- if (jm1 < 1) {
- goto L20;
- }
- for (i = 1; i <= jm1; ++i) {
- temp = cos__[i] * r__[i + j * r_dim1] + sin__[i] * rowj;
- rowj = -sin__[i] * r__[i + j * r_dim1] + cos__[i] * rowj;
- r__[i + j * r_dim1] = temp;
-/* L10: */
- }
-L20:
-
-/* determine a givens rotation which eliminates w(j). */
-
- cos__[j] = 1.;
- sin__[j] = 0.;
- if (rowj == 0.) {
- goto L50;
- }
- if (ei_abs(r__[j + j * r_dim1]) >= ei_abs(rowj))
- goto L30;
- cotan = r__[j + j * r_dim1] / rowj;
-/* Computing 2nd power */
- sin__[j] = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(cotan));
- cos__[j] = sin__[j] * cotan;
- goto L40;
-L30:
- tan__ = rowj / r__[j + j * r_dim1];
-/* Computing 2nd power */
- cos__[j] = Scalar(.5) / ei_sqrt(Scalar(0.25) + Scalar(0.25) * ei_abs2(tan__));
- sin__[j] = cos__[j] * tan__;
-L40:
-
-/* apply the current transformation to r(j,j), b(j), and alpha. */
-
- r__[j + j * r_dim1] = cos__[j] * r__[j + j * r_dim1] + sin__[j] *
- rowj;
- temp = cos__[j] * b[j] + sin__[j] * *alpha;
- *alpha = -sin__[j] * b[j] + cos__[j] * *alpha;
- b[j] = temp;
-L50:
-/* L60: */
- ;
+ for (int j = 0; j < n; ++j) {
+ rowj = w[j];
+
+ /* apply the previous transformations to */
+ /* r(i,j), i=0,1,...,j-1, and to w(j). */
+ for (int i = 0; i < j; ++i) {
+ temp = givens[i].c() * r(i,j) + givens[i].s() * rowj;
+ rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj;
+ r(i,j) = temp;
+ }
+
+ if (rowj == 0.)
+ continue;
+
+ /* determine a givens rotation which eliminates w(j). */
+ givens[j].makeGivens(-r(j,j), rowj);
+
+ /* apply the current transformation to r(j,j), b(j), and alpha. */
+ r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj;
+ temp = givens[j].c() * b[j] + givens[j].s() * alpha;
+ alpha = -givens[j].s() * b[j] + givens[j].c() * alpha;
+ b[j] = temp;
}
- return;
-
-/* last card of subroutine rwupdt. */
-
-} /* rwupdt_ */
+}
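
ei_rwupdt is the row-append counterpart: given the triangular factor r of a matrix A, appending a new row wᵀ to A changes the factor, and the loop above restores it by zeroing w one entry at a time. The factor it produces can be cross-checked against a full decomposition, e.g. (a sketch; data and sizes are illustrative, and QR factors are only unique up to row signs):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
        Eigen::RowVector3d w = Eigen::RowVector3d::Random();
        Eigen::MatrixXd Aw(6, 3);
        Aw << A, w;   // A with the new observation row appended
        // The triangular factor of Aw is what the incremental update computes.
        Eigen::MatrixXd R = Eigen::HouseholderQR<Eigen::MatrixXd>(Aw)
                                .matrixQR().topRows(3).triangularView<Eigen::Upper>();
        std::cout << R << "\n";
    }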
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
index db6f791df..8d23cb4ae 100644
--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -80,7 +80,7 @@ public:
Scalar h;
int nfev=0;
const int n = _x.size();
- const Scalar eps = ei_sqrt((std::max(epsfcn,epsilon<Scalar>() )));
+ const Scalar eps = ei_sqrt((std::max(epsfcn,NumTraits<Scalar>::epsilon() )));
ValueType val1, val2;
InputType x = _x;
// TODO : we should do this only if the size is not already known
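
The eps computed above sets the forward-difference step. The square root is the standard choice: the truncation error of f(x+h) − f(x) grows like h while the round-off error grows like epsilon/h, and the two balance at h ≈ sqrt(epsilon). A scalar sketch of the same recipe (hypothetical helper, not the NumericalDiff class itself):

    #include <Eigen/Core>
    #include <cmath>

    // Forward difference with the MINPACK-style step choice.
    double forward_diff(double (*f)(double), double x)
    {
        const double eps = std::sqrt(Eigen::NumTraits<double>::epsilon());
        double h = eps * std::abs(x);
        if (h == 0.)
            h = eps;   // fall back to an absolute step when x is zero
        return (f(x + h) - f(x)) / h;
    }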
diff --git a/unsupported/doc/examples/MatrixExponential.cpp b/unsupported/doc/examples/MatrixExponential.cpp
index 7dc2396df..801ee4d01 100644
--- a/unsupported/doc/examples/MatrixExponential.cpp
+++ b/unsupported/doc/examples/MatrixExponential.cpp
@@ -12,7 +12,6 @@ int main()
0, 0, 0;
std::cout << "The matrix A is:\n" << A << "\n\n";
- MatrixXd B;
- ei_matrix_exponential(A, &B);
+ MatrixXd B = ei_matrix_exponential(A);
std::cout << "The matrix exponential of A is:\n" << B << "\n\n";
}
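
The 3x3 matrix in this example generates a rotation by pi/4 about the z-axis, so the printed exponential is an ordinary rotation matrix. The same fact in 2x2 form, using the new return-by-value interface introduced by this patch (a sketch; assumes the usual example headers and using namespace Eigen):

    #include <unsupported/Eigen/MatrixFunctions>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        const double t = 3.141592653589793 / 4;
        Matrix2d A;
        A << 0, -t,
             t,  0;
        // exp(A) = [cos t, -sin t; sin t, cos t], the rotation by t.
        Matrix2d expA = ei_matrix_exponential(A);
        std::cout << expA << "\n";
    }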
diff --git a/unsupported/doc/examples/MatrixFunction.cpp b/unsupported/doc/examples/MatrixFunction.cpp
index c11cb821b..075fe7361 100644
--- a/unsupported/doc/examples/MatrixFunction.cpp
+++ b/unsupported/doc/examples/MatrixFunction.cpp
@@ -15,9 +15,8 @@ int main()
A << 0, -pi/4, 0,
pi/4, 0, 0,
0, 0, 0;
- std::cout << "The matrix A is:\n" << A << "\n\n";
- MatrixXd B;
- ei_matrix_function(A, expfn, &B);
- std::cout << "The matrix exponential of A is:\n" << B << "\n\n";
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix exponential of A is:\n"
+ << ei_matrix_function(A, expfn) << "\n\n";
}
diff --git a/unsupported/doc/examples/MatrixSine.cpp b/unsupported/doc/examples/MatrixSine.cpp
index f8780ac92..2bbf99bbb 100644
--- a/unsupported/doc/examples/MatrixSine.cpp
+++ b/unsupported/doc/examples/MatrixSine.cpp
@@ -7,12 +7,10 @@ int main()
MatrixXd A = MatrixXd::Random(3,3);
std::cout << "A = \n" << A << "\n\n";
- MatrixXd sinA;
- ei_matrix_sin(A, &sinA);
+ MatrixXd sinA = ei_matrix_sin(A);
std::cout << "sin(A) = \n" << sinA << "\n\n";
- MatrixXd cosA;
- ei_matrix_cos(A, &cosA);
+ MatrixXd cosA = ei_matrix_cos(A);
std::cout << "cos(A) = \n" << cosA << "\n\n";
// The matrix functions satisfy sin^2(A) + cos^2(A) = I,
diff --git a/unsupported/doc/examples/MatrixSinh.cpp b/unsupported/doc/examples/MatrixSinh.cpp
index 488d95652..036534dea 100644
--- a/unsupported/doc/examples/MatrixSinh.cpp
+++ b/unsupported/doc/examples/MatrixSinh.cpp
@@ -7,12 +7,10 @@ int main()
MatrixXf A = MatrixXf::Random(3,3);
std::cout << "A = \n" << A << "\n\n";
- MatrixXf sinhA;
- ei_matrix_sinh(A, &sinhA);
+ MatrixXf sinhA = ei_matrix_sinh(A);
std::cout << "sinh(A) = \n" << sinhA << "\n\n";
- MatrixXf coshA;
- ei_matrix_cosh(A, &coshA);
+ MatrixXf coshA = ei_matrix_cosh(A);
std::cout << "cosh(A) = \n" << coshA << "\n\n";
// The matrix functions satisfy cosh^2(A) - sinh^2(A) = I,
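
All four examples now follow the same return-by-value pattern. A combined sketch checking the identity the sine example mentions, sin^2(A) + cos^2(A) = I (names and setup are illustrative):

    #include <unsupported/Eigen/MatrixFunctions>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        MatrixXd A = MatrixXd::Random(3, 3);
        MatrixXd sinA = ei_matrix_sin(A);
        MatrixXd cosA = ei_matrix_cos(A);
        // Should print a matrix close to the 3x3 identity.
        std::cout << sinA * sinA + cosA * cosA << "\n";
    }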
diff --git a/unsupported/test/FFT.cpp b/unsupported/test/FFT.cpp
index 02cd5a48f..45c87f5a7 100644
--- a/unsupported/test/FFT.cpp
+++ b/unsupported/test/FFT.cpp
@@ -1,245 +1,2 @@
-#if 0
-
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2009 Mark Borgerding mark a borgerding net
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#include "main.h"
-#include <unsupported/Eigen/FFT>
-
-template <typename T>
-std::complex<T> RandomCpx() { return std::complex<T>( (T)(rand()/(T)RAND_MAX - .5), (T)(rand()/(T)RAND_MAX - .5) ); }
-
-using namespace std;
-using namespace Eigen;
-
-float norm(float x) {return x*x;}
-double norm(double x) {return x*x;}
-long double norm(long double x) {return x*x;}
-
-template < typename T>
-complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
-
-complex<long double> promote(float x) { return complex<long double>( x); }
-complex<long double> promote(double x) { return complex<long double>( x); }
-complex<long double> promote(long double x) { return complex<long double>( x); }
-
-
- template <typename T1,typename T2>
- long double fft_rmse( const vector<T1> & fftbuf,const vector<T2> & timebuf)
- {
- long double totalpower=0;
- long double difpower=0;
- long double pi = acos((long double)-1 );
- for (size_t k0=0;k0<fftbuf.size();++k0) {
- complex<long double> acc = 0;
- long double phinc = -2.*k0* pi / timebuf.size();
- for (size_t k1=0;k1<timebuf.size();++k1) {
- acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
- }
- totalpower += norm(acc);
- complex<long double> x = promote(fftbuf[k0]);
- complex<long double> dif = acc - x;
- difpower += norm(dif);
- cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl;
- }
- cerr << "rmse:" << sqrt(difpower/totalpower) << endl;
- return sqrt(difpower/totalpower);
- }
-
- template <typename T1,typename T2>
- long double dif_rmse( const vector<T1> buf1,const vector<T2> buf2)
- {
- long double totalpower=0;
- long double difpower=0;
- size_t n = min( buf1.size(),buf2.size() );
- for (size_t k=0;k<n;++k) {
- totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
- difpower += norm(buf1[k] - buf2[k]);
- }
- return sqrt(difpower/totalpower);
- }
-
-enum { StdVectorContainer, EigenVectorContainer };
-
-template<int Container, typename Scalar> struct VectorType;
-
-template<typename Scalar> struct VectorType<StdVectorContainer,Scalar>
-{
- typedef vector<Scalar> type;
-};
-
-template<typename Scalar> struct VectorType<EigenVectorContainer,Scalar>
-{
- typedef Matrix<Scalar,Dynamic,1> type;
-};
-
-template <int Container, typename T>
-void test_scalar_generic(int nfft)
-{
- typedef typename FFT<T>::Complex Complex;
- typedef typename FFT<T>::Scalar Scalar;
- typedef typename VectorType<Container,Scalar>::type ScalarVector;
- typedef typename VectorType<Container,Complex>::type ComplexVector;
-
- FFT<T> fft;
- ScalarVector inbuf(nfft);
- ComplexVector outbuf;
- for (int k=0;k<nfft;++k)
- inbuf[k]= (T)(rand()/(double)RAND_MAX - .5);
-
- // make sure it DOESN'T give the right full spectrum answer
- // if we've asked for half-spectrum
- fft.SetFlag(fft.HalfSpectrum );
- fft.fwd( outbuf,inbuf);
- VERIFY(outbuf.size() == (size_t)( (nfft>>1)+1) );
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- fft.ClearFlag(fft.HalfSpectrum );
- fft.fwd( outbuf,inbuf);
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- ScalarVector buf3;
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-
- // verify that the Unscaled flag takes effect
- ComplexVector buf4;
- fft.SetFlag(fft.Unscaled);
- fft.inv( buf4 , outbuf);
- for (int k=0;k<nfft;++k)
- buf4[k] *= T(1./nfft);
- VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
-
- // verify that ClearFlag works
- fft.ClearFlag(fft.Unscaled);
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-}
-
-template <typename T>
-void test_scalar(int nfft)
-{
- test_scalar_generic<StdVectorContainer,T>(nfft);
- test_scalar_generic<EigenVectorContainer,T>(nfft);
-}
-
-template <int Container, typename T>
-void test_complex_generic(int nfft)
-{
- typedef typename FFT<T>::Complex Complex;
- typedef typename VectorType<Container,Complex>::type ComplexVector;
-
- FFT<T> fft;
-
- ComplexVector inbuf(nfft);
- ComplexVector outbuf;
- ComplexVector buf3;
- for (int k=0;k<nfft;++k)
- inbuf[k]= Complex( (T)(rand()/(double)RAND_MAX - .5), (T)(rand()/(double)RAND_MAX - .5) );
- fft.fwd( outbuf , inbuf);
-
- VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
-
- fft.inv( buf3 , outbuf);
-
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-
- // verify that the Unscaled flag takes effect
- ComplexVector buf4;
- fft.SetFlag(fft.Unscaled);
- fft.inv( buf4 , outbuf);
- for (int k=0;k<nfft;++k)
- buf4[k] *= T(1./nfft);
- VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
-
- // verify that ClearFlag works
- fft.ClearFlag(fft.Unscaled);
- fft.inv( buf3 , outbuf);
- VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
-}
-
-template <typename T>
-void test_complex(int nfft)
-{
- test_complex_generic<StdVectorContainer,T>(nfft);
- test_complex_generic<EigenVectorContainer,T>(nfft);
-}
-
-void test_FFT()
-{
-
- CALL_SUBTEST( test_complex<float>(32) );
- CALL_SUBTEST( test_complex<double>(32) );
- CALL_SUBTEST( test_complex<long double>(32) );
-
- CALL_SUBTEST( test_complex<float>(256) );
- CALL_SUBTEST( test_complex<double>(256) );
- CALL_SUBTEST( test_complex<long double>(256) );
-
- CALL_SUBTEST( test_complex<float>(3*8) );
- CALL_SUBTEST( test_complex<double>(3*8) );
- CALL_SUBTEST( test_complex<long double>(3*8) );
-
- CALL_SUBTEST( test_complex<float>(5*32) );
- CALL_SUBTEST( test_complex<double>(5*32) );
- CALL_SUBTEST( test_complex<long double>(5*32) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4) );
- CALL_SUBTEST( test_complex<double>(2*3*4) );
- CALL_SUBTEST( test_complex<long double>(2*3*4) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4*5) );
- CALL_SUBTEST( test_complex<double>(2*3*4*5) );
- CALL_SUBTEST( test_complex<long double>(2*3*4*5) );
-
- CALL_SUBTEST( test_complex<float>(2*3*4*5*7) );
- CALL_SUBTEST( test_complex<double>(2*3*4*5*7) );
- CALL_SUBTEST( test_complex<long double>(2*3*4*5*7) );
-
-
-
- CALL_SUBTEST( test_scalar<float>(32) );
- CALL_SUBTEST( test_scalar<double>(32) );
- CALL_SUBTEST( test_scalar<long double>(32) );
-
- CALL_SUBTEST( test_scalar<float>(45) );
- CALL_SUBTEST( test_scalar<double>(45) );
- CALL_SUBTEST( test_scalar<long double>(45) );
-
- CALL_SUBTEST( test_scalar<float>(50) );
- CALL_SUBTEST( test_scalar<double>(50) );
- CALL_SUBTEST( test_scalar<long double>(50) );
-
- CALL_SUBTEST( test_scalar<float>(256) );
- CALL_SUBTEST( test_scalar<double>(256) );
- CALL_SUBTEST( test_scalar<long double>(256) );
-
- CALL_SUBTEST( test_scalar<float>(2*3*4*5*7) );
- CALL_SUBTEST( test_scalar<double>(2*3*4*5*7) );
- CALL_SUBTEST( test_scalar<long double>(2*3*4*5*7) );
-}
-#else
#define test_FFTW test_FFT
#include "FFTW.cpp"
-#endif
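
The deleted round-trip tests live on in FFTW.cpp, which this file now reuses through the #define above. The core usage pattern they exercised, in miniature (a sketch along the lines of the deleted helpers):

    #include <unsupported/Eigen/FFT>
    #include <vector>
    #include <complex>

    int main()
    {
        Eigen::FFT<float> fft;
        std::vector<float> in(8, 1.f);
        std::vector<std::complex<float> > freq;
        fft.fwd(freq, in);    // real-to-complex forward transform
        std::vector<float> back;
        fft.inv(back, freq);  // scaled inverse recovers the input
    }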
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
index 94de53e36..838ddb238 100644
--- a/unsupported/test/FFTW.cpp
+++ b/unsupported/test/FFTW.cpp
@@ -23,7 +23,6 @@
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
-#include <iostream>
#include <unsupported/Eigen/FFT>
template <typename T>
@@ -107,8 +106,6 @@ void test_scalar_generic(int nfft)
for (int k=0;k<nfft;++k)
tbuf[k]= (T)( rand()/(double)RAND_MAX - .5);
- cout << "tbuf=["; for (size_t i=0;i<(size_t) tbuf.size();++i) {cout << tbuf[i] << " ";} cout << "];\n";
-
// make sure it DOESN'T give the right full spectrum answer
// if we've asked for half-spectrum
fft.SetFlag(fft.HalfSpectrum );
@@ -125,9 +122,7 @@ void test_scalar_generic(int nfft)
return; // odd FFTs get the wrong size inverse FFT
ScalarVector tbuf2;
- cout << "freqBuf=["; for (size_t i=0;i<(size_t) freqBuf.size();++i) {cout << freqBuf[i] << " ";} cout << "];\n";
fft.inv( tbuf2 , freqBuf);
- cout << "tbuf2=["; for (size_t i=0;i<(size_t) tbuf2.size();++i) {cout << tbuf2[i] << " ";} cout << "];\n";
VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
@@ -135,9 +130,7 @@ void test_scalar_generic(int nfft)
ScalarVector tbuf3;
fft.SetFlag(fft.Unscaled);
- cout << "freqBuf=["; for (size_t i=0;i<(size_t) freqBuf.size();++i) {cout << freqBuf[i] << " ";} cout << "];\n";
fft.inv( tbuf3 , freqBuf);
- cout << "tbuf3=["; for (size_t i=0;i<(size_t) tbuf3.size();++i) {cout << tbuf3[i] << " ";} cout << "];\n";
for (int k=0;k<nfft;++k)
tbuf3[k] *= T(1./nfft);
@@ -146,8 +139,6 @@ void test_scalar_generic(int nfft)
//for (size_t i=0;i<(size_t) tbuf.size();++i)
// cout << "freqBuf=" << freqBuf[i] << " in2=" << tbuf3[i] << " - in=" << tbuf[i] << " => " << (tbuf3[i] - tbuf[i] ) << endl;
- cout << "dif_rmse = " << dif_rmse(tbuf,tbuf3) << endl;
- cout << "test_precision = " << test_precision<T>() << endl;
VERIFY( dif_rmse(tbuf,tbuf3) < test_precision<T>() );// gross check
// verify that ClearFlag works
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
index ae587f016..1313726a1 100644
--- a/unsupported/test/NonLinearOptimization.cpp
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -216,7 +216,7 @@ void testLmder()
// check covariance
covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt); // TODO : move this as a function of lm
+  ei_covar(lm.fjac, lm.permutation.indices()); // TODO: make this a member function of lm
MatrixXd cov_ref(n,n);
cov_ref <<
@@ -605,7 +605,7 @@ void testLmdif()
// check covariance
covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt);
+  ei_covar(lm.fjac, lm.permutation.indices()); // TODO: make this a member function of lm
MatrixXd cov_ref(n,n);
cov_ref <<
@@ -692,8 +692,8 @@ void testNistChwirut2(void)
x<< 0.15, 0.008, 0.010;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1010,7 +1010,7 @@ void testNistLanczos1(void)
VERIFY( 79 == lm.nfev);
VERIFY( 72 == lm.njev);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.429604433690E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1031,7 +1031,7 @@ void testNistLanczos1(void)
VERIFY( 9 == lm.nfev);
VERIFY( 8 == lm.njev);
// check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.43049947737308E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
// check x
VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1170,9 +1170,9 @@ void testNistMGH10(void)
info = lm.minimize(x);
// check return value
- VERIFY( 2 == info);
- VERIFY( 285 == lm.nfev);
- VERIFY( 250 == lm.njev);
+ VERIFY( 2 == info);
+ VERIFY( 284 == lm.nfev);
+ VERIFY( 249 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
// check x
@@ -1188,7 +1188,7 @@ void testNistMGH10(void)
info = lm.minimize(x);
// check return value
- VERIFY( 2 == info);
+ VERIFY( 3 == info);
VERIFY( 126 == lm.nfev);
VERIFY( 116 == lm.njev);
// check norm^2
@@ -1243,8 +1243,8 @@ void testNistBoxBOD(void)
// do the computation
BoxBOD_functor functor;
LevenbergMarquardt<BoxBOD_functor> lm(functor);
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
lm.parameters.factor = 10.;
info = lm.minimize(x);
@@ -1264,14 +1264,14 @@ void testNistBoxBOD(void)
x<< 100., 0.75;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = epsilon<double>();
- lm.parameters.xtol = epsilon<double>();
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 15 == lm.nfev);
- VERIFY( 14 == lm.njev);
+ VERIFY( 1 == info);
+ VERIFY( 15 == lm.nfev);
+ VERIFY( 14 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
// check x
@@ -1325,15 +1325,15 @@ void testNistMGH17(void)
// do the computation
MGH17_functor functor;
LevenbergMarquardt<MGH17_functor> lm(functor);
- lm.parameters.ftol = epsilon<double>();
- lm.parameters.xtol = epsilon<double>();
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
lm.parameters.maxfev = 1000;
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 599 == lm.nfev);
- VERIFY( 544 == lm.njev);
+ VERIFY( 2 == info);
+ VERIFY( 602 == lm.nfev);
+ VERIFY( 545 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
// check x
@@ -1418,16 +1418,16 @@ void testNistMGH09(void)
info = lm.minimize(x);
// check return value
- VERIFY( 1 == info);
- VERIFY( 503== lm.nfev);
- VERIFY( 385 == lm.njev);
+ VERIFY( 1 == info);
+ VERIFY( 490 == lm.nfev);
+ VERIFY( 376 == lm.njev);
// check norm^2
VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
// check x
- VERIFY_IS_APPROX(x[0], 0.19280624); // should be 1.9280693458E-01
- VERIFY_IS_APPROX(x[1], 0.19129774); // should be 1.9128232873E-01
- VERIFY_IS_APPROX(x[2], 0.12305940); // should be 1.2305650693E-01
- VERIFY_IS_APPROX(x[3], 0.13606946); // should be 1.3606233068E-01
+ VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
/*
* Second try
@@ -1584,8 +1584,8 @@ void testNistThurber(void)
// do the computation
thurber_functor functor;
LevenbergMarquardt<thurber_functor> lm(functor);
- lm.parameters.ftol = 1.E4*epsilon<double>();
- lm.parameters.xtol = 1.E4*epsilon<double>();
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1609,8 +1609,8 @@ void testNistThurber(void)
x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E4*epsilon<double>();
- lm.parameters.xtol = 1.E4*epsilon<double>();
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1676,8 +1676,8 @@ void testNistRat43(void)
// do the computation
rat43_functor functor;
LevenbergMarquardt<rat43_functor> lm(functor);
- lm.parameters.ftol = 1.E6*epsilon<double>();
- lm.parameters.xtol = 1.E6*epsilon<double>();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1698,8 +1698,8 @@ void testNistRat43(void)
x<< 700., 5., 0.75, 1.3;
// do the computation
lm.resetParameters();
- lm.parameters.ftol = 1.E5*epsilon<double>();
- lm.parameters.xtol = 1.E5*epsilon<double>();
+ lm.parameters.ftol = 1.E5*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E5*NumTraits<double>::epsilon();
info = lm.minimize(x);
// check return value
@@ -1833,7 +1833,6 @@ void test_NonLinearOptimization()
/*
* Can be useful for debugging...
- printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
printf("info, nfev : %d, %d\n", info, lm.nfev);
printf("info, nfev, njev : %d, %d, %d\n", info, solver.nfev, solver.njev);
printf("info, nfev : %d, %d\n", info, solver.nfev);
@@ -1843,5 +1842,14 @@ void test_NonLinearOptimization()
printf("x[3] : %.32g\n", x[3]);
printf("fvec.blueNorm() : %.32g\n", solver.fvec.blueNorm());
printf("fvec.blueNorm() : %.32g\n", lm.fvec.blueNorm());
+
+ printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
+ printf("fvec.squaredNorm() : %.13g\n", lm.fvec.squaredNorm());
+ std::cout << x << std::endl;
+ std::cout.precision(9);
+ std::cout << x[0] << std::endl;
+ std::cout << x[1] << std::endl;
+ std::cout << x[2] << std::endl;
+ std::cout << x[3] << std::endl;
*/
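
For orientation, the pattern every test in this file follows, sketched with a trivial residual (this assumes the Functor<double> helper defined near the top of the test file; the functor and values are hypothetical):

    // Residuals f(x) = (x0 - 3, 0); the least-squares minimum is at x0 = 3.
    struct lin_functor : Functor<double>
    {
        lin_functor() : Functor<double>(1, 2) {}
        int operator()(const VectorXd &x, VectorXd &fvec) const
        {
            fvec(0) = x(0) - 3.0;
            fvec(1) = 0.0;
            return 0;
        }
        int df(const VectorXd &, MatrixXd &fjac) const
        {
            fjac(0, 0) = 1.0;   // d fvec(0) / d x(0)
            fjac(1, 0) = 0.0;
            return 0;
        }
    };

    // Usage, mirroring the tests above:
    //   lin_functor functor;
    //   LevenbergMarquardt<lin_functor> lm(functor);
    //   lm.parameters.ftol = NumTraits<double>::epsilon();
    //   VectorXd x(1); x << 0.;
    //   int info = lm.minimize(x);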
diff --git a/unsupported/test/matrix_exponential.cpp b/unsupported/test/matrix_exponential.cpp
index a5b40adde..6150439c5 100644
--- a/unsupported/test/matrix_exponential.cpp
+++ b/unsupported/test/matrix_exponential.cpp
@@ -61,7 +61,7 @@ void test2dRotation(double tol)
std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(angle*A, &C);
+ C = ei_matrix_exponential(angle*A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -86,7 +86,7 @@ void test2dHyperbolicRotation(double tol)
std::cout << "test2dHyperbolicRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(A, &C);
+ C = ei_matrix_exponential(A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -110,7 +110,7 @@ void testPascal(double tol)
std::cout << "testPascal: size = " << size << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));
- ei_matrix_exponential(A, &C);
+ C = ei_matrix_exponential(A);
std::cout << " error expm = " << relerr(C, B) << "\n";
VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
@@ -137,10 +137,9 @@ void randomTest(const MatrixType& m, double tol)
std::cout << "randomTest: error funm = " << relerr(identity, m2 * m3);
VERIFY(identity.isApprox(m2 * m3, static_cast<RealScalar>(tol)));
- ei_matrix_exponential(m1, &m2);
- ei_matrix_exponential(-m1, &m3);
- std::cout << " error expm = " << relerr(identity, m2 * m3) << "\n";
- VERIFY(identity.isApprox(m2 * m3, static_cast<RealScalar>(tol)));
+ m2 = ei_matrix_exponential(m1) * ei_matrix_exponential(-m1);
+ std::cout << " error expm = " << relerr(identity, m2) << "\n";
+ VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
}
}
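
The refactored randomTest checks exp(A) * exp(-A) = I, which holds for every square A because A and -A commute. The same check in isolation (a sketch; the default isApprox precision is used):

    #include <unsupported/Eigen/MatrixFunctions>
    #include <cassert>
    using namespace Eigen;

    int main()
    {
        MatrixXd A = MatrixXd::Random(4, 4);
        MatrixXd P = ei_matrix_exponential(A) * ei_matrix_exponential(-A);
        assert(P.isApprox(MatrixXd::Identity(4, 4)));
    }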
diff --git a/unsupported/test/matrix_function.cpp b/unsupported/test/matrix_function.cpp
index 0eb30eecb..4ff6d7f1e 100644
--- a/unsupported/test/matrix_function.cpp
+++ b/unsupported/test/matrix_function.cpp
@@ -25,44 +25,100 @@
#include "main.h"
#include <unsupported/Eigen/MatrixFunctions>
+// Returns a matrix with eigenvalues clustered around 0, 1 and 2.
template<typename MatrixType>
-void testMatrixExponential(const MatrixType& m)
+MatrixType randomMatrixWithRealEivals(const int size)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (int i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(ei_random<int>(0,2)))
+ + ei_random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+}
+
+template <typename MatrixType, int IsComplex = NumTraits<typename ei_traits<MatrixType>::Scalar>::IsComplex>
+struct randomMatrixWithImagEivals
+{
+ // Returns a matrix with eigenvalues clustered around 0 and +/- i.
+ static MatrixType run(const int size);
+};
+
+// Partial specialization for real matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 0>
+{
+ static MatrixType run(const int size)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ int i = 0;
+ while (i < size) {
+ int randomInt = ei_random<int>(-1, 1);
+ if (randomInt == 0 || i == size-1) {
+ diag(i, i) = ei_random<Scalar>() * Scalar(0.01);
+ ++i;
+ } else {
+ Scalar alpha = Scalar(randomInt) + ei_random<Scalar>() * Scalar(0.01);
+ diag(i, i+1) = alpha;
+ diag(i+1, i) = -alpha;
+ i += 2;
+ }
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+ }
+};
+
+// Partial specialization for complex matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 1>
+{
+ static MatrixType run(const int size)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ const Scalar imagUnit(0, 1);
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (int i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(ei_random<int>(-1, 1))) * imagUnit
+ + ei_random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ return A.inverse() * diag * A;
+ }
+};
+
+template<typename MatrixType>
+void testMatrixExponential(const MatrixType& A)
{
typedef typename ei_traits<MatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
- const int rows = m.rows();
- const int cols = m.cols();
-
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
- MatrixType expA1, expA2;
- ei_matrix_exponential(A, &expA1);
- ei_matrix_function(A, StdStemFunctions<ComplexScalar>::exp, &expA2);
- VERIFY_IS_APPROX(expA1, expA2);
+ VERIFY_IS_APPROX(ei_matrix_exponential(A),
+ ei_matrix_function(A, StdStemFunctions<ComplexScalar>::exp));
}
}
template<typename MatrixType>
-void testHyperbolicFunctions(const MatrixType& m)
+void testHyperbolicFunctions(const MatrixType& A)
{
- const int rows = m.rows();
- const int cols = m.cols();
-
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
- MatrixType sinhA, coshA, expA;
- ei_matrix_sinh(A, &sinhA);
- ei_matrix_cosh(A, &coshA);
- ei_matrix_exponential(A, &expA);
+ MatrixType sinhA = ei_matrix_sinh(A);
+ MatrixType coshA = ei_matrix_cosh(A);
+ MatrixType expA = ei_matrix_exponential(A);
VERIFY_IS_APPROX(sinhA, (expA - expA.inverse())/2);
VERIFY_IS_APPROX(coshA, (expA + expA.inverse())/2);
}
}
template<typename MatrixType>
-void testGonioFunctions(const MatrixType& m)
+void testGonioFunctions(const MatrixType& A)
{
typedef ei_traits<MatrixType> Traits;
typedef typename Traits::Scalar Scalar;
@@ -71,36 +127,44 @@ void testGonioFunctions(const MatrixType& m)
typedef Matrix<ComplexScalar, Traits::RowsAtCompileTime,
Traits::ColsAtCompileTime, MatrixType::Options> ComplexMatrix;
- const int rows = m.rows();
- const int cols = m.cols();
ComplexScalar imagUnit(0,1);
ComplexScalar two(2,0);
for (int i = 0; i < g_repeat; i++) {
- MatrixType A = MatrixType::Random(rows, cols);
ComplexMatrix Ac = A.template cast<ComplexScalar>();
- ComplexMatrix exp_iA;
- ei_matrix_exponential(imagUnit * Ac, &exp_iA);
+ ComplexMatrix exp_iA = ei_matrix_exponential(imagUnit * Ac);
- MatrixType sinA;
- ei_matrix_sin(A, &sinA);
+ MatrixType sinA = ei_matrix_sin(A);
ComplexMatrix sinAc = sinA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(sinAc, (exp_iA - exp_iA.inverse()) / (two*imagUnit));
- MatrixType cosA;
- ei_matrix_cos(A, &cosA);
+ MatrixType cosA = ei_matrix_cos(A);
ComplexMatrix cosAc = cosA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(cosAc, (exp_iA + exp_iA.inverse()) / 2);
}
}
template<typename MatrixType>
+void testMatrix(const MatrixType& A)
+{
+ testMatrixExponential(A);
+ testHyperbolicFunctions(A);
+ testGonioFunctions(A);
+}
+
+template<typename MatrixType>
void testMatrixType(const MatrixType& m)
{
- testMatrixExponential(m);
- testHyperbolicFunctions(m);
- testGonioFunctions(m);
+  // Matrices with clustered eigenvalues lead to different code paths
+ // in MatrixFunction.h and are thus useful for testing.
+
+ const int size = m.rows();
+ for (int i = 0; i < g_repeat; i++) {
+ testMatrix(MatrixType::Random(size, size).eval());
+ testMatrix(randomMatrixWithRealEivals<MatrixType>(size));
+ testMatrix(randomMatrixWithImagEivals<MatrixType>::run(size));
+ }
}
void test_matrix_function()