path: root/unsupported
author    Jitse Niesen <jitse@maths.leeds.ac.uk>  2013-07-21 20:50:15 +0100
committer Jitse Niesen <jitse@maths.leeds.ac.uk>  2013-07-21 20:50:15 +0100
commit    5879937f58efb9337b82d288ae8dd3513b918791 (patch)
tree      806bff387d4d02deb72c177daddf5d17e3473b4b /unsupported
parent    660b905e129c92fd0e8271d2df06d11347f4f32f (diff)
parent    01190b3544cd0a674be6475185d5dd8e4b7890c5 (diff)
Merge in jdh8's branch.
* Enable singular matrix power and complex exponents.
* Eliminate unnecessary copying for sparse Kronecker product.
Diffstat (limited to 'unsupported')
-rw-r--r--  unsupported/Eigen/MatrixFunctions                               |  55
-rw-r--r--  unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h | 116
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h       |  65
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h          |  12
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h    |   6
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h         |  10
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h             | 380
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h        |  20
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/StemFunction.h            |   2
-rw-r--r--  unsupported/test/kronecker_product.cpp                          |  21
-rw-r--r--  unsupported/test/matrix_functions.h                             |  41
-rw-r--r--  unsupported/test/matrix_power.cpp                               | 151
12 files changed, 630 insertions, 249 deletions
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 0991817d5..0b12aaffb 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -13,8 +13,6 @@
#include <cfloat>
#include <list>
-#include <functional>
-#include <iterator>
#include <Eigen/Core>
#include <Eigen/LU>
@@ -230,22 +228,65 @@ const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) con
\endcode
\param[in] M base of the matrix power, should be a square matrix.
-\param[in] p exponent of the matrix power, should be real.
+\param[in] p exponent of the matrix power.
The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
where exp denotes the matrix exponential, and log denotes the matrix
logarithm.
-The matrix \f$ M \f$ should meet the conditions to be an argument of
-matrix logarithm. If \p p is not of the real scalar type of \p M, it
-is casted into the real scalar type of \p M.
+If \p p is complex, the scalar type of \p M should be the type of
+\p p . \f$ M^p \f$ then simply evaluates to \f$ \exp(p \log(M)) \f$.
+Therefore, the matrix \f$ M \f$ should meet the conditions to be an
+argument of matrix logarithm.
-This function computes the matrix power using the Schur-Pad&eacute;
+If \p p is real, it is cast into the real scalar type of \p M. Then
+this function computes the matrix power using the Schur-Pad&eacute;
algorithm as implemented by class MatrixPower. The exponent is split
into integral part and fractional part, where the fractional part is
in the interval \f$ (-1, 1) \f$. The main diagonal and the first
super-diagonal is directly computed.
+If \p M is singular with a semisimple zero eigenvalue and \p p is
+positive, the Schur factor \f$ T \f$ is reordered with Givens
+rotations, i.e.
+
+\f[ T = \left[ \begin{array}{cc}
+ T_1 & T_2 \\
+ 0 & 0
+ \end{array} \right] \f]
+
+where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by
+
+\f[ T^p = \left[ \begin{array}{cc}
+ T_1^p & T_1^{-1} T_1^p T_2 \\
+ 0 & 0
+ \end{array} \right]. \f]
+
+\warning Fractional power of a matrix with a non-semisimple zero
+eigenvalue is not well-defined. An assertion fails rather than
+returning an inaccurate result, e.g. \code
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+int main()
+{
+ Eigen::Matrix4d A;
+ A << 0, 0, 2, 3,
+ 0, 0, 4, 5,
+ 0, 0, 6, 7,
+ 0, 0, 8, 9;
+ std::cout << A.pow(0.37) << std::endl;
+
+ // The 1 makes eigenvalue 0 non-semisimple.
+ A.coeffRef(0, 1) = 1;
+
+ // This fails if EIGEN_NO_DEBUG is undefined.
+ std::cout << A.pow(0.37) << std::endl;
+
+ return 0;
+}
+\endcode
+
Details of the algorithm can be found in: Nicholas J. Higham and
Lijing Lin, "A Schur-Pad&eacute; algorithm for fractional powers of a
matrix," <em>SIAM J. %Matrix Anal. Applic.</em>,
diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
index 532896c3b..80be9fd7a 100644
--- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
@@ -12,58 +12,94 @@
#ifndef KRONECKER_TENSOR_PRODUCT_H
#define KRONECKER_TENSOR_PRODUCT_H
-namespace Eigen {
-
-template<typename Scalar, int Options, typename Index> class SparseMatrix;
+namespace Eigen {
/*!
- * \brief Kronecker tensor product helper class for dense matrices
+ * \ingroup KroneckerProduct_Module
*
- * This class is the return value of kroneckerProduct(MatrixBase,
- * MatrixBase). Use the function rather than construct this class
- * directly to avoid specifying template prarameters.
+ * \brief The base class of dense and sparse Kronecker product.
*
- * \tparam Lhs Type of the left-hand side, a matrix expression.
- * \tparam Rhs Type of the rignt-hand side, a matrix expression.
+ * \tparam Derived is the derived type.
*/
-template<typename Lhs, typename Rhs>
-class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
+template<typename Derived>
+class KroneckerProductBase : public ReturnByValue<Derived>
{
private:
- typedef ReturnByValue<KroneckerProduct> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::Index Index;
+ typedef typename internal::traits<Derived> Traits;
+ typedef typename Traits::Lhs Lhs;
+ typedef typename Traits::Rhs Rhs;
+ typedef typename Traits::Scalar Scalar;
+
+ protected:
+ typedef typename Traits::Index Index;
public:
/*! \brief Constructor. */
- KroneckerProduct(const Lhs& A, const Rhs& B)
+ KroneckerProductBase(const Lhs& A, const Rhs& B)
: m_A(A), m_B(B)
{}
- /*! \brief Evaluate the Kronecker tensor product. */
- template<typename Dest> void evalTo(Dest& dst) const;
-
inline Index rows() const { return m_A.rows() * m_B.rows(); }
inline Index cols() const { return m_A.cols() * m_B.cols(); }
+ /*!
+ * This overrides ReturnByValue::coeff because this function is
+ * efficient enough.
+ */
Scalar coeff(Index row, Index col) const
{
return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
m_B.coeff(row % m_B.rows(), col % m_B.cols());
}
+ /*!
+ * This overrides ReturnByValue::coeff because this function is
+ * efficient enough.
+ */
Scalar coeff(Index i) const
{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct);
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size());
}
- private:
+ protected:
typename Lhs::Nested m_A;
typename Rhs::Nested m_B;
};
/*!
+ * \ingroup KroneckerProduct_Module
+ *
+ * \brief Kronecker tensor product helper class for dense matrices
+ *
+ * This class is the return value of kroneckerProduct(MatrixBase,
+ * MatrixBase). Use the function rather than construct this class
+ * directly to avoid specifying template parameters.
+ *
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs Type of the right-hand side, a matrix expression.
+ */
+template<typename Lhs, typename Rhs>
+class KroneckerProduct : public KroneckerProductBase<KroneckerProduct<Lhs,Rhs> >
+{
+ private:
+ typedef KroneckerProductBase<KroneckerProduct> Base;
+ using Base::m_A;
+ using Base::m_B;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProduct(const Lhs& A, const Rhs& B)
+ : Base(A, B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+};
+
+/*!
+ * \ingroup KroneckerProduct_Module
+ *
* \brief Kronecker tensor product helper class for sparse matrices
*
* If at least one of the operands is a sparse matrix expression,
@@ -77,40 +113,28 @@ class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
* \tparam Rhs Type of the rignt-hand side, a matrix expression.
*/
template<typename Lhs, typename Rhs>
-class KroneckerProductSparse : public EigenBase<KroneckerProductSparse<Lhs,Rhs> >
+class KroneckerProductSparse : public KroneckerProductBase<KroneckerProductSparse<Lhs,Rhs> >
{
private:
- typedef typename internal::traits<KroneckerProductSparse>::Index Index;
+ typedef KroneckerProductBase<KroneckerProductSparse> Base;
+ using Base::m_A;
+ using Base::m_B;
public:
/*! \brief Constructor. */
KroneckerProductSparse(const Lhs& A, const Rhs& B)
- : m_A(A), m_B(B)
+ : Base(A, B)
{}
/*! \brief Evaluate the Kronecker tensor product. */
template<typename Dest> void evalTo(Dest& dst) const;
-
- inline Index rows() const { return m_A.rows() * m_B.rows(); }
- inline Index cols() const { return m_A.cols() * m_B.cols(); }
-
- template<typename Scalar, int Options, typename Index>
- operator SparseMatrix<Scalar, Options, Index>()
- {
- SparseMatrix<Scalar, Options, Index> result;
- evalTo(result.derived());
- return result;
- }
-
- private:
- typename Lhs::Nested m_A;
- typename Rhs::Nested m_B;
};
template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProduct<Lhs,Rhs>::evalTo(Dest& dst) const
{
+ typedef typename Base::Index Index;
const int BlockRows = Rhs::RowsAtCompileTime,
BlockCols = Rhs::ColsAtCompileTime;
const Index Br = m_B.rows(),
@@ -124,9 +148,10 @@ template<typename Lhs, typename Rhs>
template<typename Dest>
void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
{
+ typedef typename Base::Index Index;
const Index Br = m_B.rows(),
Bc = m_B.cols();
- dst.resize(rows(),cols());
+ dst.resize(this->rows(), this->cols());
dst.resizeNonZeros(0);
dst.reserve(m_A.nonZeros() * m_B.nonZeros());
@@ -155,6 +180,7 @@ struct traits<KroneckerProduct<_Lhs,_Rhs> >
typedef typename remove_all<_Lhs>::type Lhs;
typedef typename remove_all<_Rhs>::type Rhs;
typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
enum {
Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
@@ -193,6 +219,8 @@ struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
| EvalBeforeNestingBit | EvalBeforeAssigningBit,
CoeffReadCost = Dynamic
};
+
+ typedef SparseMatrix<Scalar> ReturnType;
};
} // end namespace internal
@@ -228,6 +256,16 @@ KroneckerProduct<A,B> kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<
* Computes Kronecker tensor product of two matrices, at least one of
* which is sparse
*
+ * \warning If you want to replace a matrix by its Kronecker product
+ * with some matrix, do \b NOT do this:
+ * \code
+ * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect
+ * \endcode
+ * instead, use eval() to work around this:
+ * \code
+ * A = kroneckerProduct(A,B).eval();
+ * \endcode
+ *
* \param a Dense/sparse matrix a
* \param b Dense/sparse matrix b
* \return Kronecker tensor product of a and b, stored in a sparse
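
A short sketch of the sparse path and of the eval() workaround from the warning above; the operands are illustrative only and not part of the patch.

#include <Eigen/Sparse>
#include <unsupported/Eigen/KroneckerProduct>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(2, 2);
  A << 1, 2,
       3, 4;

  Eigen::SparseMatrix<double> B(2, 2);
  B.insert(0, 0) = 5;
  B.insert(1, 1) = 6;

  // At least one operand is sparse, so the result is stored in a sparse matrix.
  Eigen::SparseMatrix<double> C = Eigen::kroneckerProduct(A, B);
  std::cout << Eigen::MatrixXd(C) << "\n\n";

  // Replacing a matrix by its own Kronecker product needs eval()
  // to avoid the aliasing problem described above.
  Eigen::MatrixXd D(1, 2);
  D << 1, 2;
  D = Eigen::kroneckerProduct(D, D).eval();
  std::cout << D << std::endl;   // 1 2 2 4
  return 0;
}
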
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 6825a7882..30b36a179 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -21,8 +21,8 @@ namespace Eigen {
* expected to be an instantiation of the Matrix class template.
*/
template <typename MatrixType>
-class MatrixExponential {
-
+class MatrixExponential : internal::noncopyable
+{
public:
/** \brief Constructor.
@@ -32,7 +32,7 @@ class MatrixExponential {
*
* \param[in] M matrix whose exponential is to be computed.
*/
- MatrixExponential(const MatrixType &M);
+ explicit MatrixExponential(const MatrixType &M);
/** \brief Computes the matrix exponential.
*
@@ -43,10 +43,6 @@ class MatrixExponential {
private:
- // Prevent copying
- MatrixExponential(const MatrixExponential&);
- MatrixExponential& operator=(const MatrixExponential&);
-
/** \brief Compute the (3,3)-Pad&eacute; approximant to the exponential.
*
* After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Pad&eacute;
@@ -130,6 +126,7 @@ class MatrixExponential {
*/
void computeUV(long double);
+ struct ScalingOp;
typedef typename internal::traits<MatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename std::complex<RealScalar> ComplexScalar;
@@ -159,6 +156,43 @@ class MatrixExponential {
RealScalar m_l1norm;
};
+/** \brief Scaling operator.
+ *
+ * This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$.
+ */
+template <typename MatrixType>
+struct MatrixExponential<MatrixType>::ScalingOp
+{
+ /** \brief Constructor.
+ *
+ * \param[in] squarings The integer \f$ s \f$ in this document.
+ */
+ ScalingOp(int squarings) : m_squarings(squarings) { }
+
+ /** \brief Scale a matrix coefficient.
+ *
+ * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$.
+ */
+ inline const RealScalar operator() (const RealScalar& x) const
+ {
+ using std::ldexp;
+ return ldexp(x, -m_squarings);
+ }
+
+ /** \brief Scale a matrix coefficient.
+ *
+ * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$.
+ */
+ inline const ComplexScalar operator() (const ComplexScalar& x) const
+ {
+ using std::ldexp;
+ return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings));
+ }
+
+ private:
+ int m_squarings;
+};
+
template <typename MatrixType>
MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
m_M(M),
@@ -284,7 +318,6 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(float)
{
using std::frexp;
- using std::pow;
if (m_l1norm < 4.258730016922831e-001) {
pade3(m_M);
} else if (m_l1norm < 1.880152677804762e+000) {
@@ -293,7 +326,7 @@ void MatrixExponential<MatrixType>::computeUV(float)
const float maxnorm = 3.925724783138660f;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ MatrixType A = CwiseUnaryOp<ScalingOp, const MatrixType>(m_M, m_squarings);
pade7(A);
}
}
@@ -302,7 +335,6 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(double)
{
using std::frexp;
- using std::pow;
if (m_l1norm < 1.495585217958292e-002) {
pade3(m_M);
} else if (m_l1norm < 2.539398330063230e-001) {
@@ -315,7 +347,7 @@ void MatrixExponential<MatrixType>::computeUV(double)
const double maxnorm = 5.371920351148152;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ MatrixType A = CwiseUnaryOp<ScalingOp, const MatrixType>(m_M, m_squarings);
pade13(A);
}
}
@@ -324,7 +356,6 @@ template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(long double)
{
using std::frexp;
- using std::pow;
#if LDBL_MANT_DIG == 53 // double precision
computeUV(double());
#elif LDBL_MANT_DIG <= 64 // extended precision
@@ -340,7 +371,7 @@ void MatrixExponential<MatrixType>::computeUV(long double)
const long double maxnorm = 4.0246098906697353063L;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ MatrixType A = CwiseUnaryOp<ScalingOp, const MatrixType>(m_M, m_squarings);
pade13(A);
}
#elif LDBL_MANT_DIG <= 106 // double-double
@@ -358,7 +389,7 @@ void MatrixExponential<MatrixType>::computeUV(long double)
const long double maxnorm = 3.2579440895405400856599663723517L;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ MatrixType A = CwiseUnaryOp<ScalingOp, const MatrixType>(m_M, m_squarings);
pade17(A);
}
#elif LDBL_MANT_DIG <= 112 // quadruple precison
@@ -376,7 +407,7 @@ void MatrixExponential<MatrixType>::computeUV(long double)
const long double maxnorm = 2.884233277829519311757165057717815L;
frexp(m_l1norm / maxnorm, &m_squarings);
if (m_squarings < 0) m_squarings = 0;
- MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ MatrixType A = CwiseUnaryOp<ScalingOp, const MatrixType>(m_M, m_squarings);
pade17(A);
}
#else
@@ -407,7 +438,7 @@ template<typename Derived> struct MatrixExponentialReturnValue
* \param[in] src %Matrix (expression) forming the argument of the
* matrix exponential.
*/
- MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
+ explicit MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
/** \brief Compute the matrix exponential.
*
@@ -427,8 +458,6 @@ template<typename Derived> struct MatrixExponentialReturnValue
protected:
const Derived& m_src;
- private:
- MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&);
};
namespace internal {
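
The change above replaces the division by pow(Scalar(2), m_squarings) with a coefficient-wise ldexp, which scales by adjusting the floating-point exponent directly and avoids the pow() call. A standalone sketch of the same idea with a hypothetical functor (the patch builds the CwiseUnaryOp directly; unaryExpr() is the equivalent public entry point):

#include <Eigen/Core>
#include <cmath>
#include <iostream>

// Scales a coefficient by 2^(-s) via ldexp, i.e. by adjusting the exponent bits.
struct ScaleByPowerOfTwo
{
  explicit ScaleByPowerOfTwo(int squarings) : m_squarings(squarings) {}
  double operator()(const double& x) const { return std::ldexp(x, -m_squarings); }
  int m_squarings;
};

int main()
{
  Eigen::Matrix2d M;
  M << 1, 2,
       3, 4;

  // Same result as M / std::pow(2.0, 3), i.e. M / 8, without calling pow().
  Eigen::Matrix2d A = M.unaryExpr(ScaleByPowerOfTwo(3));
  std::cout << A << std::endl;
  return 0;
}
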
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index 7d426640c..ad54d8ed0 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -34,7 +34,7 @@ namespace Eigen {
template <typename MatrixType,
typename AtomicType,
int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
-class MatrixFunction
+class MatrixFunction : internal::noncopyable
{
public:
@@ -65,7 +65,7 @@ class MatrixFunction
* \brief Partial specialization of MatrixFunction for real matrices
*/
template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 0>
+class MatrixFunction<MatrixType, AtomicType, 0> : internal::noncopyable
{
private:
@@ -111,8 +111,6 @@ class MatrixFunction<MatrixType, AtomicType, 0>
private:
typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
-
- MatrixFunction& operator=(const MatrixFunction&);
};
@@ -120,7 +118,7 @@ class MatrixFunction<MatrixType, AtomicType, 0>
* \brief Partial specialization of MatrixFunction for complex matrices
*/
template <typename MatrixType, typename AtomicType>
-class MatrixFunction<MatrixType, AtomicType, 1>
+class MatrixFunction<MatrixType, AtomicType, 1> : internal::noncopyable
{
private:
@@ -176,8 +174,6 @@ class MatrixFunction<MatrixType, AtomicType, 1>
* separation constant is set to 0.1, a value taken from the
* paper by Davies and Higham. */
static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
-
- MatrixFunction& operator=(const MatrixFunction&);
};
/** \brief Constructor.
@@ -531,8 +527,6 @@ template<typename Derived> class MatrixFunctionReturnValue
private:
typename internal::nested<Derived>::type m_A;
StemFunction *m_f;
-
- MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&);
};
namespace internal {
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
index efe332c48..d6ff5f1ce 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -21,7 +21,7 @@ namespace Eigen {
* entries are close to each other.
*/
template <typename MatrixType>
-class MatrixFunctionAtomic
+class MatrixFunctionAtomic : internal::noncopyable
{
public:
@@ -44,10 +44,6 @@ class MatrixFunctionAtomic
private:
- // Prevent copying
- MatrixFunctionAtomic(const MatrixFunctionAtomic&);
- MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&);
-
void computeMu();
bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P);
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index c744fc05f..33cfadfb4 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -28,7 +28,7 @@ namespace Eigen {
* \sa class MatrixFunctionAtomic, MatrixBase::log()
*/
template <typename MatrixType>
-class MatrixLogarithmAtomic
+class MatrixLogarithmAtomic : internal::noncopyable
{
public:
@@ -71,10 +71,6 @@ private:
std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
std::numeric_limits<RealScalar>::digits<=106? 10: // double-double
11; // quadruple precision
-
- // Prevent copying
- MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
- MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
};
/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
@@ -429,7 +425,7 @@ public:
*
* \param[in] A %Matrix (expression) forming the argument of the matrix logarithm.
*/
- MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
+ explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
/** \brief Compute the matrix logarithm.
*
@@ -458,8 +454,6 @@ public:
private:
typename internal::nested<Derived>::type m_A;
-
- MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
};
namespace internal {
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index c32437281..5548bd95c 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -14,16 +14,48 @@ namespace Eigen {
template<typename MatrixType> class MatrixPower;
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix.
+ *
+ * \tparam MatrixType type of the base, a matrix.
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixPower::operator() and related functions and most of the
+ * time this is the only way it is used.
+ */
+/* TODO This class is only used by MatrixPower, so it should be nested
+ * into MatrixPower, like MatrixPower::ReturnValue. However, my
+ * compiler complained about unused template parameter in the
+ * following declaration in namespace internal.
+ *
+ * template<typename MatrixType>
+ * struct traits<MatrixPower<MatrixType>::ReturnValue>;
+ */
template<typename MatrixType>
-class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
+class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue<MatrixType> >
{
public:
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
- MatrixPowerRetval(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] pow %MatrixPower storing the base.
+ * \param[in] p scalar, the exponent of the matrix power.
+ */
+ MatrixPowerParenthesesReturnValue(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
{ }
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[out] result The matrix power \f$ A^p \f$.
+ */
template<typename ResultType>
inline void evalTo(ResultType& res) const
{ m_pow.compute(res, m_p); }
@@ -34,11 +66,25 @@ class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
private:
MatrixPower<MatrixType>& m_pow;
const RealScalar m_p;
- MatrixPowerRetval& operator=(const MatrixPowerRetval&);
};
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Class for computing matrix powers.
+ *
+ * \tparam MatrixType type of the base, expected to be an instantiation
+ * of the Matrix class template.
+ *
+ * This class is capable of computing triangular real/complex matrices
+ * raised to a power in the interval \f$ (-1, 1) \f$.
+ *
+ * \note Currently this class is only used by MatrixPower. One may
+ * insist that this be nested into MatrixPower. This class is here to
+ * facilitate future development of triangular matrix functions.
+ */
template<typename MatrixType>
-class MatrixPowerAtomic
+class MatrixPowerAtomic : internal::noncopyable
{
private:
enum {
@@ -49,14 +95,14 @@ class MatrixPowerAtomic
typedef typename MatrixType::RealScalar RealScalar;
typedef std::complex<RealScalar> ComplexScalar;
typedef typename MatrixType::Index Index;
- typedef Array<Scalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> ArrayType;
+ typedef Block<MatrixType,Dynamic,Dynamic> ResultType;
const MatrixType& m_A;
RealScalar m_p;
- void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const;
- void compute2x2(MatrixType& res, RealScalar p) const;
- void computeBig(MatrixType& res) const;
+ void computePade(int degree, const MatrixType& IminusT, ResultType& res) const;
+ void compute2x2(ResultType& res, RealScalar p) const;
+ void computeBig(ResultType& res) const;
static int getPadeDegree(float normIminusT);
static int getPadeDegree(double normIminusT);
static int getPadeDegree(long double normIminusT);
@@ -64,24 +110,45 @@ class MatrixPowerAtomic
static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p);
public:
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] T the base of the matrix power.
+ * \param[in] p the exponent of the matrix power, should be in
+ * \f$ (-1, 1) \f$.
+ *
+ * The class stores a reference to T, so it should not be changed
+ * (or destroyed) before evaluation. Only the upper triangular
+ * part of T is read.
+ */
MatrixPowerAtomic(const MatrixType& T, RealScalar p);
- void compute(MatrixType& res) const;
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[out] res \f$ A^p \f$ where A and p are specified in the
+ * constructor.
+ */
+ void compute(ResultType& res) const;
};
template<typename MatrixType>
MatrixPowerAtomic<MatrixType>::MatrixPowerAtomic(const MatrixType& T, RealScalar p) :
m_A(T), m_p(p)
-{ eigen_assert(T.rows() == T.cols()); }
+{
+ eigen_assert(T.rows() == T.cols());
+ eigen_assert(p > -1 && p < 1);
+}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::compute(ResultType& res) const
{
- res.resizeLike(m_A);
+ using std::pow;
switch (m_A.rows()) {
case 0:
break;
case 1:
- res(0,0) = std::pow(m_A(0,0), m_p);
+ res(0,0) = pow(m_A(0,0), m_p);
break;
case 2:
compute2x2(res, m_p);
@@ -92,25 +159,24 @@ void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, ResultType& res) const
{
- int i = degree<<1;
- res = (m_p-degree) / ((i-1)<<1) * IminusT;
+ int i = 2*degree;
+ res = (m_p-degree) / (2*i-2) * IminusT;
+
for (--i; i; --i) {
res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
- .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval();
+ .solve((i==1 ? -m_p : i&1 ? (-m_p-i/2)/(2*i) : (m_p-i/2)/(2*i-2)) * IminusT).eval();
}
res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
}
// This function assumes that res has the correct size (see bug 614)
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) const
+void MatrixPowerAtomic<MatrixType>::compute2x2(ResultType& res, RealScalar p) const
{
using std::abs;
using std::pow;
-
- ArrayType logTdiag = m_A.diagonal().array().log();
res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);
for (Index i=1; i < m_A.cols(); ++i) {
@@ -126,8 +192,9 @@ void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) co
}
template<typename MatrixType>
-void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
+void MatrixPowerAtomic<MatrixType>::computeBig(ResultType& res) const
{
+ using std::ldexp;
const int digits = std::numeric_limits<RealScalar>::digits;
const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // sigle precision
digits <= 53? 2.789358995219730e-1: // double precision
@@ -139,19 +206,6 @@ void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
int degree, degree2, numberOfSquareRoots = 0;
bool hasExtraSquareRoot = false;
- /* FIXME
- * For singular T, norm(I - T) >= 1 but maxNormForPade < 1, leads to infinite
- * loop. We should move 0 eigenvalues to bottom right corner. We need not
- * worry about tiny values (e.g. 1e-300) because they will reach 1 if
- * repetitively sqrt'ed.
- *
- * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the
- * bottom right corner.
- *
- * [ T A ]^p [ T^p (T^-1 T^p A) ]
- * [ ] = [ ]
- * [ 0 0 ] [ 0 0 ]
- */
for (Index i=0; i < m_A.cols(); ++i)
eigen_assert(m_A(i,i) != RealScalar(0));
@@ -172,7 +226,7 @@ void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
computePade(degree, IminusT, res);
for (; numberOfSquareRoots; --numberOfSquareRoots) {
- compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots));
+ compute2x2(res, ldexp(m_p, -numberOfSquareRoots));
res = res.template triangularView<Upper>() * res;
}
compute2x2(res, m_p);
@@ -237,19 +291,28 @@ template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::ComplexScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p)
{
- ComplexScalar logCurr = std::log(curr);
- ComplexScalar logPrev = std::log(prev);
- int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
+ using std::ceil;
+ using std::exp;
+ using std::log;
+ using std::sinh;
+
+ ComplexScalar logCurr = log(curr);
+ ComplexScalar logPrev = log(prev);
+ int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber);
- return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev);
+ return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev);
}
template<typename MatrixType>
inline typename MatrixPowerAtomic<MatrixType>::RealScalar
MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p)
{
+ using std::exp;
+ using std::log;
+ using std::sinh;
+
RealScalar w = numext::atanh2(curr - prev, curr + prev);
- return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev);
+ return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev);
}
/**
@@ -272,15 +335,9 @@ MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev
* Output: \verbinclude MatrixPower_optimal.out
*/
template<typename MatrixType>
-class MatrixPower
+class MatrixPower : internal::noncopyable
{
private:
- enum {
- RowsAtCompileTime = MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
- };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
@@ -294,7 +351,11 @@ class MatrixPower
* The class stores a reference to A, so it should not be changed
* (or destroyed) before evaluation.
*/
- explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0)
+ explicit MatrixPower(const MatrixType& A) :
+ m_A(A),
+ m_conditionNumber(0),
+ m_rank(A.cols()),
+ m_nulls(0)
{ eigen_assert(A.rows() == A.cols()); }
/**
@@ -304,8 +365,8 @@ class MatrixPower
* \return The expression \f$ A^p \f$, where A is specified in the
* constructor.
*/
- const MatrixPowerRetval<MatrixType> operator()(RealScalar p)
- { return MatrixPowerRetval<MatrixType>(*this, p); }
+ const MatrixPowerParenthesesReturnValue<MatrixType> operator()(RealScalar p)
+ { return MatrixPowerParenthesesReturnValue<MatrixType>(*this, p); }
/**
* \brief Compute the matrix power.
@@ -322,21 +383,54 @@ class MatrixPower
private:
typedef std::complex<RealScalar> ComplexScalar;
- typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, MatrixType::Options,
- MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrix;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, MatrixType::Options,
+ MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> ComplexMatrix;
+ /** \brief Reference to the base of matrix power. */
typename MatrixType::Nested m_A;
+
+ /** \brief Temporary storage. */
MatrixType m_tmp;
- ComplexMatrix m_T, m_U, m_fT;
+
+ /** \brief Store the result of Schur decomposition. */
+ ComplexMatrix m_T, m_U;
+
+ /** \brief Store fractional power of m_T. */
+ ComplexMatrix m_fT;
+
+ /**
+ * \brief Condition number of m_A.
+ *
+ * It is initialized as 0 to avoid performing unnecessary Schur
+ * decomposition, which is the bottleneck.
+ */
RealScalar m_conditionNumber;
- RealScalar modfAndInit(RealScalar, RealScalar*);
+ /** \brief Rank of m_A. */
+ Index m_rank;
+
+ /** \brief Rank deficiency of m_A. */
+ Index m_nulls;
+
+ /**
+ * \brief Split p into integral part and fractional part.
+ *
+ * \param[in,out] p On input, the exponent; on output, the fractional
+ *                part, which lies in \f$ (-1, 1) \f$.
+ * \param[out] intpart The integral part.
+ *
+ * initialize() is called only if the fractional part is nonzero.
+ */
+ void split(RealScalar& p, RealScalar& intpart);
+
+ /** \brief Perform Schur decomposition for fractional power. */
+ void initialize();
template<typename ResultType>
- void computeIntPower(ResultType&, RealScalar);
+ void computeIntPower(ResultType& res, RealScalar p);
template<typename ResultType>
- void computeFracPower(ResultType&, RealScalar);
+ void computeFracPower(ResultType& res, RealScalar p);
template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
static void revertSchur(
@@ -355,59 +449,102 @@ template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::compute(ResultType& res, RealScalar p)
{
+ using std::pow;
switch (cols()) {
case 0:
break;
case 1:
- res(0,0) = std::pow(m_A.coeff(0,0), p);
+ res(0,0) = pow(m_A.coeff(0,0), p);
break;
default:
- RealScalar intpart, x = modfAndInit(p, &intpart);
+ RealScalar intpart;
+ split(p, intpart);
+
+ res = MatrixType::Identity(rows(), cols());
computeIntPower(res, intpart);
- computeFracPower(res, x);
+ if (p) computeFracPower(res, p);
}
}
template<typename MatrixType>
-typename MatrixPower<MatrixType>::RealScalar
-MatrixPower<MatrixType>::modfAndInit(RealScalar x, RealScalar* intpart)
+void MatrixPower<MatrixType>::split(RealScalar& p, RealScalar& intpart)
{
- typedef Array<RealScalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> RealArray;
+ using std::floor;
+ using std::pow;
- *intpart = std::floor(x);
- RealScalar res = x - *intpart;
+ intpart = floor(p);
+ p -= intpart;
- if (!m_conditionNumber && res) {
- const ComplexSchur<MatrixType> schurOfA(m_A);
- m_T = schurOfA.matrixT();
- m_U = schurOfA.matrixU();
-
- const RealArray absTdiag = m_T.diagonal().array().abs();
- m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff();
+ // Perform Schur decomposition if it is not yet performed and the power is
+ // not an integer.
+ if (!m_conditionNumber && p)
+ initialize();
+
+ // Choose the more stable of intpart = floor(p) and intpart = ceil(p).
+ if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) {
+ --p;
+ ++intpart;
+ }
+}
+
+template<typename MatrixType>
+void MatrixPower<MatrixType>::initialize()
+{
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ JacobiRotation<ComplexScalar> rot;
+ ComplexScalar eigenvalue;
+
+ m_fT.resizeLike(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+ m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff();
+
+ // Move zero eigenvalues to the bottom right corner.
+ for (Index i = cols()-1; i>=0; --i) {
+ if (m_rank <= 2)
+ return;
+ if (m_T.coeff(i,i) == RealScalar(0)) {
+ for (Index j=i+1; j < m_rank; ++j) {
+ eigenvalue = m_T.coeff(j,j);
+ rot.makeGivens(m_T.coeff(j-1,j), eigenvalue);
+ m_T.applyOnTheRight(j-1, j, rot);
+ m_T.applyOnTheLeft(j-1, j, rot.adjoint());
+ m_T.coeffRef(j-1,j-1) = eigenvalue;
+ m_T.coeffRef(j,j) = RealScalar(0);
+ m_U.applyOnTheRight(j-1, j, rot);
+ }
+ --m_rank;
+ }
}
- if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) {
- --res;
- ++*intpart;
+ m_nulls = rows() - m_rank;
+ if (m_nulls) {
+ eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero()
+ && "Base of matrix power should be invertible or with a semisimple zero eigenvalue.");
+ m_fT.bottomRows(m_nulls).fill(RealScalar(0));
}
- return res;
}
template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
{
- RealScalar pp = std::abs(p);
+ using std::abs;
+ using std::fmod;
+ RealScalar pp = abs(p);
- if (p<0) m_tmp = m_A.inverse();
- else m_tmp = m_A;
+ if (p<0)
+ m_tmp = m_A.inverse();
+ else
+ m_tmp = m_A;
- res = MatrixType::Identity(rows(), cols());
- while (pp >= 1) {
- if (std::fmod(pp, 2) >= 1)
+ while (true) {
+ if (fmod(pp, 2) >= 1)
res = m_tmp * res;
- m_tmp *= m_tmp;
pp /= 2;
+ if (pp < 1)
+ break;
+ m_tmp *= m_tmp;
}
}
@@ -415,12 +552,17 @@ template<typename MatrixType>
template<typename ResultType>
void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
{
- if (p) {
- eigen_assert(m_conditionNumber);
- MatrixPowerAtomic<ComplexMatrix>(m_T, p).compute(m_fT);
- revertSchur(m_tmp, m_fT, m_U);
- res = m_tmp * res;
+ Block<ComplexMatrix,Dynamic,Dynamic> blockTp(m_fT, 0, 0, m_rank, m_rank);
+ eigen_assert(m_conditionNumber);
+ eigen_assert(m_rank + m_nulls == rows());
+
+ MatrixPowerAtomic<ComplexMatrix>(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp);
+ if (m_nulls) {
+ m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView<Upper>()
+ .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls));
}
+ revertSchur(m_tmp, m_fT, m_U);
+ res = m_tmp * res;
}
template<typename MatrixType>
@@ -464,7 +606,7 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Deri
* \brief Constructor.
*
* \param[in] A %Matrix (expression), the base of the matrix power.
- * \param[in] p scalar, the exponent of the matrix power.
+ * \param[in] p real scalar, the exponent of the matrix power.
*/
MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
{ }
@@ -485,25 +627,83 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Deri
private:
const Derived& m_A;
const RealScalar m_p;
- MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&);
+};
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix (expression).
+ *
+ * \tparam Derived type of the base, a matrix (expression).
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::pow() and related functions and most of the
+ * time this is the only way it is used.
+ */
+template<typename Derived>
+class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue<Derived> >
+{
+ public:
+ typedef typename Derived::PlainObject PlainObject;
+ typedef typename std::complex<typename Derived::RealScalar> ComplexScalar;
+ typedef typename Derived::Index Index;
+
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression), the base of the matrix power.
+ * \param[in] p complex scalar, the exponent of the matrix power.
+ */
+ MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p)
+ { }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$
+ * \exp(p \log(A)) \f$.
+ *
+ * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the
+ * constructor.
+ */
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { res = (m_p * m_A.log()).exp(); }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ const Derived& m_A;
+ const ComplexScalar m_p;
};
namespace internal {
template<typename MatrixPowerType>
-struct traits< MatrixPowerRetval<MatrixPowerType> >
+struct traits< MatrixPowerParenthesesReturnValue<MatrixPowerType> >
{ typedef typename MatrixPowerType::PlainObject ReturnType; };
template<typename Derived>
struct traits< MatrixPowerReturnValue<Derived> >
{ typedef typename Derived::PlainObject ReturnType; };
+template<typename Derived>
+struct traits< MatrixComplexPowerReturnValue<Derived> >
+{ typedef typename Derived::PlainObject ReturnType; };
+
}
template<typename Derived>
const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(const RealScalar& p) const
{ return MatrixPowerReturnValue<Derived>(derived(), p); }
+template<typename Derived>
+const MatrixComplexPowerReturnValue<Derived> MatrixBase<Derived>::pow(const std::complex<RealScalar>& p) const
+{ return MatrixComplexPowerReturnValue<Derived>(derived(), p); }
+
} // namespace Eigen
#endif // EIGEN_MATRIX_POWER
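
For orientation, the split()/computeIntPower() pair above has a simple scalar analogue: the exponent is split into an integral part handled by square-and-multiply and a fractional part in (-1, 1) handled by the Schur-Pad&eacute; code. A hypothetical scalar sketch, ignoring the condition-number-based choice between floor and ceil:

#include <cmath>
#include <cstdio>

// Scalar analogue of MatrixPower::compute(): split the exponent, handle the
// integral part by repeated squaring (cf. computeIntPower) and the fractional
// part separately (here simply delegated to std::pow).
double scalar_power(double a, double p)
{
  double intpart = std::floor(p);
  double frac = p - intpart;                  // fractional part, in [0, 1)

  double res = 1.0;
  double base = intpart < 0 ? 1.0 / a : a;    // invert the base for negative exponents
  for (double pp = std::abs(intpart); pp >= 1; pp /= 2) {
    if (std::fmod(pp, 2) >= 1)
      res *= base;                            // current binary digit of the exponent is 1
    base *= base;                             // square for the next digit
  }
  return res * std::pow(a, frac);             // fractional-part factor
}

int main()
{
  std::printf("%g vs %g\n", scalar_power(2.0, 5.3), std::pow(2.0, 5.3));
  return 0;
}
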
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
index b48ea9d46..0cd39ebe4 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -24,7 +24,7 @@ namespace Eigen {
* \sa MatrixSquareRoot, MatrixSquareRootTriangular
*/
template <typename MatrixType>
-class MatrixSquareRootQuasiTriangular
+class MatrixSquareRootQuasiTriangular : internal::noncopyable
{
public:
@@ -36,7 +36,7 @@ class MatrixSquareRootQuasiTriangular
* The class stores a reference to \p A, so it should not be
* changed (or destroyed) before compute() is called.
*/
- MatrixSquareRootQuasiTriangular(const MatrixType& A)
+ explicit MatrixSquareRootQuasiTriangular(const MatrixType& A)
: m_A(A)
{
eigen_assert(A.rows() == A.cols());
@@ -253,10 +253,10 @@ void MatrixSquareRootQuasiTriangular<MatrixType>
* \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular
*/
template <typename MatrixType>
-class MatrixSquareRootTriangular
+class MatrixSquareRootTriangular : internal::noncopyable
{
public:
- MatrixSquareRootTriangular(const MatrixType& A)
+ explicit MatrixSquareRootTriangular(const MatrixType& A)
: m_A(A)
{
eigen_assert(A.rows() == A.cols());
@@ -321,7 +321,7 @@ class MatrixSquareRoot
* The class stores a reference to \p A, so it should not be
* changed (or destroyed) before compute() is called.
*/
- MatrixSquareRoot(const MatrixType& A);
+ explicit MatrixSquareRoot(const MatrixType& A);
/** \brief Compute the matrix square root
*
@@ -341,7 +341,7 @@ class MatrixSquareRoot<MatrixType, 0>
{
public:
- MatrixSquareRoot(const MatrixType& A)
+ explicit MatrixSquareRoot(const MatrixType& A)
: m_A(A)
{
eigen_assert(A.rows() == A.cols());
@@ -370,11 +370,11 @@ class MatrixSquareRoot<MatrixType, 0>
// ********** Partial specialization for complex matrices **********
template <typename MatrixType>
-class MatrixSquareRoot<MatrixType, 1>
+class MatrixSquareRoot<MatrixType, 1> : internal::noncopyable
{
public:
- MatrixSquareRoot(const MatrixType& A)
+ explicit MatrixSquareRoot(const MatrixType& A)
: m_A(A)
{
eigen_assert(A.rows() == A.cols());
@@ -422,7 +422,7 @@ template<typename Derived> class MatrixSquareRootReturnValue
* \param[in] src %Matrix (expression) forming the argument of the
* matrix square root.
*/
- MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
+ explicit MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
/** \brief Compute the matrix square root.
*
@@ -442,8 +442,6 @@ template<typename Derived> class MatrixSquareRootReturnValue
protected:
const Derived& m_src;
- private:
- MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&);
};
namespace internal {
diff --git a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
index 724e55c1d..0a815d834 100644
--- a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
@@ -16,7 +16,7 @@ namespace Eigen {
* \brief Stem functions corresponding to standard mathematical functions.
*/
template <typename Scalar>
-class StdStemFunctions
+class StdStemFunctions : internal::noncopyable
{
public:
diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp
index 8ddc6ec28..c68a07de8 100644
--- a/unsupported/test/kronecker_product.cpp
+++ b/unsupported/test/kronecker_product.cpp
@@ -107,31 +107,34 @@ void test_kronecker_product()
SparseMatrix<double,RowMajor> SM_row_a(SM_a), SM_row_b(SM_b);
- // test kroneckerProduct(DM_block,DM,DM_fixedSize)
+ // test DM_fixedSize = kroneckerProduct(DM_block,DM)
Matrix<double, 6, 6> DM_fix_ab = kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b);
CALL_SUBTEST(check_kronecker_product(DM_fix_ab));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b)));
for(int i=0;i<DM_fix_ab.rows();++i)
for(int j=0;j<DM_fix_ab.cols();++j)
VERIFY_IS_APPROX(kroneckerProduct(DM_a,DM_b).coeff(i,j), DM_fix_ab(i,j));
- // test kroneckerProduct(DM,DM,DM_block)
+ // test DM_block = kroneckerProduct(DM,DM)
MatrixXd DM_block_ab(10,15);
DM_block_ab.block<6,6>(2,5) = kroneckerProduct(DM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(DM_block_ab.block<6,6>(2,5)));
- // test kroneckerProduct(DM,DM,DM)
+ // test DM = kroneckerProduct(DM,DM)
MatrixXd DM_ab = kroneckerProduct(DM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(DM_ab));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,DM_b)));
- // test kroneckerProduct(SM,DM,SM)
+ // test SM = kroneckerProduct(SM,DM)
SparseMatrix<double> SM_ab = kroneckerProduct(SM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab));
SparseMatrix<double,RowMajor> SM_ab2 = kroneckerProduct(SM_a,DM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,DM_b)));
- // test kroneckerProduct(DM,SM,SM)
+ // test SM = kroneckerProduct(DM,SM)
SM_ab.setZero();
SM_ab.insert(0,0)=37.0;
SM_ab = kroneckerProduct(DM_a,SM_b);
@@ -140,8 +143,9 @@ void test_kronecker_product()
SM_ab2.insert(0,0)=37.0;
SM_ab2 = kroneckerProduct(DM_a,SM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,SM_b)));
- // test kroneckerProduct(SM,SM,SM)
+ // test SM = kroneckerProduct(SM,SM)
SM_ab.resize(2,33);
SM_ab.insert(0,0)=37.0;
SM_ab = kroneckerProduct(SM_a,SM_b);
@@ -150,8 +154,9 @@ void test_kronecker_product()
SM_ab2.insert(0,0)=37.0;
SM_ab2 = kroneckerProduct(SM_a,SM_b);
CALL_SUBTEST(check_kronecker_product(SM_ab2));
+ CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,SM_b)));
- // test kroneckerProduct(SM,SM,SM) with sparse pattern
+ // test SM = kroneckerProduct(SM,SM) with sparse pattern
SM_a.resize(4,5);
SM_b.resize(3,2);
SM_a.resizeNonZeros(0);
@@ -169,7 +174,7 @@ void test_kronecker_product()
SM_ab = kroneckerProduct(SM_a,SM_b);
CALL_SUBTEST(check_sparse_kronecker_product(SM_ab));
- // test dimension of result of kroneckerProduct(DM,DM,DM)
+ // test dimension of result of DM = kroneckerProduct(DM,DM)
MatrixXd DM_a2(2,1);
MatrixXd DM_b2(5,4);
MatrixXd DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
diff --git a/unsupported/test/matrix_functions.h b/unsupported/test/matrix_functions.h
index 5817caef6..295da16b6 100644
--- a/unsupported/test/matrix_functions.h
+++ b/unsupported/test/matrix_functions.h
@@ -10,27 +10,48 @@
#include "main.h"
#include <unsupported/Eigen/MatrixFunctions>
+// For complex matrices, any matrix is fine.
+template<typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct processTriangularMatrix
+{
+ static void run(MatrixType&, MatrixType&, const MatrixType&)
+ { }
+};
+
+// For real matrices, make sure none of the eigenvalues are negative.
+template<typename MatrixType>
+struct processTriangularMatrix<MatrixType,0>
+{
+ static void run(MatrixType& m, MatrixType& T, const MatrixType& U)
+ {
+ typedef typename MatrixType::Index Index;
+ const Index size = m.cols();
+
+ for (Index i=0; i < size; ++i) {
+ if (i == size - 1 || T.coeff(i+1,i) == 0)
+ T.coeffRef(i,i) = std::abs(T.coeff(i,i));
+ else
+ ++i;
+ }
+ m = U * T * U.transpose();
+ }
+};
+
template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
struct generateTestMatrix;
-// for real matrices, make sure none of the eigenvalues are negative
template <typename MatrixType>
struct generateTestMatrix<MatrixType,0>
{
static void run(MatrixType& result, typename MatrixType::Index size)
{
- MatrixType mat = MatrixType::Random(size, size);
- EigenSolver<MatrixType> es(mat);
- typename EigenSolver<MatrixType>::EigenvalueType eivals = es.eigenvalues();
- for (typename MatrixType::Index i = 0; i < size; ++i) {
- if (eivals(i).imag() == 0 && eivals(i).real() < 0)
- eivals(i) = -eivals(i);
- }
- result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real();
+ result = MatrixType::Random(size, size);
+ RealSchur<MatrixType> schur(result);
+ MatrixType T = schur.matrixT();
+ processTriangularMatrix<MatrixType>::run(result, T, schur.matrixU());
}
};
-// for complex matrices, any matrix is fine
template <typename MatrixType>
struct generateTestMatrix<MatrixType,1>
{
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
index b9d513b45..849e4287b 100644
--- a/unsupported/test/matrix_power.cpp
+++ b/unsupported/test/matrix_power.cpp
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+// Copyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -9,33 +9,6 @@
#include "matrix_functions.h"
-template <typename MatrixType, int IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
-struct generateTriangularMatrix;
-
-// for real matrices, make sure none of the eigenvalues are negative
-template <typename MatrixType>
-struct generateTriangularMatrix<MatrixType,0>
-{
- static void run(MatrixType& result, typename MatrixType::Index size)
- {
- result.resize(size, size);
- result.template triangularView<Upper>() = MatrixType::Random(size, size);
- for (typename MatrixType::Index i = 0; i < size; ++i)
- result.coeffRef(i,i) = std::abs(result.coeff(i,i));
- }
-};
-
-// for complex matrices, any matrix is fine
-template <typename MatrixType>
-struct generateTriangularMatrix<MatrixType,1>
-{
- static void run(MatrixType& result, typename MatrixType::Index size)
- {
- result.resize(size, size);
- result.template triangularView<Upper>() = MatrixType::Random(size, size);
- }
-};
-
template<typename T>
void test2dRotation(double tol)
{
@@ -53,7 +26,7 @@ void test2dRotation(double tol)
C = Apow(std::ldexp(angle,1) / M_PI);
std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
- VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ VERIFY(C.isApprox(B, tol));
}
}
@@ -75,12 +48,26 @@ void test2dHyperbolicRotation(double tol)
C = Apow(angle);
std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
- VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ VERIFY(C.isApprox(B, tol));
+ }
+}
+
+template<typename T>
+void test3dRotation(double tol)
+{
+ Matrix<T,3,1> v;
+ T angle;
+
+ for (int i=0; i<=20; ++i) {
+ v = Matrix<T,3,1>::Random();
+ v.normalize();
+ angle = pow(10, (i-10) / 5.);
+ VERIFY(AngleAxis<T>(angle, v).matrix().isApprox(AngleAxis<T>(1,v).matrix().pow(angle), tol));
}
}
template<typename MatrixType>
-void testExponentLaws(const MatrixType& m, double tol)
+void testGeneral(const MatrixType& m, double tol)
{
typedef typename MatrixType::RealScalar RealScalar;
MatrixType m1, m2, m3, m4, m5;
@@ -97,19 +84,64 @@ void testExponentLaws(const MatrixType& m, double tol)
m4 = mpow(x+y);
m5.noalias() = m2 * m3;
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
m4 = mpow(x*y);
m5 = m2.pow(y);
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
m4 = (std::abs(x) * m1).pow(y);
m5 = std::pow(std::abs(x), y) * m3;
- VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ VERIFY(m4.isApprox(m5, tol));
+ }
+}
+
+template<typename MatrixType>
+void testSingular(MatrixType m, double tol)
+{
+ const int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex;
+ typedef typename internal::conditional< IsComplex, MatrixSquareRootTriangular<MatrixType>,
+ MatrixSquareRootQuasiTriangular<MatrixType> >::type SquareRootType;
+ typedef typename internal::conditional<IsComplex, TriangularView<MatrixType,Upper>, const MatrixType&>::type TriangularType;
+ typename internal::conditional< IsComplex, ComplexSchur<MatrixType>, RealSchur<MatrixType> >::type schur;
+ MatrixType T;
+
+ for (int i=0; i < g_repeat; ++i) {
+ m.setRandom();
+ m.col(0).fill(0);
+
+ schur.compute(m);
+ T = schur.matrixT();
+ const MatrixType& U = schur.matrixU();
+ processTriangularMatrix<MatrixType>::run(m, T, U);
+ MatrixPower<MatrixType> mpow(m);
+
+ SquareRootType(T).compute(T);
+ VERIFY(mpow(0.5).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+
+ SquareRootType(T).compute(T);
+ VERIFY(mpow(0.25).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+
+ SquareRootType(T).compute(T);
+ VERIFY(mpow(0.125).isApprox(U * (TriangularType(T) * U.adjoint()), tol));
+ }
+}
+
+template<typename MatrixType>
+void testLogThenExp(MatrixType m, double tol)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ Scalar x;
+
+ for (int i=0; i < g_repeat; ++i) {
+ generateTestMatrix<MatrixType>::run(m, m.rows());
+ x = internal::random<Scalar>();
+ VERIFY(m.pow(x).isApprox((x * m.log()).exp(), tol));
}
}
typedef Matrix<double,3,3,RowMajor> Matrix3dRowMajor;
+typedef Matrix<long double,3,3> Matrix3e;
typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
void test_matrix_power()
@@ -121,13 +153,46 @@ void test_matrix_power()
CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14));
- CALL_SUBTEST_2(testExponentLaws(Matrix2d(), 1e-13));
- CALL_SUBTEST_7(testExponentLaws(Matrix3dRowMajor(), 1e-13));
- CALL_SUBTEST_3(testExponentLaws(Matrix4cd(), 1e-13));
- CALL_SUBTEST_4(testExponentLaws(MatrixXd(8,8), 2e-12));
- CALL_SUBTEST_1(testExponentLaws(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testExponentLaws(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testExponentLaws(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testExponentLaws(MatrixXf(2,2), 1e-3)); // see bug 614
- CALL_SUBTEST_9(testExponentLaws(MatrixXe(7,7), 1e-13));
+ CALL_SUBTEST_10(test3dRotation<double>(1e-13));
+ CALL_SUBTEST_11(test3dRotation<float>(1e-5));
+ CALL_SUBTEST_12(test3dRotation<long double>(1e-13));
+
+ CALL_SUBTEST_2(testGeneral(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testGeneral(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testGeneral(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testGeneral(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3)); // see bug 614
+ CALL_SUBTEST_9(testGeneral(MatrixXe(7,7), 1e-13));
+ CALL_SUBTEST_10(testGeneral(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testGeneral(Matrix3e(), 1e-13));
+
+ CALL_SUBTEST_2(testSingular(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testSingular(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testSingular(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testSingular(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_9(testSingular(MatrixXe(7,7), 1e-13));
+ CALL_SUBTEST_10(testSingular(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testSingular(Matrix3e(), 1e-13));
+
+ CALL_SUBTEST_2(testLogThenExp(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testLogThenExp(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testLogThenExp(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testLogThenExp(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3));
+ CALL_SUBTEST_9(testLogThenExp(MatrixXe(7,7), 1e-13));
+ CALL_SUBTEST_10(testLogThenExp(Matrix3d(), 1e-13));
+ CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4));
+ CALL_SUBTEST_12(testLogThenExp(Matrix3e(), 1e-13));
}