-rw-r--r--  Eigen/src/Core/MatrixBase.h                              |   3
-rw-r--r--  Eigen/src/Core/util/ForwardDeclarations.h                |   1
-rw-r--r--  unsupported/Eigen/MatrixFunctions                        |  49
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h   |   2
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h      | 700
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPowerBase.h  | 359
-rw-r--r--  unsupported/doc/examples/MatrixPower.cpp                 |   2
-rw-r--r--  unsupported/doc/examples/MatrixPower_optimal.cpp         |  17
-rw-r--r--  unsupported/test/matrix_power.cpp                        | 116
9 files changed, 693 insertions(+), 556 deletions(-)
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 31ebde8ab..521bba18a 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -162,6 +162,9 @@ template<typename Derived> class MatrixBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ Derived& lazyAssign(const MatrixPowerProductBase<ProductDerived, Lhs,Rhs>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h
index 30d32e2dc..58e1d87dc 100644
--- a/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/Eigen/src/Core/util/ForwardDeclarations.h
@@ -272,6 +272,7 @@ template<typename Derived> class MatrixFunctionReturnValue;
template<typename Derived> class MatrixSquareRootReturnValue;
template<typename Derived> class MatrixLogarithmReturnValue;
template<typename Derived> class MatrixPowerReturnValue;
+template<typename Derived, typename Lhs, typename Rhs> class MatrixPowerProductBase;
namespace internal {
template <typename Scalar>
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 002c1f71c..1a4d42de0 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -59,6 +59,7 @@
#include "src/MatrixFunctions/MatrixFunction.h"
#include "src/MatrixFunctions/MatrixSquareRoot.h"
#include "src/MatrixFunctions/MatrixLogarithm.h"
+#include "src/MatrixFunctions/MatrixPowerBase.h"
#include "src/MatrixFunctions/MatrixPower.h"
@@ -211,8 +212,9 @@ documentation of \ref matrixbase_exp "exp()".
\include MatrixLogarithm.cpp
Output: \verbinclude MatrixLogarithm.out
-\note \p M has to be a matrix of \c float, \c double, \c long double
-\c complex<float>, \c complex<double>, or \c complex<long double> .
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double> .
\sa MatrixBase::exp(), MatrixBase::matrixFunction(),
class MatrixLogarithmAtomic, MatrixBase::sqrt().
@@ -234,27 +236,14 @@ where exp denotes the matrix exponential, and log denotes the matrix
logarithm.
The matrix \f$ M \f$ should meet the conditions to be an argument of
-matrix logarithm. If \p p is neither an integer nor the real scalar
-type of \p M, it is casted into the real scalar type of \p M.
-
-This function computes the matrix logarithm using the
-Schur-Pad&eacute; algorithm as implemented by MatrixBase::pow().
-The exponent is split into integral part and fractional part, where
-the fractional part is in the interval \f$ (-1, 1) \f$. The main
-diagonal and the first super-diagonal is directly computed.
-
-The actual work is done by the MatrixPower class, which can compute
-\f$ M^p v \f$, where \p v is another matrix with the same rows as
-\p M. The matrix \p v is set to be the identity matrix by default.
-Therefore, the expression <tt>M.pow(p) * v</tt> is specialized for
-this. No temporary storage is created for the result. The code below
-directly evaluates R-values into L-values without aliasing issue. Do
-\b NOT try to \a optimize with noalias(). It won't compile.
-\code
-v = m.pow(p) * v;
-m = m.pow(q);
-// v2.noalias() = m.pow(p) * v1; Won't compile!
-\endcode
+matrix logarithm. If \p p is not of the real scalar type of \p M, it
+is cast to the real scalar type of \p M.
+
+This function computes the matrix power using the Schur-Pad&eacute;
+algorithm as implemented by class MatrixPower. The exponent is split
+into an integral part and a fractional part, where the fractional part
+lies in the interval \f$ (-1, 1) \f$. The main diagonal and the first
+super-diagonal are computed directly.
Details of the algorithm can be found in: Nicholas J. Higham and
Lijing Lin, "A Schur-Pad&eacute; algorithm for fractional powers of a
@@ -277,8 +266,18 @@ the z-axis.
\include MatrixPower.cpp
Output: \verbinclude MatrixPower.out
-\note \p M has to be a matrix of \c float, \c double, \c long double
-\c complex<float>, \c complex<double>, or \c complex<long double> .
+MatrixBase::pow() is user-friendly. However, there are some
+circumstances under which you should use class MatrixPower directly.
+MatrixPower can save the result of the Schur decomposition, so it is
+better suited to computing several powers of the same matrix.
+
+Example:
+\include MatrixPower_optimal.cpp
+Output: \verbinclude MatrixPower_optimal.out
+
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double> .
\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower.
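
As a quick illustration of the two interfaces described above, the following sketch computes one power through MatrixBase::pow() and several powers through a reused MatrixPower object. It assumes a random complex matrix whose eigenvalues stay away from the negative real axis, as in the bundled MatrixPower_optimal.cpp example.

#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::Matrix4cd A = Eigen::Matrix4cd::Random();

  // One-shot interface: convenient when a single power is needed.
  Eigen::Matrix4cd B = A.pow(0.5);

  // Reusable interface: the Schur decomposition of A is computed once
  // and shared among all subsequent non-integral exponents.
  Eigen::MatrixPower<Eigen::Matrix4cd> Apow(A);
  Eigen::Matrix4cd C = Apow(0.5);
  Eigen::Matrix4cd D = Apow(1.5);

  std::cout << "difference between the two interfaces: " << (B - C).norm() << std::endl;
  return 0;
}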
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index c57ca87ed..e87a28f6c 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -209,7 +209,7 @@ void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result)
permuteSchur();
computeBlockAtomic();
computeOffDiagonal();
- result = m_U * m_fT * m_U.adjoint();
+ result = m_U * (m_fT.template triangularView<Upper>() * m_U.adjoint());
}
/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
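
The change above recomposes the computed function from the Schur factors while exploiting the upper-triangular structure of m_fT. A minimal sketch of the same idea, with U and fT as hypothetical stand-ins for m_U and m_fT:

#include <Eigen/Dense>

// Equivalent to U * fT * U.adjoint(), but routing fT through a triangular
// view lets the product skip the structural zeros below the diagonal.
Eigen::MatrixXcd recomposeFromSchur(const Eigen::MatrixXcd& U, const Eigen::MatrixXcd& fT)
{
  return U * (fT.triangularView<Eigen::Upper>() * U.adjoint());
}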
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
index 7238501ed..bf8bc4424 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -10,503 +10,262 @@
#ifndef EIGEN_MATRIX_POWER
#define EIGEN_MATRIX_POWER
-#ifndef M_PI
-#define M_PI 3.141592653589793238462643383279503L
-#endif
-
namespace Eigen {
+template<typename MatrixType> class MatrixPowerEvaluator;
+
/**
* \ingroup MatrixFunctions_Module
*
* \brief Class for computing matrix powers.
*
- * \tparam MatrixType type of the base, expected to be an instantiation
+ * \tparam MatrixType type of the base, expected to be an instantiation
* of the Matrix class template.
- * \tparam PlainObject type of the multiplier.
+ *
+ * This class is capable of computing real/complex matrices raised to
+ * an arbitrary real power. It also saves the result of the Schur
+ * decomposition once a non-integral power has been computed. Therefore,
+ * if you want to compute multiple (>= 2) powers of the same matrix,
+ * using this class directly is more efficient than calling
+ * MatrixBase::pow() repeatedly.
+ *
+ * Example:
+ * \include MatrixPower_optimal.cpp
+ * Output: \verbinclude MatrixPower_optimal.out
*/
-template<typename MatrixType, typename PlainObject = MatrixType>
-class MatrixPower
+template<typename MatrixType>
+class MatrixPower : public MatrixPowerBase<MatrixPower<MatrixType>,MatrixType>
{
- private:
- typedef internal::traits<MatrixType> Traits;
- static const int Rows = Traits::RowsAtCompileTime;
- static const int Cols = Traits::ColsAtCompileTime;
- static const int Options = Traits::Options;
- static const int MaxRows = Traits::MaxRowsAtCompileTime;
- static const int MaxCols = Traits::MaxColsAtCompileTime;
-
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef std::complex<RealScalar> ComplexScalar;
- typedef typename MatrixType::Index Index;
- typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
- typedef Array<ComplexScalar, Rows, 1, ColMajor, MaxRows> ComplexArray;
-
public:
+ EIGEN_MATRIX_POWER_PUBLIC_INTERFACE(MatrixPower)
+
/**
* \brief Constructor.
*
* \param[in] A the base of the matrix power.
- * \param[in] p the exponent of the matrix power.
- * \param[in] b the multiplier.
- */
- MatrixPower(const MatrixType& A, RealScalar p, const PlainObject& b) :
- m_A(A),
- m_p(p),
- m_b(b),
- m_dimA(A.cols()),
- m_dimb(b.cols())
- { /* empty body */ }
-
- /**
- * \brief Compute the matrix power.
*
- * \param[out] result \f$ A^p b \f$, as specified in the constructor.
+ * \warning Construct with a matrix, not a matrix expression!
*/
- template<typename ResultType> void compute(ResultType& result);
-
- private:
+ explicit MatrixPower(const MatrixType& A) : Base(A,0)
+ { }
+
/**
- * \brief Compute the matrix to integral power.
- *
- * If \p b is \em fatter than \p A, it computes \f$ A^{p_{\textrm int}}
- * \f$ first, and then multiplies it with \p b. Otherwise,
- * #computeChainProduct optimizes the expression.
+ * \brief Return the expression \f$ A^p \f$.
*
- * \sa computeChainProduct(ResultType&);
+ * \param[in] p exponent, a real scalar.
*/
- template<typename ResultType>
- void computeIntPower(ResultType& result);
+ const MatrixPowerEvaluator<MatrixType> operator()(RealScalar p)
+ { return MatrixPowerEvaluator<MatrixType>(*this, p); }
/**
- * \brief Convert integral power of the matrix into chain product.
+ * \brief Compute the matrix power.
*
- * This optimizes the matrix expression. It automatically chooses binary
- * powering or matrix chain multiplication or solving the linear system
- * repetitively, according to which algorithm costs less.
+ * \param[in] p exponent, a real scalar.
+ * \param[out] res \f$ A^p \f$ where A is specified in the
+ * constructor.
*/
- template<typename ResultType>
- void computeChainProduct(ResultType&);
-
- /** \brief Compute the cost of binary powering. */
- static int computeCost(RealScalar);
-
- /** \brief Solve the linear system repetitively. */
- template<typename ResultType>
- void partialPivLuSolve(ResultType&, RealScalar);
-
- /** \brief Compute Schur decomposition of #m_A. */
- void computeSchurDecomposition();
+ void compute(MatrixType& res, RealScalar p);
/**
- * \brief Split #m_p into integral part and fractional part.
+ * \brief Compute the matrix power multiplied by another matrix.
*
- * This method stores the integral part \f$ p_{\textrm int} \f$ into
- * #m_pInt and the fractional part \f$ p_{\textrm frac} \f$ into
- * #m_pFrac, where #m_pFrac is in the interval \f$ (-1,1) \f$. To
- * choose between the possibilities below, it considers the computation
- * of \f$ A^{p_1} \f$ and \f$ A^{p_2} \f$ and determines which of these
- * computations is the better conditioned.
+ * \param[in] b a matrix with the same number of rows as A.
+ * \param[in] p exponent, a real scalar.
+ * \param[out] res \f$ A^p b \f$, where A is specified in the
+ * constructor.
*/
- void getFractionalExponent();
+ template<typename Derived, typename ResultType>
+ void compute(const Derived& b, ResultType& res, RealScalar p);
- /** \brief Compute power of 2x2 triangular matrix. */
- void compute2x2(RealScalar p);
-
- /**
- * \brief Compute power of triangular matrices with size > 2.
- * \details This uses a Schur-Pad&eacute; algorithm.
- */
- void computeBig();
+ private:
+ EIGEN_MATRIX_POWER_PROTECTED_MEMBERS(MatrixPower)
- /** \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
- inline int getPadeDegree(double);
+ typedef Matrix<std::complex<RealScalar>, RowsAtCompileTime, ColsAtCompileTime,
+ Options,MaxRowsAtCompileTime,MaxColsAtCompileTime> ComplexMatrix;
+ ComplexMatrix m_T, m_U, m_fT;
- /** \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
- inline int getPadeDegree(float);
-
- /** \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
- inline int getPadeDegree(long double);
+ RealScalar modfAndInit(RealScalar, RealScalar*);
- /** \brief Compute Pad&eacute; approximation to matrix fractional power. */
- void computePade(const int& degree, const ComplexMatrix& IminusT);
+ template<typename Derived, typename ResultType>
+ void apply(const Derived&, ResultType&, bool&);
- /** \brief Get a certain coefficient of the Pad&eacute; approximation. */
- inline RealScalar coeff(const int& degree);
+ template<typename ResultType>
+ void computeIntPower(ResultType&, RealScalar);
- /**
- * \brief Store the fractional power into #m_tmp.
- *
- * This intended for complex matrices.
- */
- void computeTmp(ComplexScalar);
+ template<typename Derived, typename ResultType>
+ void computeIntPower(const Derived&, ResultType&, RealScalar);
- /**
- * \brief Store the fractional power into #m_tmp.
- *
- * This is intended for real matrices. It takes the real part of
- * \f$ U T^{p_{\textrm frac}} U^H \f$.
- *
- * \sa computeTmp(ComplexScalar);
- */
- void computeTmp(RealScalar);
-
- const MatrixType& m_A; ///< \brief Reference to the matrix base.
- const RealScalar m_p; ///< \brief The real exponent.
- const PlainObject& m_b; ///< \brief Reference to the multiplier.
- const Index m_dimA; ///< \brief The dimension of #m_A, equivalent to %m_A.cols().
- const Index m_dimb; ///< \brief The dimension of #m_b, equivalent to %m_b.cols().
- MatrixType m_tmp; ///< \brief Used for temporary storage.
- RealScalar m_pInt; ///< \brief Integral part of #m_p.
- RealScalar m_pFrac; ///< \brief Fractional part of #m_p.
- ComplexMatrix m_T; ///< \brief Triangular part of Schur decomposition.
- ComplexMatrix m_U; ///< \brief Unitary part of Schur decomposition.
- ComplexMatrix m_fT; ///< \brief #m_T to the power of #m_pFrac.
- ComplexArray m_logTdiag; ///< \brief Logarithm of the main diagonal of #m_T.
+ template<typename ResultType>
+ void computeFracPower(ResultType&, RealScalar);
};
-template<typename MatrixType, typename PlainObject>
-template<typename ResultType>
-void MatrixPower<MatrixType,PlainObject>::compute(ResultType& result)
+template<typename MatrixType>
+void MatrixPower<MatrixType>::compute(MatrixType& res, RealScalar p)
{
- using std::floor;
- using std::pow;
-
- m_pInt = floor(m_p + RealScalar(0.5));
- m_pFrac = m_p - m_pInt;
-
- if (!m_pFrac) {
- computeIntPower(result);
- } else if (m_dimA == 1)
- result = pow(m_A(0,0), m_p) * m_b;
- else {
- computeSchurDecomposition();
- getFractionalExponent();
- computeIntPower(result);
- if (m_dimA == 2)
- compute2x2(m_pFrac);
- else
- computeBig();
- computeTmp(Scalar());
- result = m_tmp * result;
+ switch (m_A.cols()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_A.coeff(0,0), p);
+ break;
+ default:
+ RealScalar intpart, x = modfAndInit(p, &intpart);
+ res = m_Id;
+ computeIntPower(res, intpart);
+ computeFracPower(res, x);
}
}
-template<typename MatrixType, typename PlainObject>
-template<typename ResultType>
-void MatrixPower<MatrixType,PlainObject>::computeIntPower(ResultType& result)
+template<typename MatrixType>
+template<typename Derived, typename ResultType>
+void MatrixPower<MatrixType>::compute(const Derived& b, ResultType& res, RealScalar p)
{
- MatrixType tmp;
- if (m_dimb > m_dimA) {
- tmp = MatrixType::Identity(m_dimA, m_dimA);
- computeChainProduct(tmp);
- result.noalias() = tmp * m_b;
- } else {
- result = m_b;
- computeChainProduct(result);
+ switch (m_A.cols()) {
+ case 0:
+ break;
+ case 1:
+ res = std::pow(m_A.coeff(0,0), p) * b;
+ break;
+ default:
+ RealScalar intpart, x = modfAndInit(p, &intpart);
+ computeIntPower(b, res, intpart);
+ computeFracPower(res, x);
}
}
-template<typename MatrixType, typename PlainObject>
-template<typename ResultType>
-void MatrixPower<MatrixType,PlainObject>::computeChainProduct(ResultType& result)
+template<typename MatrixType>
+typename MatrixPower<MatrixType>::Base::RealScalar MatrixPower<MatrixType>::modfAndInit(RealScalar x, RealScalar* intpart)
{
- using std::abs;
- using std::fmod;
- using std::ldexp;
-
- RealScalar p = abs(m_pInt);
- int cost = computeCost(p);
-
- if (m_pInt < RealScalar(0)) {
- if (p * m_dimb <= cost * m_dimA && m_dimA > 2) {
- partialPivLuSolve(result, p);
- return;
- } else {
- m_tmp = m_A.inverse();
- }
- } else {
- m_tmp = m_A;
+ *intpart = std::floor(x);
+ RealScalar res = x - *intpart;
+
+ if (!m_conditionNumber && res) {
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+
+ const RealArray absTdiag = m_T.diagonal().array().abs();
+ m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff();
}
- while (p * m_dimb > cost * m_dimA) {
- if (fmod(p, RealScalar(2)) >= RealScalar(1)) {
- result = m_tmp * result;
- cost--;
- }
- m_tmp *= m_tmp;
- cost--;
- p = ldexp(p, -1);
+
+ if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) {
+ --res;
+ ++*intpart;
}
- for (; p >= RealScalar(1); p--)
- result = m_tmp * result;
+ return res;
}
-template<typename MatrixType, typename PlainObject>
-int MatrixPower<MatrixType,PlainObject>::computeCost(RealScalar p)
+template<typename MatrixType>
+template<typename Derived, typename ResultType>
+void MatrixPower<MatrixType>::apply(const Derived& b, ResultType& res, bool& init)
{
- using std::frexp;
- using std::ldexp;
- int cost, tmp;
-
- frexp(p, &cost);
- while (frexp(p, &tmp), tmp > 0) {
- p -= ldexp(RealScalar(0.5), tmp);
- cost++;
+ if (init)
+ res = m_tmp1 * res;
+ else {
+ init = true;
+ res.noalias() = m_tmp1 * b;
}
- return cost;
}
-template<typename MatrixType, typename PlainObject>
+template<typename MatrixType>
template<typename ResultType>
-void MatrixPower<MatrixType,PlainObject>::partialPivLuSolve(ResultType& result, RealScalar p)
+void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
{
- const PartialPivLU<MatrixType> Asolver(m_A);
- for (; p >= RealScalar(1); p--)
- result = Asolver.solve(result);
-}
+ RealScalar pp = std::abs(p);
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::computeSchurDecomposition()
-{
- const ComplexSchur<MatrixType> schurOfA(m_A);
- m_T = schurOfA.matrixT();
- m_U = schurOfA.matrixU();
-}
+ if (p<0) m_tmp1 = m_A.inverse();
+ else m_tmp1 = m_A;
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::getFractionalExponent()
-{
- using std::pow;
- typedef Array<RealScalar, Rows, 1, ColMajor, MaxRows> RealArray;
-
- const ComplexArray Tdiag = m_T.diagonal();
- const RealArray absTdiag = Tdiag.abs();
- const RealScalar maxAbsEival = absTdiag.maxCoeff();
- const RealScalar minAbsEival = absTdiag.minCoeff();
-
- m_logTdiag = Tdiag.log();
- if (m_pFrac > RealScalar(0.5) && // This is just a shortcut.
- m_pFrac > (RealScalar(1) - m_pFrac) * pow(maxAbsEival/minAbsEival, m_pFrac)) {
- m_pFrac--;
- m_pInt++;
+ while (pp >= 1) {
+ if (std::fmod(pp, 2) >= 1)
+ res = m_tmp1 * res;
+ m_tmp1 *= m_tmp1;
+ pp /= 2;
}
}
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::compute2x2(RealScalar p)
+template<typename MatrixType>
+template<typename Derived, typename ResultType>
+void MatrixPower<MatrixType>::computeIntPower(const Derived& b, ResultType& res, RealScalar p)
{
- using std::abs;
- using std::ceil;
- using std::exp;
- using std::imag;
- using std::ldexp;
- using std::pow;
- using std::sinh;
-
- int i, j, unwindingNumber;
- ComplexScalar w;
-
- m_fT(0,0) = pow(m_T(0,0), p);
- for (j = 1; j < m_dimA; j++) {
- i = j - 1;
- m_fT(j,j) = pow(m_T(j,j), p);
-
- if (m_T(i,i) == m_T(j,j)) {
- m_fT(i,j) = p * pow(m_T(i,j), p - RealScalar(1));
- } else if (abs(m_T(i,i)) < ldexp(abs(m_T(j,j)), -1) || abs(m_T(j,j)) < ldexp(abs(m_T(i,i)), -1)) {
- m_fT(i,j) = m_T(i,j) * (m_fT(j,j) - m_fT(i,i)) / (m_T(j,j) - m_T(i,i));
- } else {
- // computation in previous branch is inaccurate if abs(m_T(j,j)) \approx abs(m_T(i,i))
- unwindingNumber = ceil((imag(m_logTdiag[j] - m_logTdiag[i]) - M_PI) / (2 * M_PI));
- w = internal::atanh2(m_T(j,j) - m_T(i,i), m_T(j,j) + m_T(i,i)) + ComplexScalar(0, M_PI * unwindingNumber);
- m_fT(i,j) = m_T(i,j) * RealScalar(2) * exp(RealScalar(0.5) * p * (m_logTdiag[j] + m_logTdiag[i])) *
- sinh(p * w) / (m_T(j,j) - m_T(i,i));
- }
+ if (b.cols() >= m_A.cols()) {
+ m_tmp2 = m_Id;
+ computeIntPower(m_tmp2, p);
+ res.noalias() = m_tmp2 * b;
}
-}
+ else {
+ RealScalar pp = std::abs(p);
+ int squarings, applyings = internal::binary_powering_cost(pp, &squarings);
+ bool init = false;
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::computeBig()
-{
- using std::ldexp;
- const int digits = std::numeric_limits<RealScalar>::digits;
- const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // sigle precision
- digits <= 53? 2.789358995219730e-1: // double precision
- digits <= 64? 2.4471944416607995472e-1L: // extended precision
- digits <= 106? 1.1016843812851143391275867258512e-01: // double-double
- 9.134603732914548552537150753385375e-02; // quadruple precision
- int degree, degree2, numberOfSquareRoots = 0, numberOfExtraSquareRoots = 0;
- ComplexMatrix IminusT, sqrtT, T = m_T;
- RealScalar normIminusT;
-
- while (true) {
- IminusT = ComplexMatrix::Identity(m_dimA, m_dimA) - T;
- normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff();
- if (normIminusT < maxNormForPade) {
- degree = getPadeDegree(normIminusT);
- degree2 = getPadeDegree(normIminusT * RealScalar(0.5));
- if (degree - degree2 <= 1 || numberOfExtraSquareRoots)
- break;
- numberOfExtraSquareRoots++;
+ if (p==0) {
+ res = b;
+ return;
+ }
+ else if (p>0) {
+ m_tmp1 = m_A;
+ }
+ else if (m_A.cols() > 2 && b.cols()*(pp-applyings) <= m_A.cols()*squarings) {
+ PartialPivLU<MatrixType> A(m_A);
+ res = A.solve(b);
+ for (--pp; pp >= 1; --pp)
+ res = A.solve(res);
+ return;
+ }
+ else {
+ m_tmp1 = m_A.inverse();
}
- MatrixSquareRootTriangular<ComplexMatrix>(T).compute(sqrtT);
- T = sqrtT;
- numberOfSquareRoots++;
- }
- computePade(degree, IminusT);
- for (; numberOfSquareRoots; numberOfSquareRoots--) {
- compute2x2(ldexp(m_pFrac, -numberOfSquareRoots));
- m_fT *= m_fT;
+ while (b.cols()*(pp-applyings) > m_A.cols()*squarings) {
+ if (std::fmod(pp, 2) >= 1) {
+ apply(b, res, init);
+ --applyings;
+ }
+ m_tmp1 *= m_tmp1;
+ --squarings;
+ pp /= 2;
+ }
+ for (; pp >= 1; --pp)
+ apply(b, res, init);
}
- compute2x2(m_pFrac);
-}
-
-template<typename MatrixType, typename PlainObject>
-inline int MatrixPower<MatrixType,PlainObject>::getPadeDegree(float normIminusT)
-{
- const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f };
- int degree = 3;
- for (; degree <= 4; degree++)
- if (normIminusT <= maxNormForPade[degree - 3])
- break;
- return degree;
-}
-
-template<typename MatrixType, typename PlainObject>
-inline int MatrixPower<MatrixType,PlainObject>::getPadeDegree(double normIminusT)
-{
- const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2,
- 1.239917516308172e-1, 1.999045567181744e-1, 2.789358995219730e-1 };
- int degree = 3;
- for (; degree <= 7; degree++)
- if (normIminusT <= maxNormForPade[degree - 3])
- break;
- return degree;
}
-template<typename MatrixType, typename PlainObject>
-inline int MatrixPower<MatrixType,PlainObject>::getPadeDegree(long double normIminusT)
-{
-#if LDBL_MANT_DIG == 53
- const int maxPadeDegree = 7;
- const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L,
- 1.239917516308172e-1L, 1.999045567181744e-1L, 2.789358995219730e-1L };
-
-#elif LDBL_MANT_DIG <= 64
- const int maxPadeDegree = 8;
- const double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
- 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
-
-#elif LDBL_MANT_DIG <= 106
- const int maxPadeDegree = 10;
- const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ ,
- 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L,
- 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L,
- 1.1016843812851143391275867258512e-1L };
-#else
- const int maxPadeDegree = 10;
- const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ ,
- 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L,
- 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L,
- 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L,
- 9.134603732914548552537150753385375e-2L };
-#endif
- int degree = 3;
- for (; degree <= maxPadeDegree; degree++)
- if (normIminusT <= maxNormForPade[degree - 3])
- break;
- return degree;
-}
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::computePade(const int& degree, const ComplexMatrix& IminusT)
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
{
- int i = degree << 1;
- m_fT = coeff(i) * IminusT;
- for (i--; i; i--) {
- m_fT = (ComplexMatrix::Identity(m_dimA, m_dimA) + m_fT).template triangularView<Upper>()
- .solve(coeff(i) * IminusT).eval();
+ if (p) {
+ eigen_assert(m_conditionNumber);
+ MatrixPowerTriangularAtomic<ComplexMatrix>(m_T).compute(m_fT, p);
+ internal::recompose_complex_schur<NumTraits<Scalar>::IsComplex>::run(m_tmp1, m_fT, m_U);
+ res = m_tmp1 * res;
}
- m_fT += ComplexMatrix::Identity(m_dimA, m_dimA);
}
-template<typename MatrixType, typename PlainObject>
-inline typename MatrixType::RealScalar MatrixPower<MatrixType,PlainObject>::coeff(const int& i)
+template<typename Lhs, typename Rhs>
+class MatrixPowerMatrixProduct : public MatrixPowerProductBase<MatrixPowerMatrixProduct<Lhs,Rhs>,Lhs,Rhs>
{
- if (i == 1)
- return -m_pFrac;
- else if (i & 1)
- return (-m_pFrac - RealScalar(i >> 1)) / RealScalar(i << 1);
- else
- return (m_pFrac - RealScalar(i >> 1)) / RealScalar((i - 1) << 1);
-}
-
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::computeTmp(RealScalar)
-{ m_tmp = (m_U * m_fT * m_U.adjoint()).real(); }
-
-template<typename MatrixType, typename PlainObject>
-void MatrixPower<MatrixType,PlainObject>::computeTmp(ComplexScalar)
-{ m_tmp = m_U * m_fT * m_U.adjoint(); }
-
-/**
- * \ingroup MatrixFunctions_Module
- *
- * \brief Proxy for the matrix power multiplied by other matrix.
- *
- * \tparam Derived type of the base, a matrix (expression).
- * \tparam RhsDerived type of the multiplier.
- *
- * This class holds the arguments to the matrix power until it is
- * assigned or evaluated for some other reason (so the argument
- * should not be changed in the meantime). It is the return type of
- * MatrixPowerReturnValue::operator*() and related functions and most
- * of the time this is the only way it is used.
- */
-template<typename Derived, typename RhsDerived>
-class MatrixPowerProductReturnValue : public ReturnByValue<MatrixPowerProductReturnValue<Derived,RhsDerived> >
-{
- private:
- typedef typename Derived::PlainObject MatrixType;
- typedef typename RhsDerived::PlainObject PlainObject;
- typedef typename RhsDerived::RealScalar RealScalar;
- typedef typename RhsDerived::Index Index;
-
public:
- /**
- * \brief Constructor.
- *
- * \param[in] A %Matrix (expression), the base of the matrix power.
- * \param[in] p scalar, the exponent of the matrix power.
- * \prarm[in] b %Matrix (expression), the multiplier.
- */
- MatrixPowerProductReturnValue(const Derived& A, RealScalar p, const RhsDerived& b)
- : m_A(A), m_p(p), m_b(b) { }
+ EIGEN_MATRIX_POWER_PRODUCT_PUBLIC_INTERFACE(MatrixPowerMatrixProduct)
+
+ MatrixPowerMatrixProduct(MatrixPower<Lhs>& pow, const Rhs& b, RealScalar p) :
+ m_pow(pow),
+ m_b(b),
+ m_p(p)
+ { }
- /**
- * \brief Compute the expression.
- *
- * \param[out] result \f$ A^p b \f$ where \p A, \p p and \p bare as
- * in the constructor.
- */
template<typename ResultType>
- inline void evalTo(ResultType& result) const
- {
- const MatrixType A = m_A;
- const PlainObject b = m_b;
- MatrixPower<MatrixType, PlainObject> mp(A, m_p, b);
- mp.compute(result);
- }
+ inline void evalTo(ResultType& res) const
+ { m_pow.compute(m_b, res, m_p); }
- Index rows() const { return m_b.rows(); }
+ Index rows() const { return m_pow.rows(); }
Index cols() const { return m_b.cols(); }
private:
- const Derived& m_A;
+ MatrixPower<Lhs>& m_pow;
+ const Rhs& m_b;
const RealScalar m_p;
- const RhsDerived& m_b;
- MatrixPowerProductReturnValue& operator=(const MatrixPowerProductReturnValue&);
+ MatrixPowerMatrixProduct& operator=(const MatrixPowerMatrixProduct&);
};
/**
@@ -525,39 +284,19 @@ class MatrixPowerProductReturnValue : public ReturnByValue<MatrixPowerProductRet
template<typename Derived>
class MatrixPowerReturnValue : public ReturnByValue<MatrixPowerReturnValue<Derived> >
{
- private:
+ public:
+ typedef typename Derived::PlainObject PlainObject;
typedef typename Derived::RealScalar RealScalar;
typedef typename Derived::Index Index;
- public:
/**
* \brief Constructor.
*
* \param[in] A %Matrix (expression), the base of the matrix power.
* \param[in] p scalar, the exponent of the matrix power.
*/
- MatrixPowerReturnValue(const Derived& A, RealScalar p)
- : m_A(A), m_p(p) { }
-
- /**
- * \brief Return the matrix power multiplied by %Matrix \p b.
- *
- * The %MatrixPower class can optimize \f$ A^p b \f$ computing, and
- * this method provides an elegant way to call it.
- *
- * Unlike general matrix-matrix / matrix-vector product, this does
- * \b NOT produce a temporary storage for the result. Therefore,
- * the code below is \a already optimal:
- * \code
- * v = A.pow(p) * b;
- * // v.noalias() = A.pow(p) * b; Won't compile!
- * \endcode
- *
- * \param[in] b %Matrix (expression), the multiplier.
- */
- template<typename RhsDerived>
- const MatrixPowerProductReturnValue<Derived,RhsDerived> operator*(const MatrixBase<RhsDerived>& b) const
- { return MatrixPowerProductReturnValue<Derived,RhsDerived>(m_A, m_p, b.derived()); }
+ MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
+ { }
/**
* \brief Compute the matrix power.
@@ -566,13 +305,21 @@ class MatrixPowerReturnValue : public ReturnByValue<MatrixPowerReturnValue<Deriv
* constructor.
*/
template<typename ResultType>
- inline void evalTo(ResultType& result) const
+ inline void evalTo(ResultType& res) const
+ { MatrixPower<PlainObject>(m_A.eval()).compute(res, m_p); }
+
+ /**
+ * \brief Return the expression \f$ A^p b \f$.
+ *
+ * \p A and \p p are specified in the constructor.
+ *
+ * \param[in] b the matrix (expression) to which the power is applied.
+ */
+ template<typename OtherDerived>
+ const MatrixPowerMatrixProduct<PlainObject,OtherDerived> operator*(const MatrixBase<OtherDerived>& b) const
{
- typedef typename Derived::PlainObject PlainObject;
- const PlainObject A = m_A;
- const PlainObject Identity = PlainObject::Identity(m_A.rows(), m_A.cols());
- MatrixPower<PlainObject> mp(A, m_p, Identity);
- mp.compute(result);
+ MatrixPower<PlainObject> Apow(m_A.eval());
+ return MatrixPowerMatrixProduct<PlainObject,OtherDerived>(Apow, b.derived(), m_p);
}
Index rows() const { return m_A.rows(); }
@@ -584,27 +331,56 @@ class MatrixPowerReturnValue : public ReturnByValue<MatrixPowerReturnValue<Deriv
MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&);
};
+template<typename MatrixType>
+class MatrixPowerEvaluator : public ReturnByValue<MatrixPowerEvaluator<MatrixType> >
+{
+ public:
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ MatrixPowerEvaluator(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
+ { }
+
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { m_pow.compute(res, m_p); }
+
+ template<typename Derived>
+ const MatrixPowerMatrixProduct<MatrixType,Derived> operator*(const MatrixBase<Derived>& b) const
+ { return MatrixPowerMatrixProduct<MatrixType,Derived>(m_pow, b.derived(), m_p); }
+
+ Index rows() const { return m_pow.rows(); }
+ Index cols() const { return m_pow.cols(); }
+
+ private:
+ MatrixPower<MatrixType>& m_pow;
+ const RealScalar m_p;
+ MatrixPowerEvaluator& operator=(const MatrixPowerEvaluator&);
+};
+
namespace internal {
- template<typename Derived>
- struct traits<MatrixPowerReturnValue<Derived> >
- {
- typedef typename Derived::PlainObject ReturnType;
- };
-
- template<typename Derived, typename RhsDerived>
- struct traits<MatrixPowerProductReturnValue<Derived,RhsDerived> >
- {
- typedef typename RhsDerived::PlainObject ReturnType;
- };
-}
+template<typename Lhs, typename Rhs>
+struct nested<MatrixPowerMatrixProduct<Lhs,Rhs> >
+{ typedef typename MatrixPowerMatrixProduct<Lhs,Rhs>::PlainObject const& type; };
+
+template<typename Derived>
+struct traits<MatrixPowerReturnValue<Derived> >
+{ typedef typename Derived::PlainObject ReturnType; };
+
+template<typename MatrixType>
+struct traits<MatrixPowerEvaluator<MatrixType> >
+{ typedef MatrixType ReturnType; };
+
+template<typename Lhs, typename Rhs>
+struct traits<MatrixPowerMatrixProduct<Lhs,Rhs> >
+: traits<MatrixPowerProductBase<MatrixPowerMatrixProduct<Lhs,Rhs>,Lhs,Rhs> >
+{ };
+} // namespace internal
template<typename Derived>
const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) const
-{
- eigen_assert(rows() == cols());
- return MatrixPowerReturnValue<Derived>(derived(), p);
-}
+{ return MatrixPowerReturnValue<Derived>(derived(), p); }
-} // end namespace Eigen
+} // namespace Eigen
#endif // EIGEN_MATRIX_POWER
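
The integral part of the exponent is handled by square-and-multiply; computeIntPower above additionally weighs squaring the base against applying it to the multiplier b, and for negative exponents falls back to repeated PartialPivLU solves when that is cheaper. A minimal standalone sketch of just the square-and-multiply core, for a non-negative integral exponent p stored as a real scalar (not Eigen's actual implementation):

#include <Eigen/Dense>
#include <cmath>

Eigen::MatrixXd intPower(Eigen::MatrixXd A, double p)
{
  Eigen::MatrixXd res = Eigen::MatrixXd::Identity(A.rows(), A.cols());
  while (p >= 1) {
    if (std::fmod(p, 2) >= 1)  // the current binary digit of p is set
      res = A * res;
    A *= A;                    // square the base
    p /= 2;                    // move on to the next binary digit
  }
  return res;
}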
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPowerBase.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPowerBase.h
new file mode 100644
index 000000000..9616659ca
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPowerBase.h
@@ -0,0 +1,359 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_POWER_BASE
+#define EIGEN_MATRIX_POWER_BASE
+
+namespace Eigen {
+
+namespace internal {
+template<int IsComplex>
+struct recompose_complex_schur
+{
+ template<typename ResultType, typename MatrixType>
+ static inline void run(ResultType& res, const MatrixType& T, const MatrixType& U)
+ { res.noalias() = U * (T.template triangularView<Upper>() * U.adjoint()); }
+};
+
+template<>
+struct recompose_complex_schur<0>
+{
+ template<typename ResultType, typename MatrixType>
+ static inline void run(ResultType& res, const MatrixType& T, const MatrixType& U)
+ { res.noalias() = (U * (T.template triangularView<Upper>() * U.adjoint())).real(); }
+};
+
+template<typename Scalar, int IsComplex=NumTraits<Scalar>::IsComplex>
+struct matrix_power_unwinder
+{
+ static inline Scalar run(const Scalar& eival, const Scalar& eival0, int unwindingNumber)
+ { return internal::atanh2(eival-eival0, eival+eival0) + Scalar(0, M_PI*unwindingNumber); }
+};
+
+template<typename Scalar>
+struct matrix_power_unwinder<Scalar,0>
+{
+ static inline Scalar run(Scalar eival, Scalar eival0, int)
+ { return internal::atanh2(eival-eival0, eival+eival0); }
+};
+
+template<typename T>
+inline int binary_powering_cost(T p, int* squarings)
+{
+ int applyings=0, tmp;
+
+ frexp(p, squarings);
+ --*squarings;
+
+ while (std::frexp(p, &tmp), tmp > 0) {
+ p -= std::ldexp(static_cast<T>(0.5), tmp);
+ ++applyings;
+ }
+ return applyings;
+}
+
+inline int matrix_power_get_pade_degree(float normIminusT)
+{
+ const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f };
+ int degree = 3;
+ for (; degree <= 4; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+inline int matrix_power_get_pade_degree(double normIminusT)
+{
+ const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1,
+ 1.999045567181744e-1, 2.789358995219730e-1 };
+ int degree = 3;
+ for (; degree <= 7; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+inline int matrix_power_get_pade_degree(long double normIminusT)
+{
+#if LDBL_MANT_DIG == 53
+ const int maxPadeDegree = 7;
+ const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L,
+ 1.999045567181744e-1L, 2.789358995219730e-1L };
+#elif LDBL_MANT_DIG <= 64
+ const int maxPadeDegree = 8;
+ const double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
+ 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
+#elif LDBL_MANT_DIG <= 106
+ const int maxPadeDegree = 10;
+ const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ ,
+ 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L,
+ 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L,
+ 1.1016843812851143391275867258512e-1L };
+#else
+ const int maxPadeDegree = 10;
+ const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ ,
+ 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L,
+ 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L,
+ 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L,
+ 9.134603732914548552537150753385375e-2L };
+#endif
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+} // namespace internal
+
+template<typename MatrixType>
+class MatrixPowerTriangularAtomic
+{
+ private:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef Array<Scalar,RowsAtCompileTime,1,ColMajor,MaxRowsAtCompileTime> ArrayType;
+
+ const MatrixType& m_T;
+ const MatrixType m_Id;
+
+ void computePade(int degree, const MatrixType& IminusT, MatrixType& res, RealScalar p) const;
+ void compute2x2(MatrixType& res, RealScalar p) const;
+ void computeBig(MatrixType& res, RealScalar p) const;
+
+ public:
+ explicit MatrixPowerTriangularAtomic(const MatrixType& T);
+ void compute(MatrixType& res, RealScalar p) const;
+};
+
+template<typename MatrixType>
+MatrixPowerTriangularAtomic<MatrixType>::MatrixPowerTriangularAtomic(const MatrixType& T) :
+ m_T(T),
+ m_Id(MatrixType::Identity(T.rows(), T.cols()))
+{ eigen_assert(T.rows() == T.cols()); }
+
+template<typename MatrixType>
+void MatrixPowerTriangularAtomic<MatrixType>::compute(MatrixType& res, RealScalar p) const
+{
+ switch (m_T.rows()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_T(0,0), p);
+ break;
+ case 2:
+ compute2x2(res, p);
+ break;
+ default:
+ computeBig(res, p);
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerTriangularAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, MatrixType& res,
+ RealScalar p) const
+{
+ int i = degree<<1;
+ res = (p-degree) / ((i-1)<<1) * IminusT;
+ for (--i; i; --i) {
+ res = (m_Id + res).template triangularView<Upper>().solve((i==1 ? -p : i&1 ? (-p-(i>>1))/(i<<1) :
+ (p-(i>>1))/((i-1)<<1)) * IminusT).eval();
+ }
+ res += m_Id;
+}
+
+template<typename MatrixType>
+void MatrixPowerTriangularAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) const
+{
+ using std::abs;
+ using std::pow;
+
+ ArrayType logTdiag = m_T.diagonal().array().log();
+ res.coeffRef(0,0) = pow(m_T.coeff(0,0), p);
+
+ for (int i=1; i < m_T.cols(); ++i) {
+ res.coeffRef(i,i) = pow(m_T.coeff(i,i), p);
+ if (m_T.coeff(i-1,i-1) == m_T.coeff(i,i)) {
+ res.coeffRef(i-1,i) = p * pow(m_T.coeff(i-1,i), p-1);
+ }
+ else if (2*abs(m_T.coeff(i-1,i-1)) < abs(m_T.coeff(i,i)) || 2*abs(m_T.coeff(i,i)) < abs(m_T.coeff(i-1,i-1))) {
+ res.coeffRef(i-1,i) = m_T.coeff(i-1,i) * (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_T.coeff(i,i)-m_T.coeff(i-1,i-1));
+ }
+ else {
+ int unwindingNumber = std::ceil((internal::imag(logTdiag[i]-logTdiag[i-1]) - M_PI) / (2*M_PI));
+ Scalar w = internal::matrix_power_unwinder<Scalar>::run(m_T.coeff(i,i), m_T.coeff(i-1,i-1), unwindingNumber);
+ res.coeffRef(i-1,i) = m_T.coeff(i-1,i) * RealScalar(2) * std::exp(RealScalar(0.5)*p*(logTdiag[i]+logTdiag[i-1])) *
+ std::sinh(p * w) / (m_T.coeff(i,i) - m_T.coeff(i-1,i-1));
+ }
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerTriangularAtomic<MatrixType>::computeBig(MatrixType& res, RealScalar p) const
+{
+ const int digits = std::numeric_limits<RealScalar>::digits;
+ const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // single precision
+ digits <= 53? 2.789358995219730e-1: // double precision
+ digits <= 64? 2.4471944416607995472e-1L: // extended precision
+ digits <= 106? 1.1016843812851143391275867258512e-1L: // double-double
+ 9.134603732914548552537150753385375e-2L; // quadruple precision
+ MatrixType IminusT, sqrtT, T=m_T;
+ RealScalar normIminusT;
+ int degree, degree2, numberOfSquareRoots=0;
+ bool hasExtraSquareRoot=false;
+
+ while (true) {
+ IminusT = MatrixType::Identity(m_T.rows(), m_T.cols()) - T;
+ normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff();
+ if (normIminusT < maxNormForPade) {
+ degree = internal::matrix_power_get_pade_degree(normIminusT);
+ degree2 = internal::matrix_power_get_pade_degree(normIminusT/2);
+ if (degree - degree2 <= 1 || hasExtraSquareRoot)
+ break;
+ hasExtraSquareRoot = true;
+ }
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ T = sqrtT;
+ ++numberOfSquareRoots;
+ }
+ computePade(degree, IminusT, res, p);
+
+ for (; numberOfSquareRoots; --numberOfSquareRoots) {
+ compute2x2(res, std::ldexp(p,-numberOfSquareRoots));
+ res *= res;
+ }
+ compute2x2(res, p);
+}
+
+#define EIGEN_MATRIX_POWER_PUBLIC_INTERFACE(Derived) \
+ typedef MatrixPowerBase<Derived, MatrixType> Base; \
+ using Base::RowsAtCompileTime; \
+ using Base::ColsAtCompileTime; \
+ using Base::Options; \
+ using Base::MaxRowsAtCompileTime; \
+ using Base::MaxColsAtCompileTime; \
+ typedef typename Base::Scalar Scalar; \
+ typedef typename Base::RealScalar RealScalar; \
+ typedef typename Base::RealArray RealArray;
+
+#define EIGEN_MATRIX_POWER_PROTECTED_MEMBERS(Derived) \
+ using Base::m_A; \
+ using Base::m_Id; \
+ using Base::m_tmp1; \
+ using Base::m_tmp2; \
+ using Base::m_conditionNumber;
+
+#define EIGEN_MATRIX_POWER_PRODUCT_PUBLIC_INTERFACE(Derived) \
+ typedef MatrixPowerProductBase<Derived, Lhs, Rhs> Base; \
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+namespace internal {
+template<typename Derived, typename _Lhs, typename _Rhs>
+struct traits<MatrixPowerProductBase<Derived,_Lhs,_Rhs> >
+{
+ typedef MatrixXpr XprKind;
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename remove_all<Derived>::type PlainObject;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+ typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+
+ enum {
+ RowsAtCompileTime = traits<Lhs>::RowsAtCompileTime,
+ ColsAtCompileTime = traits<Rhs>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = traits<Lhs>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = traits<Rhs>::MaxColsAtCompileTime,
+ Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0)
+ | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit,
+ CoeffReadCost = 0
+ };
+};
+} // namespace internal
+
+template<typename Derived, typename MatrixType>
+class MatrixPowerBase
+{
+ public:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ explicit MatrixPowerBase(const MatrixType& A, RealScalar cond);
+
+ void compute(MatrixType& res, RealScalar p);
+
+ template<typename OtherDerived, typename ResultType>
+ void compute(const OtherDerived& b, ResultType& res, RealScalar p);
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ protected:
+ typedef Array<RealScalar,RowsAtCompileTime,1,ColMajor,MaxRowsAtCompileTime> RealArray;
+
+ const MatrixType& m_A;
+ const MatrixType m_Id;
+ MatrixType m_tmp1, m_tmp2;
+ RealScalar m_conditionNumber;
+};
+
+template<typename Derived, typename MatrixType>
+MatrixPowerBase<Derived,MatrixType>::MatrixPowerBase(const MatrixType& A, RealScalar cond) :
+ m_A(A),
+ m_Id(MatrixType::Identity(A.rows(),A.cols())),
+ m_conditionNumber(cond)
+{ eigen_assert(A.rows() == A.cols()); }
+
+template<typename Derived, typename MatrixType>
+void MatrixPowerBase<Derived,MatrixType>::compute(MatrixType& res, RealScalar p)
+{ static_cast<Derived*>(this)->compute(res,p); }
+
+template<typename Derived, typename MatrixType>
+template<typename OtherDerived, typename ResultType>
+void MatrixPowerBase<Derived,MatrixType>::compute(const OtherDerived& b, ResultType& res, RealScalar p)
+{ static_cast<Derived*>(this)->compute(b,res,p); }
+
+template<typename Derived, typename Lhs, typename Rhs>
+class MatrixPowerProductBase : public MatrixBase<Derived>
+{
+ public:
+ typedef MatrixBase<Derived> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(MatrixPowerProductBase)
+
+ inline Index rows() const { return derived().rows(); }
+ inline Index cols() const { return derived().cols(); }
+
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const { derived().evalTo(res); }
+};
+
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const MatrixPowerProductBase<ProductDerived,Lhs,Rhs>& other)
+{
+ other.derived().evalTo(derived());
+ return derived();
+}
+
+} // namespace Eigen
+
+#endif // EIGEN_MATRIX_POWER
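
For reference, the superdiagonal entries produced by compute2x2 above correspond to the formulas from the Higham-Lin paper cited in the module documentation. Writing \( t_{ij} \) for the entries of the triangular factor and \( f = T^p \), the confluent case \( t_{jj} = t_{j-1,j-1} \) uses \( f_{j-1,j} = p\, t_{j-1,j}\, t_{jj}^{p-1} \); the well-separated case uses the divided difference

\[
f_{j-1,j} = t_{j-1,j}\,\frac{t_{jj}^p - t_{j-1,j-1}^p}{t_{jj} - t_{j-1,j-1}};
\]

and otherwise

\[
f_{j-1,j} = t_{j-1,j}\,\frac{2\,e^{\,p(\log t_{jj} + \log t_{j-1,j-1})/2}\,\sinh(p\,w)}{t_{jj} - t_{j-1,j-1}},
\qquad
w = \operatorname{atanh}\!\left(\frac{t_{jj} - t_{j-1,j-1}}{t_{jj} + t_{j-1,j-1}}\right) + i\pi\,\mathcal{U},
\]

where \( \mathcal{U} \) is the unwinding number computed from the imaginary parts of \( \log t_{jj} - \log t_{j-1,j-1} \).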
diff --git a/unsupported/doc/examples/MatrixPower.cpp b/unsupported/doc/examples/MatrixPower.cpp
index 6ade0b8af..222452476 100644
--- a/unsupported/doc/examples/MatrixPower.cpp
+++ b/unsupported/doc/examples/MatrixPower.cpp
@@ -11,6 +11,6 @@ int main()
sin(1), cos(1), 0,
0 , 0 , 1;
std::cout << "The matrix A is:\n" << A << "\n\n"
- << "The matrix power A^(pi/4) is:\n" << A.pow(pi/4) << std::endl;
+ "The matrix power A^(pi/4) is:\n" << A.pow(pi/4) << std::endl;
return 0;
}
diff --git a/unsupported/doc/examples/MatrixPower_optimal.cpp b/unsupported/doc/examples/MatrixPower_optimal.cpp
new file mode 100644
index 000000000..86470ba0a
--- /dev/null
+++ b/unsupported/doc/examples/MatrixPower_optimal.cpp
@@ -0,0 +1,17 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ Matrix4cd A = Matrix4cd::Random();
+ MatrixPower<Matrix4cd> Apow(A);
+
+ std::cout << "The matrix A is:\n" << A << "\n\n"
+ "A^3.1 is:\n" << Apow(3.1) << "\n\n"
+ "A^3.3 is:\n" << Apow(3.3) << "\n\n"
+ "A^3.7 is:\n" << Apow(3.7) << "\n\n"
+ "A^3.9 is:\n" << Apow(3.9) << std::endl;
+ return 0;
+}
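
The MatrixPower_optimal.cpp example above forms full powers only; the product form exercised by the test suite below can be sketched as follows (a minimal sketch assuming a random 4x4 complex matrix):

#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main()
{
  Eigen::Matrix4cd A = Eigen::Matrix4cd::Random();
  Eigen::Vector4cd b = Eigen::Vector4cd::Random();

  Eigen::MatrixPower<Eigen::Matrix4cd> Apow(A);
  Eigen::Matrix4cd P = Apow(0.25);   // the full power A^0.25

  Eigen::Vector4cd v;
  v.noalias() = Apow(0.25) * b;      // A^0.25 applied to b, as in the tests

  std::cout << "difference: " << (v - P * b).norm() << std::endl;
  return 0;
}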
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
index 3a3f01464..95c63c574 100644
--- a/unsupported/test/matrix_power.cpp
+++ b/unsupported/test/matrix_power.cpp
@@ -16,15 +16,17 @@ void test2dRotation(double tol)
T angle, c, s;
A << 0, 1, -1, 0;
- for (int i = 0; i <= 20; i++) {
+ MatrixPower<Matrix<T,2,2> > Apow(A);
+
+ for (int i=0; i<=20; ++i) {
angle = pow(10, (i-10) / 5.);
c = std::cos(angle);
s = std::sin(angle);
B << c, s, -s, c;
- C = A.pow(std::ldexp(angle, 1) / M_PI);
- std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C, B) << '\n';
- VERIFY(C.isApprox(B, T(tol)));
+ C = Apow(std::ldexp(angle,1) / M_PI);
+ std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
}
@@ -36,15 +38,17 @@ void test2dHyperbolicRotation(double tol)
std::complex<T> ish(0, std::sinh((T)1));
A << ch, ish, -ish, ch;
- for (int i = 0; i <= 20; i++) {
- angle = std::ldexp(T(i-10), -1);
+ MatrixPower<Matrix<std::complex<T>,2,2> > Apow(A);
+
+ for (int i=0; i<=20; ++i) {
+ angle = std::ldexp(static_cast<T>(i-10), -1);
ch = std::cosh(angle);
ish = std::complex<T>(0, std::sinh(angle));
B << ch, ish, -ish, ch;
- C = A.pow(angle);
- std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C, B) << '\n';
- VERIFY(C.isApprox(B, T(tol)));
+ C = Apow(angle);
+ std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
}
}
@@ -55,71 +59,64 @@ void testExponentLaws(const MatrixType& m, double tol)
MatrixType m1, m2, m3, m4, m5;
RealScalar x, y;
- for (int i = 0; i < g_repeat; i++) {
+ for (int i=0; i<g_repeat; ++i) {
generateTestMatrix<MatrixType>::run(m1, m.rows());
+ MatrixPower<MatrixType> mpow(m1);
+
x = internal::random<RealScalar>();
y = internal::random<RealScalar>();
- m2 = m1.pow(x);
- m3 = m1.pow(y);
+ m2 = mpow(x);
+ m3 = mpow(y);
- m4 = m1.pow(x + y);
+ m4 = mpow(x+y);
m5.noalias() = m2 * m3;
- std::cout << "testExponentLaws: error powerm = " << relerr(m4, m5);
- VERIFY(m4.isApprox(m5, RealScalar(tol)));
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
- if (!NumTraits<typename MatrixType::Scalar>::IsComplex) {
- m4 = m1.pow(x * y);
- m5 = m2.pow(y);
- std::cout << " " << relerr(m4, m5);
- VERIFY(m4.isApprox(m5, RealScalar(tol)));
- }
+ m4 = mpow(x*y);
+ m5 = m2.pow(y);
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
m4 = (std::abs(x) * m1).pow(y);
m5 = std::pow(std::abs(x), y) * m3;
- std::cout << " " << relerr(m4, m5) << '\n';
- VERIFY(m4.isApprox(m5, RealScalar(tol)));
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
}
}
template<typename MatrixType, typename VectorType>
-void testMatrixVectorProduct(const MatrixType& m, const VectorType& v, double tol)
+void testProduct(const MatrixType& m, const VectorType& v, double tol)
{
typedef typename MatrixType::RealScalar RealScalar;
MatrixType m1;
VectorType v1, v2, v3;
RealScalar p;
- for (int i = 0; i < g_repeat; i++) {
+ for (int i=0; i<g_repeat; ++i) {
generateTestMatrix<MatrixType>::run(m1, m.rows());
+ MatrixPower<MatrixType> mpow(m1);
+
v1 = VectorType::Random(v.rows(), v.cols());
p = internal::random<RealScalar>();
- v2.noalias() = m1.pow(p).eval() * v1;
- v1 = m1.pow(p) * v1;
- std::cout << "testMatrixVectorProduct: error powerm = " << relerr(v2, v1) << '\n';
- VERIFY(v2.isApprox(v1, RealScalar(tol)));
+ v2.noalias() = mpow(p) * v1;
+ v3.noalias() = mpow(p).eval() * v1;
+ std::cout << "testMatrixVectorProduct: error powerm = " << relerr(v2, v3) << '\n';
+ VERIFY(v2.isApprox(v3, static_cast<RealScalar>(tol)));
}
}
-template<typename MatrixType>
-void testAliasing(const MatrixType& m)
+template<typename MatrixType, typename VectorType>
+void testMatrixVector(const MatrixType& m, const VectorType& v, double tol)
{
- typedef typename MatrixType::RealScalar RealScalar;
- MatrixType m1, m2;
- RealScalar p;
-
- for (int i = 0; i < g_repeat; i++) {
- generateTestMatrix<MatrixType>::run(m1, m.rows());
- p = internal::random<RealScalar>();
-
- m2 = m1.pow(p);
- m1 = m1.pow(p);
- VERIFY(m1 == m2);
- }
+ testExponentLaws(m,tol);
+ testProduct(m,v,tol);
}
void test_matrix_power()
{
+ typedef Matrix<double,3,3,RowMajor> Matrix3dRowMajor;
+ typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
+ typedef Matrix<long double,Dynamic,1> VectorXe;
+
CALL_SUBTEST_2(test2dRotation<double>(1e-13));
CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
CALL_SUBTEST_9(test2dRotation<long double>(1e-13));
@@ -127,28 +124,13 @@ void test_matrix_power()
CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14));
- CALL_SUBTEST_2(testExponentLaws(Matrix2d(), 1e-13));
- CALL_SUBTEST_7(testExponentLaws(Matrix<double,3,3,RowMajor>(), 1e-13));
- CALL_SUBTEST_3(testExponentLaws(Matrix4cd(), 1e-13));
- CALL_SUBTEST_4(testExponentLaws(MatrixXd(8,8), 1e-13));
- CALL_SUBTEST_1(testExponentLaws(Matrix2f(), 1e-4));
- CALL_SUBTEST_5(testExponentLaws(Matrix3cf(), 1e-4));
- CALL_SUBTEST_8(testExponentLaws(Matrix4f(), 1e-4));
- CALL_SUBTEST_6(testExponentLaws(MatrixXf(8,8), 1e-4));
-
- CALL_SUBTEST_2(testMatrixVectorProduct(Matrix2d(), Vector2d(), 1e-13));
- CALL_SUBTEST_7(testMatrixVectorProduct(Matrix<double,3,3,RowMajor>(), Vector3d(), 1e-13));
- CALL_SUBTEST_3(testMatrixVectorProduct(Matrix4cd(), Vector4cd(), 1e-13));
- CALL_SUBTEST_4(testMatrixVectorProduct(MatrixXd(8,8), MatrixXd(8,2), 1e-13));
- CALL_SUBTEST_1(testMatrixVectorProduct(Matrix2f(), Vector2f(), 1e-4));
- CALL_SUBTEST_5(testMatrixVectorProduct(Matrix3cf(), Vector3cf(), 1e-4));
- CALL_SUBTEST_8(testMatrixVectorProduct(Matrix4f(), Vector4f(), 1e-4));
- CALL_SUBTEST_6(testMatrixVectorProduct(MatrixXf(8,8), VectorXf(8), 1e-4));
- CALL_SUBTEST_9(testMatrixVectorProduct(Matrix<long double,Dynamic,Dynamic>(7,7), Matrix<long double,7,9>(), 1e-13));
-
- CALL_SUBTEST_7(testAliasing(Matrix<double,3,3,RowMajor>()));
- CALL_SUBTEST_3(testAliasing(Matrix4cd()));
- CALL_SUBTEST_4(testAliasing(MatrixXd(8,8)));
- CALL_SUBTEST_5(testAliasing(Matrix3cf()));
- CALL_SUBTEST_6(testAliasing(MatrixXf(8,8)));
+ CALL_SUBTEST_2(testMatrixVector(Matrix2d(), Vector2d(), 1e-13));
+ CALL_SUBTEST_7(testMatrixVector(Matrix3dRowMajor(), MatrixXd(3,5), 1e-13));
+ CALL_SUBTEST_3(testMatrixVector(Matrix4cd(), Vector4cd(), 1e-13));
+ CALL_SUBTEST_4(testMatrixVector(MatrixXd(8,8), VectorXd(8), 1e-13));
+ CALL_SUBTEST_1(testMatrixVector(Matrix2f(), Vector2f(), 1e-4));
+ CALL_SUBTEST_5(testMatrixVector(Matrix3cf(), Vector3cf(), 1e-4));
+ CALL_SUBTEST_8(testMatrixVector(Matrix4f(), Vector4f(), 1e-4));
+ CALL_SUBTEST_6(testMatrixVector(MatrixXf(8,8), VectorXf(8), 1e-4));
+ CALL_SUBTEST_9(testMatrixVector(MatrixXe(7,7), VectorXe(7), 1e-13));
}