Diffstat (limited to 'Eigen/src')
 Eigen/src/Cholesky/LDLT.h                                 |  54
 Eigen/src/Cholesky/LLT.h                                  |  53
 Eigen/src/Core/AssignEvaluator.h                          |  56
 Eigen/src/Core/ConditionEstimator.h                       | 166
 Eigen/src/Core/GeneralProduct.h                           |   2
 Eigen/src/Core/MathFunctions.h                            | 140
 Eigen/src/Core/ProductEvaluators.h                        |  38
 Eigen/src/Core/Redux.h                                    |  30
 Eigen/src/Core/SolveTriangular.h                          |   2
 Eigen/src/Core/SpecialFunctions.h                         | 102
 Eigen/src/Core/StableNorm.h                               |   7
 Eigen/src/Core/TriangularMatrix.h                         |   2
 Eigen/src/Core/arch/CUDA/Half.h                           |  69
 Eigen/src/Core/functors/BinaryFunctors.h                  |  16
 Eigen/src/Core/functors/UnaryFunctors.h                   | 106
 Eigen/src/Core/products/GeneralBlockPanelKernel.h         |  79
 Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h   |   2
 Eigen/src/Core/products/TriangularMatrixVector.h          |   4
 Eigen/src/Core/products/TriangularSolverMatrix.h          |   2
 Eigen/src/Core/util/Macros.h                              |  14
 Eigen/src/Householder/HouseholderSequence.h               |   3
 Eigen/src/LU/FullPivLU.h                                  |  13
 Eigen/src/LU/PartialPivLU.h                               |  19
 Eigen/src/QR/CompleteOrthogonalDecomposition.h            |   4
 Eigen/src/SVD/JacobiSVD.h                                 |  70
 Eigen/src/SparseCore/SparseCwiseUnaryOp.h                 |   2
 Eigen/src/SuperLUSupport/SuperLUSupport.h                 |   2
 Eigen/src/plugins/ArrayCwiseBinaryOps.h                   |  18
 28 files changed, 861 insertions(+), 214 deletions(-)
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 1d767d5c8..538aff956 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -13,7 +13,7 @@
#ifndef EIGEN_LDLT_H
#define EIGEN_LDLT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename MatrixType, int UpLo> struct LDLT_Traits;
@@ -73,11 +73,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
- LDLT()
- : m_matrix(),
- m_transpositions(),
+ LDLT()
+ : m_matrix(),
+ m_transpositions(),
m_sign(internal::ZeroSign),
- m_isInitialized(false)
+ m_isInitialized(false)
{}
/** \brief Default Constructor with memory preallocation
@@ -168,7 +168,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
* \note_about_checking_solutions
*
* More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
- * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
+ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
* \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
* \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
* least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
@@ -192,6 +192,15 @@ template<typename _MatrixType, int _UpLo> class LDLT
template<typename InputType>
LDLT& compute(const EigenBase<InputType>& matrix);
+ /** \returns an estimate of the reciprocal condition number of the matrix of
+ * which \c *this is the LDLT decomposition.
+ */
+ RealScalar rcond() const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return internal::rcond_estimate_helper(m_l1_norm, *this);
+ }
+
template <typename Derived>
LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);
@@ -207,6 +216,13 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType reconstructedMatrix() const;
+ /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.
+ *
+ * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
+ * \code x = decomposition.adjoint().solve(b) \endcode
+ */
+ const LDLT& adjoint() const { return *this; };
+
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
@@ -220,7 +236,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return Success;
}
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
@@ -228,7 +244,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
#endif
protected:
-
+
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
@@ -241,6 +257,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
* is not stored), and the diagonal entries correspond to D.
*/
MatrixType m_matrix;
+ RealScalar m_l1_norm;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
internal::SignMatrix m_sign;
@@ -314,7 +331,7 @@ template<> struct ldlt_inplace<Lower>
if(rs>0)
A21.noalias() -= A20 * temp.head(k);
}
-
+
// In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
// was smaller than the cutoff value. However, since LDLT is not rank-revealing
// we should only make sure that we do not introduce INF or NaN values.
@@ -433,12 +450,25 @@ template<typename InputType>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{
check_template_parameters();
-
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix = a.derived();
+ // Compute matrix L1 norm = max abs column sum.
+ m_l1_norm = RealScalar(0);
+ // TODO move this code to SelfAdjointView
+ for (Index col = 0; col < size; ++col) {
+ RealScalar abs_col_sum;
+ if (_UpLo == Lower)
+ abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
+ else
+ abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
+ if (abs_col_sum > m_l1_norm)
+ m_l1_norm = abs_col_sum;
+ }
+
m_transpositions.resize(size);
m_isInitialized = false;
m_temporary.resize(size);
@@ -466,7 +496,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
eigen_assert(m_matrix.rows()==size);
}
else
- {
+ {
m_matrix.resize(size,size);
m_matrix.setZero();
m_transpositions.resize(size);
@@ -505,7 +535,7 @@ void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) cons
// diagonal element is not well justified and leads to numerical issues in some cases.
// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
-
+
for (Index i = 0; i < vecD.size(); ++i)
{
if(abs(vecD(i)) > tolerance)
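
A minimal usage sketch of the rcond() and adjoint() members added above; the matrix values are made up for illustration:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d A;
      A << 4, 1, 0,
           1, 3, 1,
           0, 1, 2;
      Eigen::Vector3d b(1, 2, 3);

      Eigen::LDLT<Eigen::Matrix3d> ldlt(A);

      // New in this patch: cheap O(n^2) reciprocal condition number estimate.
      std::cout << "rcond estimate: " << ldlt.rcond() << "\n";

      // adjoint() returns the decomposition itself (A is self-adjoint), so
      // generic code can write decomposition.adjoint().solve(b) uniformly.
      Eigen::Vector3d x = ldlt.adjoint().solve(b);
      std::cout << "solution: " << x.transpose() << "\n";
    }
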
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 74cf5bfe1..19578b216 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_LLT_H
#define EIGEN_LLT_H
-namespace Eigen {
+namespace Eigen {
namespace internal{
template<typename MatrixType, int UpLo> struct LLT_Traits;
@@ -40,7 +40,7 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
*
* Example: \include LLT_example.cpp
* Output: \verbinclude LLT_example.out
- *
+ *
* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
*/
/* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
@@ -135,6 +135,16 @@ template<typename _MatrixType, int _UpLo> class LLT
template<typename InputType>
LLT& compute(const EigenBase<InputType>& matrix);
+ /** \returns an estimate of the reciprocal condition number of the matrix of
+ * which \c *this is the Cholesky decomposition.
+ */
+ RealScalar rcond() const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
+ return internal::rcond_estimate_helper(m_l1_norm, *this);
+ }
+
/** \returns the LLT decomposition matrix
*
* TODO: document the storage layout
@@ -159,12 +169,19 @@ template<typename _MatrixType, int _UpLo> class LLT
return m_info;
}
+ /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.
+ *
+ * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
+ * \code x = decomposition.adjoint().solve(b) \endcode
+ */
+ const LLT& adjoint() const { return *this; };
+
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
template<typename VectorType>
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
@@ -172,17 +189,18 @@ template<typename _MatrixType, int _UpLo> class LLT
#endif
protected:
-
+
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
-
+
/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.
*/
MatrixType m_matrix;
+ RealScalar m_l1_norm;
bool m_isInitialized;
ComputationInfo m_info;
};
@@ -268,7 +286,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
static Index unblocked(MatrixType& mat)
{
using std::sqrt;
-
+
eigen_assert(mat.rows()==mat.cols());
const Index size = mat.rows();
for(Index k = 0; k < size; ++k)
@@ -328,7 +346,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
}
};
-
+
template<typename Scalar> struct llt_inplace<Scalar, Upper>
{
typedef typename NumTraits<Scalar>::Real RealScalar;
@@ -387,12 +405,25 @@ template<typename InputType>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{
check_template_parameters();
-
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);
m_matrix = a.derived();
+ // Compute matrix L1 norm = max abs column sum.
+ m_l1_norm = RealScalar(0);
+ // TODO move this code to SelfAdjointView
+ for (Index col = 0; col < size; ++col) {
+ RealScalar abs_col_sum;
+ if (_UpLo == Lower)
+ abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
+ else
+ abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
+ if (abs_col_sum > m_l1_norm)
+ m_l1_norm = abs_col_sum;
+ }
+
m_isInitialized = true;
bool ok = Traits::inplace_decomposition(m_matrix);
m_info = ok ? Success : NumericalIssue;
@@ -419,7 +450,7 @@ LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, c
return *this;
}
-
+
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType,int _UpLo>
template<typename RhsType, typename DstType>
@@ -431,7 +462,7 @@ void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
#endif
/** \internal use x = llt_object.solve(x);
- *
+ *
* This is the \em in-place version of solve().
*
* \param bAndX represents both the right-hand side matrix b and result x.
@@ -483,7 +514,7 @@ SelfAdjointView<MatrixType, UpLo>::llt() const
return LLT<PlainObject,UpLo>(m_matrix);
}
#endif // __CUDACC__
-
+
} // end namespace Eigen
#endif // EIGEN_LLT_H
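
The adjoint() member exists precisely to enable generic code like the following; solveAdjointSystem is a hypothetical helper, not part of Eigen:

    #include <Eigen/Dense>

    // Hypothetical generic helper: works with LLT, LDLT, the LU decompositions,
    // etc., as long as the decomposition provides adjoint().solve(), which is
    // exactly what the adjoint() members added in this patch provide for
    // LLT/LDLT (returning *this, since the decomposed matrix is self-adjoint).
    template <typename Decomposition, typename Rhs>
    typename Rhs::PlainObject solveAdjointSystem(const Decomposition& dec, const Rhs& b) {
      return dec.adjoint().solve(b);
    }

    int main() {
      Eigen::Matrix2d A;
      A << 2, 1,
           1, 2;
      Eigen::Vector2d b(1, 0);
      Eigen::LLT<Eigen::Matrix2d> llt(A);
      Eigen::Vector2d x = solveAdjointSystem(llt, b);  // solves A^* x = b, i.e. A x = b here
      return x.allFinite() ? 0 : 1;
    }
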
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index 3de8aa9a2..9d4b315a0 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -29,13 +29,10 @@ struct copy_using_evaluator_traits
{
typedef typename DstEvaluator::XprType Dst;
typedef typename Dst::Scalar DstScalar;
- // TODO distinguish between linear traversal and inner-traversals
- typedef typename find_best_packet<DstScalar,Dst::SizeAtCompileTime>::type PacketType;
enum {
DstFlags = DstEvaluator::Flags,
- SrcFlags = SrcEvaluator::Flags,
- RequiredAlignment = unpacket_traits<PacketType>::alignment
+ SrcFlags = SrcEvaluator::Flags
};
public:
@@ -55,10 +52,25 @@ private:
: int(DstFlags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
: int(Dst::MaxRowsAtCompileTime),
OuterStride = int(outer_stride_at_compile_time<Dst>::ret),
- MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
- PacketSize = unpacket_traits<PacketType>::size
+ MaxSizeAtCompileTime = Dst::SizeAtCompileTime
+ };
+
+ // TODO distinguish between linear traversal and inner-traversals
+ typedef typename find_best_packet<DstScalar,Dst::SizeAtCompileTime>::type LinearPacketType;
+ typedef typename find_best_packet<DstScalar,InnerSize>::type InnerPacketType;
+
+ enum {
+ LinearPacketSize = unpacket_traits<LinearPacketType>::size,
+ InnerPacketSize = unpacket_traits<InnerPacketType>::size
};
+public:
+ enum {
+ LinearRequiredAlignment = unpacket_traits<LinearPacketType>::alignment,
+ InnerRequiredAlignment = unpacket_traits<InnerPacketType>::alignment
+ };
+
+private:
enum {
DstIsRowMajor = DstFlags&RowMajorBit,
SrcIsRowMajor = SrcFlags&RowMajorBit,
@@ -67,16 +79,16 @@ private:
&& (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit)
&& (functor_traits<AssignFunc>::PacketAccess),
MayInnerVectorize = MightVectorize
- && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
- && int(OuterStride)!=Dynamic && int(OuterStride)%int(PacketSize)==0
- && int(JointAlignment)>=int(RequiredAlignment),
+ && int(InnerSize)!=Dynamic && int(InnerSize)%int(InnerPacketSize)==0
+ && int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0
+ && int(JointAlignment)>=int(InnerRequiredAlignment),
MayLinearize = StorageOrdersAgree && (int(DstFlags) & int(SrcFlags) & LinearAccessBit),
MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
- && ((int(DstAlignment)>=int(RequiredAlignment)) || MaxSizeAtCompileTime == Dynamic),
+ && ((int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic),
/* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
so it's only good for large enough sizes. */
MaySliceVectorize = MightVectorize && DstHasDirectAccess
- && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
+ && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*InnerPacketSize)
/* slice vectorization can be slow, so we only want it if the slices are big, which is
indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
in a fixed-size matrix */
@@ -84,7 +96,8 @@ private:
public:
enum {
- Traversal = int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
+ Traversal = int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize) ? int(LinearVectorizedTraversal)
+ : int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
: int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
: int(MayLinearize) ? int(LinearTraversal)
@@ -94,9 +107,14 @@ public:
|| int(Traversal) == SliceVectorizedTraversal
};
+ typedef typename conditional<int(Traversal)==LinearVectorizedTraversal, LinearPacketType, InnerPacketType>::type PacketType;
+
private:
enum {
- UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
+ ActualPacketSize = int(Traversal)==LinearVectorizedTraversal ? LinearPacketSize
+ : Vectorized ? InnerPacketSize
+ : 1,
+ UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize,
MayUnrollCompletely = int(Dst::SizeAtCompileTime) != Dynamic
&& int(Dst::SizeAtCompileTime) * int(SrcEvaluator::CoeffReadCost) <= int(UnrollingLimit),
MayUnrollInner = int(InnerSize) != Dynamic
@@ -112,7 +130,7 @@ public:
: int(NoUnrolling)
)
: int(Traversal) == int(LinearVectorizedTraversal)
- ? ( bool(MayUnrollCompletely) && (int(DstAlignment)>=int(RequiredAlignment)) ? int(CompleteUnrolling)
+ ? ( bool(MayUnrollCompletely) && (int(DstAlignment)>=int(LinearRequiredAlignment)) ? int(CompleteUnrolling)
: int(NoUnrolling) )
: int(Traversal) == int(LinearTraversal)
? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling)
@@ -131,11 +149,13 @@ public:
std::cerr.unsetf(std::ios::hex);
EIGEN_DEBUG_VAR(DstAlignment)
EIGEN_DEBUG_VAR(SrcAlignment)
- EIGEN_DEBUG_VAR(RequiredAlignment)
+ EIGEN_DEBUG_VAR(LinearRequiredAlignment)
+ EIGEN_DEBUG_VAR(InnerRequiredAlignment)
EIGEN_DEBUG_VAR(JointAlignment)
EIGEN_DEBUG_VAR(InnerSize)
EIGEN_DEBUG_VAR(InnerMaxSize)
- EIGEN_DEBUG_VAR(PacketSize)
+ EIGEN_DEBUG_VAR(LinearPacketSize)
+ EIGEN_DEBUG_VAR(InnerPacketSize)
EIGEN_DEBUG_VAR(StorageOrdersAgree)
EIGEN_DEBUG_VAR(MightVectorize)
EIGEN_DEBUG_VAR(MayLinearize)
@@ -370,7 +390,7 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
typedef typename Kernel::Scalar Scalar;
typedef typename Kernel::PacketType PacketType;
enum {
- requestedAlignment = Kernel::AssignmentTraits::RequiredAlignment,
+ requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment,
packetSize = unpacket_traits<PacketType>::size,
dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),
dstAlignment = packet_traits<Scalar>::AlignedOnScalar ? int(requestedAlignment)
@@ -484,7 +504,7 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
typedef typename Kernel::PacketType PacketType;
enum {
packetSize = unpacket_traits<PacketType>::size,
- requestedAlignment = int(Kernel::AssignmentTraits::RequiredAlignment),
+ requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment),
alignable = packet_traits<Scalar>::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment)>=sizeof(Scalar),
dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),
dstAlignment = alignable ? int(requestedAlignment)
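
The split of the single RequiredAlignment/PacketSize pair into Linear*/Inner* variants lets linear traversal use a wider packet than the per-inner-vector one, and the new Traversal rule prefers linear vectorization when LinearPacketSize exceeds InnerPacketSize. The toy sketch below is not Eigen code; it only illustrates the selection rule under the assumption that the best packet for a size-N object is the widest power of two dividing N:

    #include <cstdio>

    // Illustrative only: pick the widest SIMD width (in elements) that divides
    // the object size, mimicking the spirit of find_best_packet<Scalar,Size>.
    constexpr int best_packet_size(int size, int max_width = 8) {
      return (max_width == 1 || size % max_width == 0)
                 ? max_width
                 : best_packet_size(size, max_width / 2);
    }

    int main() {
      // A 3x4 float matrix: the 12-element linear traversal can use 4-wide
      // packets, while a per-column (inner) traversal of length 3 cannot be
      // vectorized at all, so the linear path is preferred.
      constexpr int LinearPacketSize = best_packet_size(12);  // -> 4
      constexpr int InnerPacketSize  = best_packet_size(3);   // -> 1
      std::printf("linear=%d inner=%d\n", LinearPacketSize, InnerPacketSize);
    }
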
diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h
new file mode 100644
index 000000000..68c5e918e
--- /dev/null
+++ b/Eigen/src/Core/ConditionEstimator.h
@@ -0,0 +1,166 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONDITIONESTIMATOR_H
+#define EIGEN_CONDITIONESTIMATOR_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Vector, typename RealVector, bool IsComplex>
+struct rcond_compute_sign {
+ static inline Vector run(const Vector& v) {
+ const RealVector v_abs = v.cwiseAbs();
+ return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
+ .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
+ }
+};
+
+// Partial specialization to avoid elementwise division for real vectors.
+template <typename Vector>
+struct rcond_compute_sign<Vector, Vector, false> {
+ static inline Vector run(const Vector& v) {
+ return (v.array() < static_cast<typename Vector::RealScalar>(0))
+ .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
+ }
+};
+
+/** \brief Reciprocal condition number estimator.
+ *
+ * Computing a decomposition of a dense matrix takes O(n^3) operations, while
+ * this method estimates the condition number quickly and reliably in O(n^2)
+ * operations.
+ *
+ * \returns an estimate of the reciprocal condition number
+ * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
+ * its decomposition. Supports the following decompositions: FullPivLU,
+ * PartialPivLU, LDLT, and LLT.
+ *
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
+ */
+template <typename Decomposition>
+typename Decomposition::RealScalar
+rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec)
+{
+ typedef typename Decomposition::RealScalar RealScalar;
+ eigen_assert(dec.rows() == dec.cols());
+ if (dec.rows() == 0) return RealScalar(1);
+ if (matrix_norm == RealScalar(0)) return RealScalar(0);
+ if (dec.rows() == 1) return RealScalar(1);
+ const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
+ return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0)
+ : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
+}
+
+/**
+ * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
+ * \a matrix that implements .solve() and .adjoint().solve() methods.
+ *
+ * This function implements Algorithms 4.1 and 5.1 from
+ * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
+ * which also forms the basis for the condition number estimators in
+ * LAPACK. Since at most 10 calls to the solve method of dec are
+ * performed, the total cost is O(dims^2), as opposed to O(dims^3)
+ * needed to compute the inverse matrix explicitly.
+ *
+ * The most common usage is in estimating the condition number
+ * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
+ * computed directly in O(n^2) operations.
+ *
+ * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
+ * LLT.
+ *
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
+ */
+template <typename Decomposition>
+typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec)
+{
+ typedef typename Decomposition::MatrixType MatrixType;
+ typedef typename Decomposition::Scalar Scalar;
+ typedef typename Decomposition::RealScalar RealScalar;
+ typedef typename internal::plain_col_type<MatrixType>::type Vector;
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
+ const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
+
+ eigen_assert(dec.rows() == dec.cols());
+ const Index n = dec.rows();
+ if (n == 0)
+ return 0;
+
+ Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
+
+ // lower_bound is a lower bound on
+ // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
+ // and is the objective maximized by the ("super-") gradient ascent
+ // algorithm below.
+ RealScalar lower_bound = v.template lpNorm<1>();
+ if (n == 1)
+ return lower_bound;
+
+ // Gradient ascent algorithm follows: We know that the optimum is achieved at
+ // one of the simplices v = e_i, so in each iteration we follow a
+ // super-gradient to move towards the optimal one.
+ RealScalar old_lower_bound = lower_bound;
+ Vector sign_vector(n);
+ Vector old_sign_vector;
+ Index v_max_abs_index = -1;
+ Index old_v_max_abs_index = v_max_abs_index;
+ for (int k = 0; k < 4; ++k)
+ {
+ sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
+ if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
+ // Break if the solution stagnated.
+ break;
+ }
+ // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
+ v = dec.adjoint().solve(sign_vector);
+ v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
+ if (v_max_abs_index == old_v_max_abs_index) {
+ // Break if the solution stagnated.
+ break;
+ }
+ // Move to the new simplex e_j, where j = v_max_abs_index.
+ v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
+ lower_bound = v.template lpNorm<1>();
+ if (lower_bound <= old_lower_bound) {
+ // Break if the gradient step did not increase the lower_bound.
+ break;
+ }
+ if (!is_complex) {
+ old_sign_vector = sign_vector;
+ }
+ old_v_max_abs_index = v_max_abs_index;
+ old_lower_bound = lower_bound;
+ }
+ // The following calculates an independent estimate of ||matrix||_1 by
+ // multiplying matrix by a vector with entries of slowly increasing
+ // magnitude and alternating sign:
+ // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
+ // This improvement to Hager's algorithm above is due to Higham. It was
+ // added to make the algorithm more robust in certain corner cases where
+ // large elements in the matrix might otherwise escape detection due to
+ // exact cancellation (especially when op and op_adjoint correspond to a
+ // sequence of backsubstitutions and permutations), which could cause
+ // Hager's algorithm to vastly underestimate ||matrix||_1.
+ Scalar alternating_sign(RealScalar(1));
+ for (Index i = 0; i < n; ++i) {
+ v[i] = alternating_sign * (RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
+ alternating_sign = -alternating_sign;
+ }
+ v = dec.solve(v);
+ const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
+ return numext::maxi(lower_bound, alternate_lower_bound);
+}
+
+} // namespace internal
+
+} // namespace Eigen
+
+#endif
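
A sketch of how the new estimator surfaces through a decomposition, comparing the O(n^2) estimate against the exact reciprocal condition number; it assumes PartialPivLU gains an rcond() member in this same series (PartialPivLU.h is touched above):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      const int n = 50;
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);

      Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);

      // O(n^2) estimate produced through rcond_estimate_helper /
      // rcond_invmatrix_L1_norm_estimate (assumed member, see lead-in).
      double estimate = lu.rcond();

      // Exact value for comparison: 1 / (||A||_1 * ||inv(A)||_1), O(n^3).
      double exact = 1.0 / (A.cwiseAbs().colwise().sum().maxCoeff() *
                            A.inverse().cwiseAbs().colwise().sum().maxCoeff());

      std::cout << "estimate: " << estimate << "  exact: " << exact << "\n";
    }
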
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
index 53f934999..f7c5f4276 100644
--- a/Eigen/src/Core/GeneralProduct.h
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -81,6 +81,8 @@ public:
* This is a compile time mapping from {1,Small,Large}^3 -> {product types} */
// FIXME I'm not sure the current mapping is the ideal one.
template<int M, int N> struct product_type_selector<M,N,1> { enum { ret = OuterProduct }; };
+template<int M> struct product_type_selector<M, 1, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<int N> struct product_type_selector<1, N, 1> { enum { ret = LazyCoeffBasedProductMode }; };
template<int Depth> struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; };
template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; };
template<> struct product_type_selector<Small,1, Small> { enum { ret = CoeffBasedProductMode }; };
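
A hedged illustration of the shapes these two new specializations catch: a product with inner dimension 1 whose result is a column or row vector, now evaluated as a lazy coefficient-based product rather than through the outer-product path:

    #include <Eigen/Dense>

    int main() {
      Eigen::Vector3f v(1.0f, 2.0f, 3.0f);
      Eigen::Matrix<float, 1, 1> s;
      s(0, 0) = 2.0f;

      // (3x1) * (1x1) and (1x1) * (1x3): inner dimension 1, vector result.
      // With the specializations above these degenerate "outer products" go
      // through the lazy coefficient-based product path.
      Eigen::Vector3f r1 = v * s;
      Eigen::RowVector3f r2 = s * v.transpose();
      return (r1.sum() > 0.0f && r2.sum() > 0.0f) ? 0 : 1;
    }
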
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 8e7dd2b73..5771abf7d 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -1025,6 +1025,66 @@ double log(const double &x) { return ::log(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+typename NumTraits<T>::Real abs(const T &x) {
+ EIGEN_USING_STD_MATH(abs);
+ return abs(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float abs(const float &x) { return ::fabsf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double abs(const double &x) { return ::fabs(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T exp(const T &x) {
+ EIGEN_USING_STD_MATH(exp);
+ return exp(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float exp(const float &x) { return ::expf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double exp(const double &x) { return ::exp(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T cos(const T &x) {
+ EIGEN_USING_STD_MATH(cos);
+ return cos(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float cos(const float &x) { return ::cosf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double cos(const double &x) { return ::cos(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T sin(const T &x) {
+ EIGEN_USING_STD_MATH(sin);
+ return sin(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float sin(const float &x) { return ::sinf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double sin(const double &x) { return ::sin(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T tan(const T &x) {
EIGEN_USING_STD_MATH(tan);
return tan(x);
@@ -1040,34 +1100,94 @@ double tan(const double &x) { return ::tan(x); }
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-typename NumTraits<T>::Real abs(const T &x) {
- EIGEN_USING_STD_MATH(abs);
- return abs(x);
+T acos(const T &x) {
+ EIGEN_USING_STD_MATH(acos);
+ return acos(x);
}
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-float abs(const float &x) { return ::fabsf(x); }
+float acos(const float &x) { return ::acosf(x); }
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-double abs(const double &x) { return ::fabs(x); }
+double acos(const double &x) { return ::acos(x); }
#endif
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-T exp(const T &x) {
- EIGEN_USING_STD_MATH(exp);
- return exp(x);
+T asin(const T &x) {
+ EIGEN_USING_STD_MATH(asin);
+ return asin(x);
}
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-float exp(const float &x) { return ::expf(x); }
+float asin(const float &x) { return ::asinf(x); }
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
-double exp(const double &x) { return ::exp(x); }
+double asin(const double &x) { return ::asin(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T atan(const T &x) {
+ EIGEN_USING_STD_MATH(atan);
+ return atan(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float atan(const float &x) { return ::atanf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double atan(const double &x) { return ::atan(x); }
+#endif
+
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T cosh(const T &x) {
+ EIGEN_USING_STD_MATH(cosh);
+ return cosh(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float cosh(const float &x) { return ::coshf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double cosh(const double &x) { return ::cosh(x); }
#endif
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T sinh(const T &x) {
+ EIGEN_USING_STD_MATH(sinh);
+ return sinh(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float sinh(const float &x) { return ::sinhf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double sinh(const double &x) { return ::sinh(x); }
+#endif
+
+template<typename T>
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+T tanh(const T &x) {
+ EIGEN_USING_STD_MATH(tanh);
+ return tanh(x);
+}
+
+#ifdef __CUDACC__
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+float tanh(const float &x) { return ::tanhf(x); }
+
+template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+double tanh(const double &x) { return ::tanh(x); }
+#endif
template <typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
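
With the trig/hyperbolic wrappers now available in numext, generic scalar code can stay host/device agnostic; under __CUDACC__ the specializations above route to ::sinf, ::cosf, etc. A minimal host-side sketch:

    #include <Eigen/Core>
    #include <iostream>

    // Generic scalar code that compiles for float and double alike; when built
    // with nvcc it can additionally be marked EIGEN_DEVICE_FUNC, since the
    // wrappers above dispatch to ::sinf / ::cosf / ::tanhf under __CUDACC__.
    template <typename Scalar>
    Scalar wave(Scalar x) {
      return Eigen::numext::sin(x) * Eigen::numext::cos(x) + Eigen::numext::tanh(x);
    }

    int main() {
      std::cout << wave(0.5f) << " " << wave(0.5) << "\n";
    }
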
diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index 3ce86e8cd..d9fd888cf 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -410,8 +410,6 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
typedef Product<Lhs, Rhs, LazyProduct> XprType;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
- typedef typename XprType::PacketScalar PacketScalar;
- typedef typename XprType::PacketReturnType PacketReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
explicit product_evaluator(const XprType& xpr)
@@ -437,16 +435,20 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
typedef evaluator<LhsNestedCleaned> LhsEtorType;
typedef evaluator<RhsNestedCleaned> RhsEtorType;
-
+
enum {
RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime,
ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime,
InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime),
MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime,
-
- PacketSize = packet_traits<Scalar>::size,
+ MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime
+ };
+
+ typedef typename find_best_packet<Scalar,RowsAtCompileTime>::type LhsVecPacketType;
+ typedef typename find_best_packet<Scalar,ColsAtCompileTime>::type RhsVecPacketType;
+ enum {
+
LhsCoeffReadCost = LhsEtorType::CoeffReadCost,
RhsCoeffReadCost = RhsEtorType::CoeffReadCost,
CoeffReadCost = InnerSize==0 ? NumTraits<Scalar>::ReadCost
@@ -459,19 +461,23 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
LhsFlags = LhsEtorType::Flags,
RhsFlags = RhsEtorType::Flags,
- LhsAlignment = LhsEtorType::Alignment,
- RhsAlignment = RhsEtorType::Alignment,
-
LhsRowMajor = LhsFlags & RowMajorBit,
RhsRowMajor = RhsFlags & RowMajorBit,
+
+ LhsVecPacketSize = unpacket_traits<LhsVecPacketType>::size,
+ RhsVecPacketSize = unpacket_traits<RhsVecPacketType>::size,
+
+ // Here, we don't care about alignment larger than the usable packet size.
+ LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),
+ RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),
SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value,
CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit)
- && (ColsAtCompileTime == Dynamic || ((ColsAtCompileTime % PacketSize) == 0) ),
+ && (ColsAtCompileTime == Dynamic || ((ColsAtCompileTime % RhsVecPacketSize) == 0) ),
CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit)
- && (RowsAtCompileTime == Dynamic || ((RowsAtCompileTime % PacketSize) == 0) ),
+ && (RowsAtCompileTime == Dynamic || ((RowsAtCompileTime % LhsVecPacketSize) == 0) ),
EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
: (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
@@ -491,10 +497,10 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
: 0,
/* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
- * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
- * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
- * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.
- */
+ * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
+ * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
+ * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.
+ */
CanVectorizeInner = SameType
&& LhsRowMajor
&& (!RhsRowMajor)
@@ -1000,7 +1006,7 @@ struct transposition_matrix_product
const Index size = tr.size();
StorageIndex j = 0;
- if(!(is_same<MatrixTypeCleaned,Dest>::value && extract_data(dst) == extract_data(mat)))
+ if(!is_same_dense(dst,mat))
dst = mat;
for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
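
The is_same_dense(dst,mat) test used here (and in SolveTriangular.h and TriangularMatrix.h below) replaces the verbose is_same<>/extract_data comparison. A conceptual stand-in, using a hypothetical helper name rather than Eigen's internal one:

    #include <Eigen/Dense>
    #include <type_traits>

    // Conceptual stand-in (hypothetical helper, not Eigen's internal
    // is_same_dense): two dense objects alias if they have the same plain
    // type and point at the same storage.
    template <typename A, typename B>
    bool refers_to_same_storage(const A& a, const B& b) {
      return std::is_same<typename A::PlainObject, typename B::PlainObject>::value &&
             static_cast<const void*>(a.data()) == static_cast<const void*>(b.data());
    }

    int main() {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd n = m;
      // When dst already aliases mat, the "dst = mat" copy above is skipped.
      return (refers_to_same_storage(m, m) && !refers_to_same_storage(m, n)) ? 0 : 1;
    }
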
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index d170cae29..98b2fd868 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -27,8 +27,9 @@ template<typename Func, typename Derived>
struct redux_traits
{
public:
+ typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;
enum {
- PacketSize = packet_traits<typename Derived::Scalar>::size,
+ PacketSize = unpacket_traits<PacketType>::size,
InnerMaxSize = int(Derived::IsRowMajor)
? Derived::MaxColsAtCompileTime
: Derived::MaxRowsAtCompileTime
@@ -137,12 +138,12 @@ template<typename Func, typename Derived, int Start, int Length>
struct redux_vec_unroller
{
enum {
- PacketSize = packet_traits<typename Derived::Scalar>::size,
+ PacketSize = redux_traits<Func, Derived>::PacketSize,
HalfLength = Length/2
};
typedef typename Derived::Scalar Scalar;
- typedef typename packet_traits<Scalar>::type PacketScalar;
+ typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
{
@@ -156,14 +157,14 @@ template<typename Func, typename Derived, int Start>
struct redux_vec_unroller<Func, Derived, Start, 1>
{
enum {
- index = Start * packet_traits<typename Derived::Scalar>::size,
+ index = Start * redux_traits<Func, Derived>::PacketSize,
outer = index / int(Derived::InnerSizeAtCompileTime),
inner = index % int(Derived::InnerSizeAtCompileTime),
alignment = Derived::Alignment
};
typedef typename Derived::Scalar Scalar;
- typedef typename packet_traits<Scalar>::type PacketScalar;
+ typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
{
@@ -209,13 +210,13 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
- typedef typename packet_traits<Scalar>::type PacketScalar;
+ typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
static Scalar run(const Derived &mat, const Func& func)
{
const Index size = mat.size();
- const Index packetSize = packet_traits<Scalar>::size;
+ const Index packetSize = redux_traits<Func, Derived>::PacketSize;
const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
enum {
alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
@@ -268,7 +269,7 @@ template<typename Func, typename Derived, int Unrolling>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
typedef typename Derived::Scalar Scalar;
- typedef typename packet_traits<Scalar>::type PacketType;
+ typedef typename redux_traits<Func, Derived>::PacketType PacketType;
EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
{
@@ -276,7 +277,7 @@ struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
const Index innerSize = mat.innerSize();
const Index outerSize = mat.outerSize();
enum {
- packetSize = packet_traits<Scalar>::size
+ packetSize = redux_traits<Func, Derived>::PacketSize
};
const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
Scalar res;
@@ -306,9 +307,10 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
{
typedef typename Derived::Scalar Scalar;
- typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
enum {
- PacketSize = packet_traits<Scalar>::size,
+ PacketSize = redux_traits<Func, Derived>::PacketSize,
Size = Derived::SizeAtCompileTime,
VectorizedSize = (Size / PacketSize) * PacketSize
};
@@ -367,11 +369,11 @@ public:
{ return m_evaluator.coeff(index); }
template<int LoadMode, typename PacketType>
- PacketReturnType packet(Index row, Index col) const
+ PacketType packet(Index row, Index col) const
{ return m_evaluator.template packet<LoadMode,PacketType>(row, col); }
template<int LoadMode, typename PacketType>
- PacketReturnType packet(Index index) const
+ PacketType packet(Index index) const
{ return m_evaluator.template packet<LoadMode,PacketType>(index); }
EIGEN_DEVICE_FUNC
@@ -379,7 +381,7 @@ public:
{ return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
template<int LoadMode, typename PacketType>
- PacketReturnType packetByOuterInner(Index outer, Index inner) const
+ PacketType packetByOuterInner(Index outer, Index inner) const
{ return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
const XprType & nestedExpression() const { return m_xpr; }
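
Reductions now pick their packet through find_best_packet, mirroring the AssignEvaluator.h change, so small fixed-size objects reduce with a packet matched to their size. A usage sketch (the custom redux() callable is illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // A small fixed-size matrix: with this patch its reductions use the
      // packet chosen by find_best_packet<float,12> rather than always using
      // the full-width packet_traits<float>::type.
      Eigen::Matrix<float, 3, 4> m;
      m.setRandom();

      std::cout << m.sum() << " " << m.maxCoeff() << "\n";

      // redux() with a custom associative callable goes through the same path.
      float prod = m.redux([](float a, float b) { return a * b; });
      std::cout << prod << "\n";
    }
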
diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h
index 5a2010449..a33356423 100644
--- a/Eigen/src/Core/SolveTriangular.h
+++ b/Eigen/src/Core/SolveTriangular.h
@@ -213,7 +213,7 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
template<typename Dest> inline void evalTo(Dest& dst) const
{
- if(!(is_same<RhsNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_rhs)))
+ if(!is_same_dense(dst,m_rhs))
dst = m_rhs;
m_triangularMatrix.template solveInPlace<Side>(dst);
}
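
The aliasing test here guards the common pattern of solving into the right-hand side itself; a usage sketch with the public triangular-view API:

    #include <Eigen/Dense>

    int main() {
      Eigen::Matrix3d A;
      A << 2, 0, 0,
           1, 3, 0,
           4, 5, 6;
      Eigen::Vector3d b(2, 4, 15);

      // Out-of-place: evalTo first copies b into the destination, unless the
      // aliasing test above detects that destination and rhs already coincide.
      Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);

      // In-place: b is overwritten with the solution, no copy at all.
      A.triangularView<Eigen::Lower>().solveInPlace(b);

      return (x - b).norm() < 1e-12 ? 0 : 1;
    }
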
diff --git a/Eigen/src/Core/SpecialFunctions.h b/Eigen/src/Core/SpecialFunctions.h
index adb055b15..3513a5c63 100644
--- a/Eigen/src/Core/SpecialFunctions.h
+++ b/Eigen/src/Core/SpecialFunctions.h
@@ -281,20 +281,18 @@ struct digamma_impl {
*/
Scalar p, q, nz, s, w, y;
- bool negative;
+ bool negative = false;
const Scalar maxnum = NumTraits<Scalar>::infinity();
- const Scalar m_pi = EIGEN_PI;
+ const Scalar m_pi(EIGEN_PI);
- negative = 0;
- nz = 0.0;
-
- const Scalar zero = 0.0;
- const Scalar one = 1.0;
- const Scalar half = 0.5;
+ const Scalar zero = Scalar(0);
+ const Scalar one = Scalar(1);
+ const Scalar half = Scalar(0.5);
+ nz = zero;
if (x <= zero) {
- negative = one;
+ negative = true;
q = x;
p = numext::floor(q);
if (p == q) {
@@ -463,7 +461,7 @@ template <typename Scalar>
struct igammac_impl {
EIGEN_DEVICE_FUNC
static Scalar run(Scalar a, Scalar x) {
- /* igamc()
+ /* igamc()
*
* Incomplete gamma integral (modified for Eigen)
*
@@ -519,26 +517,51 @@ struct igammac_impl {
*/
const Scalar zero = 0;
const Scalar one = 1;
+ const Scalar nan = NumTraits<Scalar>::quiet_NaN();
+
+ if ((x < zero) || (a <= zero)) {
+ // domain error
+ return nan;
+ }
+
+ if ((x < one) || (x < a)) {
+ /* The checks above ensure that we meet the preconditions for
+ * igamma_impl::Impl(), so call it, rather than igamma_impl::Run().
+ * Calling Run() would also work, but in that case the compiler may not be
+ * able to prove that igammac_impl::Run and igamma_impl::Run are not
+ * mutually recursive. This leads to worse code, particularly on
+ * platforms like nvptx, where recursion is allowed only begrudgingly.
+ */
+ return (one - igamma_impl<Scalar>::Impl(a, x));
+ }
+
+ return Impl(a, x);
+ }
+
+ private:
+ /* igamma_impl calls igammac_impl::Impl. */
+ friend struct igamma_impl<Scalar>;
+
+ /* Actually computes igamc(a, x).
+ *
+ * Preconditions:
+ * a > 0
+ * x >= 1
+ * x >= a
+ */
+ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
const Scalar two = 2;
const Scalar machep = igamma_helper<Scalar>::machep();
const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
const Scalar big = igamma_helper<Scalar>::big();
const Scalar biginv = 1 / big;
- const Scalar nan = NumTraits<Scalar>::quiet_NaN();
const Scalar inf = NumTraits<Scalar>::infinity();
Scalar ans, ax, c, yc, r, t, y, z;
Scalar pk, pkm1, pkm2, qk, qkm1, qkm2;
- if ((x < zero) || ( a <= zero)) {
- // domain error
- return nan;
- }
-
- if ((x < one) || (x < a)) {
- return (one - igamma_impl<Scalar>::run(a, x));
- }
-
if (x == inf) return zero; // std::isinf crashes on CUDA
/* Compute x**a * exp(-x) / gamma(a) */
@@ -618,7 +641,7 @@ template <typename Scalar>
struct igamma_impl {
EIGEN_DEVICE_FUNC
static Scalar run(Scalar a, Scalar x) {
- /* igam()
+ /* igam()
* Incomplete gamma integral
*
*
@@ -680,22 +703,47 @@ struct igamma_impl {
*/
const Scalar zero = 0;
const Scalar one = 1;
- const Scalar machep = igamma_helper<Scalar>::machep();
- const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
const Scalar nan = NumTraits<Scalar>::quiet_NaN();
- double ans, ax, c, r;
-
if (x == zero) return zero;
- if ((x < zero) || ( a <= zero)) { // domain error
+ if ((x < zero) || (a <= zero)) { // domain error
return nan;
}
if ((x > one) && (x > a)) {
- return (one - igammac_impl<Scalar>::run(a, x));
+ /* The checks above ensure that we meet the preconditions for
+ * igammac_impl::Impl(), so call it, rather than igammac_impl::Run().
+ * Calling Run() would also work, but in that case the compiler may not be
+ * able to prove that igammac_impl::Run and igamma_impl::Run are not
+ * mutually recursive. This leads to worse code, particularly on
+ * platforms like nvptx, where recursion is allowed only begrudgingly.
+ */
+ return (one - igammac_impl<Scalar>::Impl(a, x));
}
+ return Impl(a, x);
+ }
+
+ private:
+ /* igammac_impl calls igamma_impl::Impl. */
+ friend struct igammac_impl<Scalar>;
+
+ /* Actually computes igam(a, x).
+ *
+ * Preconditions:
+ * x > 0
+ * a > 0
+ * !(x > 1 && x > a)
+ */
+ EIGEN_DEVICE_FUNC static Scalar Impl(Scalar a, Scalar x) {
+ const Scalar zero = 0;
+ const Scalar one = 1;
+ const Scalar machep = igamma_helper<Scalar>::machep();
+ const Scalar maxlog = numext::log(NumTraits<Scalar>::highest());
+
+ double ans, ax, c, r;
+
/* Compute x**a * exp(-x) / gamma(a) */
ax = a * numext::log(x) - x - lgamma_impl<Scalar>::run(a);
if (ax < -maxlog) {
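
The run()/Impl() split above exists so that igamma and igammac call each other's private Impl() after the domain checks, which keeps the compiler from seeing apparent mutual recursion between the two run() functions. A standalone sketch of the pattern, with trivial stand-in math instead of the gamma series:

    #include <cstdio>

    // F and G appear to call each other, but after the precondition checks
    // each one calls the other's Impl() directly, so no real recursion exists.
    struct G;  // forward declaration

    struct F {
      static double run(double x) {
        if (x < 0) return 0;             // domain check
        if (x > 1) return 1 - GImpl(x);  // complement on the other branch
        return Impl(x);
      }
     private:
      friend struct G;
      static double Impl(double x) { return x * 0.5; }  // stands in for the real series
      static double GImpl(double x);                    // defined after G below
    };

    struct G {
      static double run(double x) {
        if (x < 0) return 0;
        if (x <= 1) return 1 - F::Impl(x);
        return Impl(x);
      }
     private:
      friend struct F;
      static double Impl(double x) { return 1.0 / x; }  // stands in for the continued fraction
    };

    double F::GImpl(double x) { return G::Impl(x); }

    int main() { std::printf("%f %f\n", F::run(0.3), G::run(2.0)); }
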
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index 7fe39808b..d2fe1e199 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -168,11 +168,12 @@ MatrixBase<Derived>::stableNorm() const
DerivedCopy copy(derived());
enum {
- CanAlign = (int(Flags)&DirectAccessBit) || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME
+ CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit)
+ || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough
+ ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT) // if we cannot allocate on the stack, then let's not bother with this optimization
};
typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,
- typename DerivedCopyClean
- ::ConstSegmentReturnType>::type SegmentWrapper;
+ typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;
Index n = size();
if(n==1)
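
The patch only tightens when the aligned fast path is taken (the copy's own flags plus a stack-allocation limit); the user-visible behaviour of stableNorm() is unchanged, as in this sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::VectorXd v = Eigen::VectorXd::Constant(4, 1e200);

      // Plain norm() squares the entries first and overflows to inf here,
      // while stableNorm() rescales blockwise and returns 2e200.
      std::cout << "norm:       " << v.norm() << "\n";
      std::cout << "stableNorm: " << v.stableNorm() << "\n";
    }
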
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
index e6d137e40..5c5e5028e 100644
--- a/Eigen/src/Core/TriangularMatrix.h
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -532,7 +532,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
- if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
+ if(!internal::is_same_dense(dst,rhs))
dst = rhs;
this->solveInPlace(dst);
}
diff --git a/Eigen/src/Core/arch/CUDA/Half.h b/Eigen/src/Core/arch/CUDA/Half.h
index 281b8e4c6..6387f2870 100644
--- a/Eigen/src/Core/arch/CUDA/Half.h
+++ b/Eigen/src/Core/arch/CUDA/Half.h
@@ -70,12 +70,18 @@ struct half : public __half {
explicit EIGEN_DEVICE_FUNC half(bool b)
: __half(internal::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
+ explicit EIGEN_DEVICE_FUNC half(unsigned int ui)
+ : __half(internal::float_to_half_rtne(static_cast<float>(ui))) {}
explicit EIGEN_DEVICE_FUNC half(int i)
: __half(internal::float_to_half_rtne(static_cast<float>(i))) {}
+ explicit EIGEN_DEVICE_FUNC half(unsigned long ul)
+ : __half(internal::float_to_half_rtne(static_cast<float>(ul))) {}
explicit EIGEN_DEVICE_FUNC half(long l)
: __half(internal::float_to_half_rtne(static_cast<float>(l))) {}
explicit EIGEN_DEVICE_FUNC half(long long ll)
: __half(internal::float_to_half_rtne(static_cast<float>(ll))) {}
+ explicit EIGEN_DEVICE_FUNC half(unsigned long long ull)
+ : __half(internal::float_to_half_rtne(static_cast<float>(ull))) {}
explicit EIGEN_DEVICE_FUNC half(float f)
: __half(internal::float_to_half_rtne(f)) {}
explicit EIGEN_DEVICE_FUNC half(double d)
@@ -401,6 +407,7 @@ static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const Eigen::half& a)
static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const Eigen::half& a) {
return !(Eigen::numext::isinf)(a) && !(Eigen::numext::isnan)(a);
}
+
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half abs(const Eigen::half& a) {
Eigen::half result;
result.x = a.x & 0x7FFF;
@@ -418,6 +425,18 @@ template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half sqrt(const Eigen::h
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half pow(const Eigen::half& a, const Eigen::half& b) {
return Eigen::half(::powf(float(a), float(b)));
}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half sin(const Eigen::half& a) {
+ return Eigen::half(::sinf(float(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half cos(const Eigen::half& a) {
+ return Eigen::half(::cosf(float(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half tan(const Eigen::half& a) {
+ return Eigen::half(::tanf(float(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half tanh(const Eigen::half& a) {
+ return Eigen::half(::tanhf(float(a)));
+}
template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half floor(const Eigen::half& a) {
return Eigen::half(::floorf(float(a)));
}
@@ -425,6 +444,51 @@ template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half ceil(const Eigen::h
return Eigen::half(::ceilf(float(a)));
}
+template <> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half mini(const Eigen::half& a, const Eigen::half& b) {
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+ return __hlt(b, a) ? b : a;
+#else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f2 < f1 ? b : a;
+#endif
+}
+template <> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half maxi(const Eigen::half& a, const Eigen::half& b) {
+#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
+ return __hlt(a, b) ? b : a;
+#else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f1 < f2 ? b : a;
+#endif
+}
+
+#ifdef EIGEN_HAS_C99_MATH
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half lgamma(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::lgamma(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half digamma(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::digamma(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half zeta(const Eigen::half& x, const Eigen::half& q) {
+ return Eigen::half(Eigen::numext::zeta(static_cast<float>(x), static_cast<float>(q)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half polygamma(const Eigen::half& n, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::polygamma(static_cast<float>(n), static_cast<float>(x)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erf(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::erf(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erfc(const Eigen::half& a) {
+ return Eigen::half(Eigen::numext::erfc(static_cast<float>(a)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma(const Eigen::half& a, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::igamma(static_cast<float>(a), static_cast<float>(x)));
+}
+template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igammac(const Eigen::half& a, const Eigen::half& x) {
+ return Eigen::half(Eigen::numext::igammac(static_cast<float>(a), static_cast<float>(x)));
+}
+#endif
} // end namespace numext
} // end namespace Eigen
@@ -466,6 +530,11 @@ static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC int (isfinite)(const Eigen::half& a
namespace std {
+EIGEN_ALWAYS_INLINE ostream& operator << (ostream& os, const Eigen::half& v) {
+ os << static_cast<float>(v);
+ return os;
+}
+
#if __cplusplus > 199711L
template <>
struct hash<Eigen::half> {
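
A host-side sketch exercising the additions above, assuming the translation unit is built in a configuration where Eigen::half is available (at this point in the tree it is pulled in through the CUDA support headers); the values are arbitrary:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      using Eigen::half;

      // New explicit constructors from unsigned integer types added above.
      half a(3u);
      half b(2ull);

      // The new operator<< prints through a float conversion.
      std::cout << a << " " << b << "\n";

      // numext::mini/maxi specializations compare via __hlt on sm_53+ devices
      // and through float on the host.
      std::cout << Eigen::numext::mini(a, b) << " " << Eigen::numext::maxi(a, b) << "\n";
    }
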
diff --git a/Eigen/src/Core/functors/BinaryFunctors.h b/Eigen/src/Core/functors/BinaryFunctors.h
index e28fecfd0..5cd8ca950 100644
--- a/Eigen/src/Core/functors/BinaryFunctors.h
+++ b/Eigen/src/Core/functors/BinaryFunctors.h
@@ -345,6 +345,22 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
/** \internal
+ * \brief Template functor to compute the xor of two booleans
+ *
+ * \sa class CwiseBinaryOp, ArrayBase::operator^
+ */
+struct scalar_boolean_xor_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_xor_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a ^ b; }
+};
+template<> struct functor_traits<scalar_boolean_xor_op> {
+ enum {
+ Cost = NumTraits<bool>::AddCost,
+ PacketAccess = false
+ };
+};
+
+/** \internal
* \brief Template functor to compute the incomplete gamma function igamma(a, x)
*
* \sa class CwiseBinaryOp, Cwise::igamma
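
A sketch of the user-visible form this functor presumably takes, assuming the ArrayCwiseBinaryOps.h change in this series wires it up as operator^ on boolean arrays:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Array<bool, 4, 1> a, b;
      a << true, true, false, false;
      b << true, false, true, false;

      // Element-wise xor backed by scalar_boolean_xor_op; the operator^
      // overload is presumably added by ArrayCwiseBinaryOps.h in this series.
      Eigen::Array<bool, 4, 1> c = a ^ b;
      std::cout << c.transpose() << "\n";
    }
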
diff --git a/Eigen/src/Core/functors/UnaryFunctors.h b/Eigen/src/Core/functors/UnaryFunctors.h
index 7ba0abedc..5baba1494 100644
--- a/Eigen/src/Core/functors/UnaryFunctors.h
+++ b/Eigen/src/Core/functors/UnaryFunctors.h
@@ -234,9 +234,33 @@ template<typename Scalar> struct scalar_exp_op {
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
};
-template<typename Scalar>
-struct functor_traits<scalar_exp_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; };
+template <typename Scalar>
+struct functor_traits<scalar_exp_op<Scalar> > {
+ enum {
+ PacketAccess = packet_traits<Scalar>::HasExp,
+ // The following numbers are based on the AVX implementation.
+#ifdef EIGEN_VECTORIZE_FMA
+ // Haswell can issue 2 add/mul/madd per cycle.
+ Cost =
+ (sizeof(Scalar) == 4
+ // float: 8 pmadd, 4 pmul, 2 padd/psub, 6 other
+ ? (8 * NumTraits<Scalar>::AddCost + 6 * NumTraits<Scalar>::MulCost)
+ // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other
+ : (14 * NumTraits<Scalar>::AddCost +
+ 6 * NumTraits<Scalar>::MulCost +
+ NumTraits<Scalar>::template Div<packet_traits<Scalar>::HasDiv>::Cost))
+#else
+ Cost =
+ (sizeof(Scalar) == 4
+ // float: 7 pmadd, 6 pmul, 4 padd/psub, 10 other
+ ? (21 * NumTraits<Scalar>::AddCost + 13 * NumTraits<Scalar>::MulCost)
+ // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other
+ : (23 * NumTraits<Scalar>::AddCost +
+ 12 * NumTraits<Scalar>::MulCost +
+ NumTraits<Scalar>::template Div<packet_traits<Scalar>::HasDiv>::Cost))
+#endif
+ };
+};
/** \internal
*
@@ -250,9 +274,24 @@ template<typename Scalar> struct scalar_log_op {
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
};
-template<typename Scalar>
-struct functor_traits<scalar_log_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
+template <typename Scalar>
+struct functor_traits<scalar_log_op<Scalar> > {
+ enum {
+ PacketAccess = packet_traits<Scalar>::HasLog,
+ Cost =
+ (PacketAccess
+ // The following numbers are based on the AVX implementation.
+#ifdef EIGEN_VECTORIZE_FMA
+ // 8 pmadd, 6 pmul, 8 padd/psub, 16 other, can issue 2 add/mul/madd per cycle.
+ ? (20 * NumTraits<Scalar>::AddCost + 7 * NumTraits<Scalar>::MulCost)
+#else
+ // 8 pmadd, 6 pmul, 8 padd/psub, 20 other
+ ? (36 * NumTraits<Scalar>::AddCost + 14 * NumTraits<Scalar>::MulCost)
+#endif
+ // Measured cost of std::log.
+ : sizeof(Scalar)==4 ? 40 : 85)
+ };
+};
/** \internal
*
@@ -280,10 +319,19 @@ template<typename Scalar> struct scalar_sqrt_op {
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
};
-template<typename Scalar>
-struct functor_traits<scalar_sqrt_op<Scalar> >
-{ enum {
- Cost = 5 * NumTraits<Scalar>::MulCost,
+template <typename Scalar>
+struct functor_traits<scalar_sqrt_op<Scalar> > {
+ enum {
+#if EIGEN_FAST_MATH
+ // The following numbers are based on the AVX implementation.
+ Cost = (sizeof(Scalar) == 8 ? 28
+ // 4 pmul, 1 pmadd, 3 other
+ : (3 * NumTraits<Scalar>::AddCost +
+ 5 * NumTraits<Scalar>::MulCost)),
+#else
+ // The following numbers are based on min VSQRT throughput on Haswell.
+ Cost = (sizeof(Scalar) == 8 ? 28 : 14),
+#endif
PacketAccess = packet_traits<Scalar>::HasSqrt
};
};
@@ -313,7 +361,7 @@ struct functor_traits<scalar_rsqrt_op<Scalar> >
*/
template<typename Scalar> struct scalar_cos_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
- EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); }
+ EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return numext::cos(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
};
@@ -332,7 +380,7 @@ struct functor_traits<scalar_cos_op<Scalar> >
*/
template<typename Scalar> struct scalar_sin_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sin(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
};
@@ -352,7 +400,7 @@ struct functor_traits<scalar_sin_op<Scalar> >
*/
template<typename Scalar> struct scalar_tan_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::tan(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
};
@@ -371,7 +419,7 @@ struct functor_traits<scalar_tan_op<Scalar> >
*/
template<typename Scalar> struct scalar_acos_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::acos(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
};
@@ -390,7 +438,7 @@ struct functor_traits<scalar_acos_op<Scalar> >
*/
template<typename Scalar> struct scalar_asin_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::asin(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
};
@@ -546,7 +594,7 @@ struct functor_traits<scalar_erfc_op<Scalar> >
*/
template<typename Scalar> struct scalar_atan_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_atan_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::atan; return atan(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::atan(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::patan(a); }
};
@@ -566,7 +614,7 @@ struct functor_traits<scalar_atan_op<Scalar> >
*/
template<typename Scalar> struct scalar_tanh_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_tanh_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::tanh; return tanh(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::tanh(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::ptanh(a); }
};
@@ -574,8 +622,24 @@ template<typename Scalar>
struct functor_traits<scalar_tanh_op<Scalar> >
{
enum {
- Cost = 5 * NumTraits<Scalar>::MulCost,
- PacketAccess = packet_traits<Scalar>::HasTanh
+ PacketAccess = packet_traits<Scalar>::HasTanh,
+ Cost =
+ (PacketAccess
+ // The following numbers are based on the AVX implementation,
+#ifdef EIGEN_VECTORIZE_FMA
+ // Haswell can issue 2 add/mul/madd per cycle.
+ // 9 pmadd, 2 pmul, 1 div, 2 other
+ ? (2 * NumTraits<Scalar>::AddCost + 6 * NumTraits<Scalar>::MulCost +
+ NumTraits<Scalar>::template Div<packet_traits<Scalar>::HasDiv>::Cost)
+#else
+ ? (11 * NumTraits<Scalar>::AddCost +
+ 11 * NumTraits<Scalar>::MulCost +
+ NumTraits<Scalar>::template Div<packet_traits<Scalar>::HasDiv>::Cost)
+#endif
+ // This number assumes a naive implementation of tanh
+ : (6 * NumTraits<Scalar>::AddCost + 3 * NumTraits<Scalar>::MulCost +
+ 2 * NumTraits<Scalar>::template Div<packet_traits<Scalar>::HasDiv>::Cost +
+ functor_traits<scalar_exp_op<Scalar> >::Cost))
};
};
@@ -585,7 +649,7 @@ struct functor_traits<scalar_tanh_op<Scalar> >
*/
template<typename Scalar> struct scalar_sinh_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sinh_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::sinh; return sinh(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sinh(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psinh(a); }
};
@@ -604,7 +668,7 @@ struct functor_traits<scalar_sinh_op<Scalar> >
*/
template<typename Scalar> struct scalar_cosh_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cosh_op)
- EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { using std::cosh; return cosh(a); }
+ EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::cosh(a); }
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcosh(a); }
};
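
The switch from pulling in std::cos, std::sin, etc. to calling the numext:: wrappers keeps these scalar paths usable from CUDA device code as well as from the host. A small sketch of the same pattern in user code (the helper function is illustrative only and assumes the numext overloads referenced by the functors above):

    #include <Eigen/Core>

    // EIGEN_DEVICE_FUNC expands to __host__ __device__ under nvcc and to nothing otherwise,
    // and numext dispatches to an overload that is valid on the compilation target.
    template <typename Scalar>
    EIGEN_DEVICE_FUNC Scalar damped_wave(const Scalar& x) {
      return Eigen::numext::cos(x) * Eigen::numext::tanh(x);
    }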
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 4c1a63d40..a96c7bfd4 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -11,8 +11,8 @@
#define EIGEN_GENERAL_BLOCK_PANEL_H
-namespace Eigen {
-
+namespace Eigen {
+
namespace internal {
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
@@ -36,7 +36,7 @@ const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif
/** \internal */
-struct CacheSizes {
+struct CacheSizes {
CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
int l1CacheSize, l2CacheSize, l3CacheSize;
queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
@@ -89,7 +89,7 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff
*
* \sa setCpuCacheSizes */
-template<typename LhsScalar, typename RhsScalar, int KcFactor>
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
@@ -107,21 +107,17 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
enum {
kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
- k_mask = -8,
-
+ kr = 8,
mr = Traits::mr,
- mr_mask = -mr,
-
- nr = Traits::nr,
- nr_mask = -nr
+ nr = Traits::nr
};
// Increasing k gives us more time to prefetch the content of the "C"
// registers. However once the latency is hidden there is no point in
// increasing the value of k, so we'll cap it at 320 (value determined
// experimentally).
- const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
+ const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);
if (k_cache < k) {
- k = k_cache & k_mask;
+ k = k_cache - (k_cache % kr);
eigen_internal_assert(k > 0);
}
@@ -130,10 +126,10 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
if (n_cache <= n_per_thread) {
// Don't exceed the capacity of the l2 cache.
eigen_internal_assert(n_cache >= static_cast<Index>(nr));
- n = n_cache & nr_mask;
+ n = n_cache - (n_cache % nr);
eigen_internal_assert(n > 0);
} else {
- n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
+ n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
}
if (l3 > l2) {
@@ -141,10 +137,10 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
const Index m_per_thread = numext::div_ceil(m, num_threads);
if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
- m = m_cache & mr_mask;
+ m = m_cache - (m_cache % mr);
eigen_internal_assert(m > 0);
} else {
- m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
+ m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
}
}
}
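
The replacement of the negative-mask rounding (e.g. k_cache & k_mask) with an explicit subtraction of the remainder computes the same "round down to a multiple" result while also being valid when the step is not a power of two. A standalone sketch of the equivalence (helper name is illustrative):

    #include <cassert>

    // Round x down to the nearest multiple of step (step > 0).
    inline long round_down(long x, long step) {
      return x - (x % step);
    }

    int main() {
      assert(round_down(325, 8)  == 320);   // same as 325 & -8, since 8 is a power of two
      assert(round_down(100, 12) == 96);    // the mask trick would not apply to a step of 12
    }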
@@ -156,29 +152,29 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
l2 = 32*1024;
l3 = 512*1024;
#endif
-
+
  // Early return because the computations below are time consuming for small problems.
  // Perhaps it would make more sense to consider k*n*m??
  // Note that for very tiny problems, this function should be bypassed anyway
  // because we use the coefficient-based implementation for them.
- if((std::max)(k,(std::max)(m,n))<48)
+ if((numext::maxi)(k,(numext::maxi)(m,n))<48)
return;
-
+
typedef typename Traits::ResScalar ResScalar;
enum {
k_peeling = 8,
k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
};
-
+
// ---- 1st level of blocking on L1, yields kc ----
-
+
  // Blocking on the third dimension (i.e., k) is chosen so that a horizontal panel
  // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fit within L1 cache.
  // We also include a register-level block of the result (mr x nr).
// (In an ideal world only the lhs panel would stay in L1)
// Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
- const Index max_kc = std::max<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
+ const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
const Index old_k = k;
if(k>max_kc)
{
@@ -187,12 +183,12 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
// while keeping the same number of sweeps over the result.
k = (k%max_kc)==0 ? max_kc
: max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));
-
+
eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
}
-
+
// ---- 2nd level of blocking on max(L2,L3), yields nc ----
-
+
// TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
// actual_l2 = max(l2, l3/nb_core_sharing_l3)
  // The number below is quite conservative: it is better to underestimate the cache size rather than to overestimate it.
@@ -202,7 +198,7 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
#else
const Index actual_l2 = 1572864; // == 1.5 MB
#endif
-
+
  // Here, nc is chosen such that a block of kc x nc of the rhs fits within half of L2.
  // The second half is implicitly reserved to access the result and lhs coefficients.
  // When k<max_kc, nc can grow arbitrarily. In practice, it seems to be fruitful
@@ -223,7 +219,7 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
}
// WARNING Below, we assume that Traits::nr is a power of two.
- Index nc = std::min<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
+ Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
if(n>nc)
{
// We are really blocking over the columns:
@@ -252,9 +248,9 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
      // we have both L2 and L3, and the problem is small enough to be kept in L2
      // Let's choose m such that the lhs's block fits in 1/3 of L2
actual_lm = l2;
- max_mc = (std::min<Index>)(576,max_mc);
+ max_mc = (numext::mini<Index>)(576,max_mc);
}
- Index mc = (std::min<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
+ Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
if (mc > Traits::mr) mc -= mc % Traits::mr;
else if (mc==0) return;
m = (m%mc)==0 ? mc
@@ -263,13 +259,14 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n
}
}
+template <typename Index>
inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
- k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
- m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
- n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
+ k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
+ m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
+ n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
return true;
}
#else
@@ -296,11 +293,11 @@ inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
*
* \sa setCpuCacheSizes */
-template<typename LhsScalar, typename RhsScalar, int KcFactor>
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
if (!useSpecificBlockingSizes(k, m, n)) {
- evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor>(k, m, n, num_threads);
+ evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
}
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
@@ -314,10 +311,10 @@ void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads
if (n > nr) n -= n % nr;
}
-template<typename LhsScalar, typename RhsScalar>
+template<typename LhsScalar, typename RhsScalar, typename Index>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
- computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n, num_threads);
+ computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
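
With the extra typename Index template parameter, the blocking-size helpers no longer hard-code Eigen's default index type and instead deduce it from the k/m/n arguments. A hedged usage sketch (this is an internal API, so the exact signature may change between releases):

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      // The index type (here: int) is deduced from the arguments.
      int k = 1024, m = 1024, n = 1024;
      Eigen::internal::computeProductBlockingSizes<float, float>(k, m, n);
      std::cout << "kc=" << k << " mc=" << m << " nc=" << n << "\n";
    }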
@@ -2225,6 +2222,16 @@ inline std::ptrdiff_t l2CacheSize()
return l2;
}
+/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
+* \sa setCpuCacheSize */
+inline std::ptrdiff_t l3CacheSize()
+{
+ std::ptrdiff_t l1, l2, l3;
+ internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
+ return l3;
+}
+
/** Set the cpu L1 and L2 cache sizes (in bytes).
 * These values are used to adjust the size of the blocks
* for the algorithms working per blocks.
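
Together with the existing l1CacheSize() and l2CacheSize(), the new l3CacheSize() exposes the third cache level that the blocking heuristic above relies on. A brief usage sketch:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      std::cout << "L1: " << Eigen::l1CacheSize() << " bytes\n"
                << "L2: " << Eigen::l2CacheSize() << " bytes\n"
                << "L3: " << Eigen::l3CacheSize() << " bytes\n";  // accessor added by this patch
    }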
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index 831089dee..80ba89465 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -43,7 +43,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride,
- const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking)
+ const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking)
{
general_matrix_matrix_triangular_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h
index 7c014b72a..f79840aa7 100644
--- a/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -27,13 +27,13 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
};
static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha);
};
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha)
{
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
Index size = (std::min)(_rows,_cols);
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h
index 208593718..1bed66ed8 100644
--- a/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -83,7 +83,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
// coherence when accessing the rhs elements
std::ptrdiff_t l1, l2, l3;
manage_caching_sizes(GetAction, &l1, &l2, &l3);
- Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * otherStride) : 0;
+ Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max<Index>(otherStride,size)) : 0;
subcols = std::max<Index>((subcols/Traits::nr)*Traits::nr, Traits::nr);
for(Index k2=IsLower ? 0 : size;
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index a0cbd2247..e2bb147dc 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -371,10 +371,10 @@
// Does the compiler support const expressions?
#ifdef __CUDACC__
// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
-#if __cplusplus > 199711L && defined(__CUDACC_VER__) && (defined(__clang__) || __CUDACC_VER__ >= 70500)
+#if __cplusplus > 199711L && defined(__CUDACC_VER__) && (EIGEN_COMP_CLANG || __CUDACC_VER__ >= 70500)
#define EIGEN_HAS_CONSTEXPR 1
#endif
-#elif (defined(__cplusplus) && __cplusplus >= 201402L) || \
+#elif __has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
EIGEN_GNUC_AT_LEAST(4,8)
#define EIGEN_HAS_CONSTEXPR 1
#endif
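
Since EIGEN_HAS_CONSTEXPR is only defined when one of the detections above succeeds, code that wants C++11/14 constant expressions can simply key off the macro. A minimal sketch (the constant is illustrative, not part of the patch):

    #include <Eigen/Core>

    #if EIGEN_HAS_CONSTEXPR
    constexpr int kPanelWidth = 8;   // true compile-time constant where supported
    #else
    const int kPanelWidth = 8;       // fallback for older compilers
    #endif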
@@ -572,12 +572,12 @@ namespace Eigen {
//------------------------------------------------------------------------------------------
// Static and dynamic alignment control
-//
+//
// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES
 // as the maximal boundary in bytes on which dynamically and statically allocated data may be aligned, respectively.
// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,
// a default value is automatically computed based on architecture, compiler, and OS.
-//
+//
// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}
// to be used to declare statically aligned buffers.
//------------------------------------------------------------------------------------------
@@ -640,7 +640,7 @@ namespace Eigen {
#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES
// Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES
-
+
// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
@@ -667,13 +667,13 @@ namespace Eigen {
#else
#define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
#endif
-
+
#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT
#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES
#else
#define EIGEN_MAX_STATIC_ALIGN_BYTES 0
#endif
-
+
#endif
// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_ALIGN_BYTES
diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h
index 74cd0a472..e9f3ebf88 100644
--- a/Eigen/src/Householder/HouseholderSequence.h
+++ b/Eigen/src/Householder/HouseholderSequence.h
@@ -243,8 +243,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
{
workspace.resize(rows());
Index vecs = m_length;
- if( internal::is_same<typename internal::remove_all<VectorsType>::type,Dest>::value
- && internal::extract_data(dst) == internal::extract_data(m_vectors))
+ if(is_same_dense(dst,m_vectors))
{
// in-place
dst.diagonal().setOnes();
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 1721213d6..64b9eb7f1 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -231,6 +231,15 @@ template<typename _MatrixType> class FullPivLU
return Solve<FullPivLU, Rhs>(*this, b.derived());
}
+ /** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
+ the LU decomposition.
+ */
+ inline RealScalar rcond() const
+ {
+    eigen_assert(m_isInitialized && "FullPivLU is not initialized.");
+ return internal::rcond_estimate_helper(m_l1_norm, *this);
+ }
+
/** \returns the determinant of the matrix of which
* *this is the LU decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
@@ -410,6 +419,7 @@ template<typename _MatrixType> class FullPivLU
IntColVectorType m_rowsTranspositions;
IntRowVectorType m_colsTranspositions;
Index m_det_pq, m_nonzero_pivots;
+ RealScalar m_l1_norm;
RealScalar m_maxpivot, m_prescribedThreshold;
bool m_isInitialized, m_usePrescribedThreshold;
};
@@ -455,11 +465,12 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const EigenBase<InputType>
// the permutations are stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
- m_isInitialized = true;
m_lu = matrix.derived();
+ m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
computeInPlace();
+ m_isInitialized = true;
return *this;
}
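
The new rcond() members (shown here for FullPivLU; the PartialPivLU additions below are analogous) combine the L1 norm cached in compute() with internal::rcond_estimate_helper to return a cheap estimate of the reciprocal condition number. A hedged usage sketch (the 1e-12 cutoff is only an illustration):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(50, 50);
      Eigen::FullPivLU<Eigen::MatrixXd> lu(A);

      // Estimate of 1 / (||A||_1 * ||A^{-1}||_1); values near zero indicate
      // a nearly singular, ill-conditioned matrix.
      double rc = lu.rcond();
      std::cout << "estimated rcond: " << rc << "\n";
      if (rc < 1e-12)
        std::cout << "matrix is close to singular\n";
    }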
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index ab7797d2a..2e6d91939 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -76,7 +76,6 @@ template<typename _MatrixType> class PartialPivLU
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef typename MatrixType::PlainObject PlainObject;
-
/**
* \brief Default Constructor.
*
@@ -152,6 +151,15 @@ template<typename _MatrixType> class PartialPivLU
return Solve<PartialPivLU, Rhs>(*this, b.derived());
}
+ /** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
+ the LU decomposition.
+ */
+ inline RealScalar rcond() const
+ {
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return internal::rcond_estimate_helper(m_l1_norm, *this);
+ }
+
/** \returns the inverse of the matrix of which *this is the LU decomposition.
*
* \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
@@ -178,7 +186,7 @@ template<typename _MatrixType> class PartialPivLU
*
* \sa MatrixBase::determinant()
*/
- typename internal::traits<MatrixType>::Scalar determinant() const;
+ Scalar determinant() const;
MatrixType reconstructedMatrix() const;
@@ -247,6 +255,7 @@ template<typename _MatrixType> class PartialPivLU
PermutationType m_p;
TranspositionType m_rowsTranspositions;
Index m_det_p;
+ RealScalar m_l1_norm;
bool m_isInitialized;
};
@@ -256,6 +265,7 @@ PartialPivLU<MatrixType>::PartialPivLU()
m_p(),
m_rowsTranspositions(),
m_det_p(0),
+ m_l1_norm(0),
m_isInitialized(false)
{
}
@@ -266,6 +276,7 @@ PartialPivLU<MatrixType>::PartialPivLU(Index size)
m_p(size),
m_rowsTranspositions(size),
m_det_p(0),
+ m_l1_norm(0),
m_isInitialized(false)
{
}
@@ -277,6 +288,7 @@ PartialPivLU<MatrixType>::PartialPivLU(const EigenBase<InputType>& matrix)
m_p(matrix.rows()),
m_rowsTranspositions(matrix.rows()),
m_det_p(0),
+ m_l1_norm(0),
m_isInitialized(false)
{
compute(matrix.derived());
@@ -467,6 +479,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const EigenBase<Inpu
eigen_assert(matrix.rows()<NumTraits<int>::highest());
m_lu = matrix.derived();
+ m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const Index size = matrix.rows();
@@ -484,7 +497,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const EigenBase<Inpu
}
template<typename MatrixType>
-typename internal::traits<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
+typename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Scalar(m_det_p) * m_lu.diagonal().prod();
diff --git a/Eigen/src/QR/CompleteOrthogonalDecomposition.h b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
index e71944fd7..230d0d23c 100644
--- a/Eigen/src/QR/CompleteOrthogonalDecomposition.h
+++ b/Eigen/src/QR/CompleteOrthogonalDecomposition.h
@@ -397,6 +397,10 @@ CompleteOrthogonalDecomposition<MatrixType>& CompleteOrthogonalDecomposition<
const Index rank = m_cpqr.rank();
const Index cols = matrix.cols();
+ const Index rows = matrix.rows();
+ m_zCoeffs.resize((std::min)(rows, cols));
+ m_temp.resize(cols);
+
if (rank < cols) {
// We have reduced the (permuted) matrix to the form
// [R11 R12]
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index bf5ff48c3..1940c8294 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -350,7 +350,8 @@ template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
- static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+ typedef typename MatrixType::RealScalar RealScalar;
+ static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int QRPreconditioner>
@@ -359,19 +360,30 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
- static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
{
using std::sqrt;
+ using std::abs;
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
-
+
+ const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
+ const RealScalar precision = NumTraits<Scalar>::epsilon();
+
if(n==0)
{
- z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
- work_matrix.row(p) *= z;
- if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
- if(work_matrix.coeff(q,q)!=Scalar(0))
+ // make sure first column is zero
+ work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
+
+ if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
+ {
+ // work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ }
+ if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
@@ -385,19 +397,25 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
- if(work_matrix.coeff(p,q) != Scalar(0))
+ if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
- if(work_matrix.coeff(q,q) != Scalar(0))
+ if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
}
+
+ // update largest diagonal entry
+ maxDiagEntry = numext::maxi(maxDiagEntry,numext::maxi(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
+ // and check whether the 2x2 block is already diagonal
+ RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
+ return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
}
};
@@ -414,7 +432,6 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
-
if(d == RealScalar(0))
{
rot1.s() = RealScalar(0);
@@ -707,6 +724,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
}
/*** step 2. The main Jacobi SVD iteration. ***/
+ RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();
bool finished = false;
while(!finished)
@@ -722,25 +740,27 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
- RealScalar threshold = numext::maxi<RealScalar>(considerAsZero,
- precision * numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)),
- abs(m_workMatrix.coeff(q,q))));
- // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
+ RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
-
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
- internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
- JacobiRotation<RealScalar> j_left, j_right;
- internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
-
- // accumulate resulting Jacobi rotations
- m_workMatrix.applyOnTheLeft(p,q,j_left);
- if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
-
- m_workMatrix.applyOnTheRight(p,q,j_right);
- if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+        // the complex-to-real operation returns true if the updated 2x2 block is not already diagonal
+ if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
+ {
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+
+ // keep track of the largest diagonal coefficient
+ maxDiagEntry = numext::maxi(maxDiagEntry,numext::maxi(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
+ }
}
}
}
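
Carrying maxDiagEntry through the sweep avoids rescanning the whole diagonal for every (p,q) pair; the convergence criterion itself is unchanged. A standalone sketch of that test under the same naming assumptions (the helper is hypothetical, not part of JacobiSVD):

    #include <Eigen/Core>
    #include <cmath>
    #include <limits>

    // True if the (p,q)/(q,p) off-diagonal entries are still significant relative to the
    // largest diagonal entry seen so far (maxDiagEntry is maintained incrementally above).
    template <typename MatrixType>
    bool block_needs_rotation(const MatrixType& m, Eigen::Index p, Eigen::Index q,
                              typename MatrixType::RealScalar maxDiagEntry) {
      typedef typename MatrixType::RealScalar RealScalar;
      using std::abs;
      const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
      const RealScalar precision = Eigen::NumTraits<typename MatrixType::Scalar>::epsilon();
      const RealScalar threshold = Eigen::numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
      return abs(m.coeff(p, q)) > threshold || abs(m.coeff(q, p)) > threshold;
    }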
diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index fe4a97120..9143a4c82 100644
--- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -22,7 +22,7 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
class InnerIterator;
-// class ReverseInnerIterator;
+ class ReverseInnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h
index 0ae3017cc..7e2efd452 100644
--- a/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -986,7 +986,7 @@ void SuperILU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest
&m_sluStat, &info, Scalar());
StatFree(&m_sluStat);
- if(&x.coeffRef(0) != x_ref.data())
+ if(x.derived().data() != x_ref.data())
x = x_ref;
m_info = info==0 ? Success : NumericalIssue;
diff --git a/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
index 9422c40bc..5694592d6 100644
--- a/Eigen/src/plugins/ArrayCwiseBinaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -280,3 +280,21 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
}
+/** \returns an expression of the coefficient-wise ^ operator of *this and \a other
+ *
+ * \warning this operator is for expression of bool only.
+ *
+ * Example: \include Cwise_boolean_xor.cpp
+ * Output: \verbinclude Cwise_boolean_xor.out
+ *
+ * \sa operator&&(), select()
+ */
+template<typename OtherDerived>
+EIGEN_DEVICE_FUNC
+inline const CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>
+operator^(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
+ return CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>(derived(),other.derived());
+}
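
Like operator&& and operator||, the new operator^ is restricted to boolean expressions by the static assertion. A brief usage sketch:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      Eigen::Array<bool, Eigen::Dynamic, 1> a(4), b(4);
      a << true, true, false, false;
      b << true, false, true, false;

      // Coefficient-wise exclusive or; only compiles for expressions of bool.
      Eigen::Array<bool, Eigen::Dynamic, 1> c = a ^ b;
      std::cout << c.transpose() << "\n";   // prints: 0 1 1 0
    }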